pub const PAGE_SIZE_4K: usize = 0x1000;
The size of a 4K page (4096 bytes).

pub const fn align_down(addr: usize, align: usize) -> usize
Align address downwards.
Returns the greatest x with alignment align so that x <= addr.
The alignment must be a power of two.

pub const fn align_down_4k(addr: usize) -> usize
Align address downwards to 4096 (bytes).

pub const fn align_offset(addr: usize, align: usize) -> usize
Returns the offset of the address within the alignment.
Equivalent to addr % align, but the alignment must be a power of two.

pub const fn align_offset_4k(addr: usize) -> usize
Returns the offset of the address within a 4K-sized page.

pub const fn align_up(addr: usize, align: usize) -> usize
Align address upwards.
Returns the smallest x with alignment align so that x >= addr.
The alignment must be a power of two.

pub const fn align_up_4k(addr: usize) -> usize
Align address upwards to 4096 (bytes).

pub const fn is_aligned(addr: usize, align: usize) -> bool
Checks whether the address has the demanded alignment.
Equivalent to addr % align == 0, but the alignment must be a power of two.

pub const fn is_aligned_4k(addr: usize) -> bool
Checks whether the address is 4K-aligned.
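As a quick sketch (address value chosen for illustration), the free functions operate directly on usize values:

```rust
use memory_addr::{align_down, align_offset, align_up, is_aligned, PAGE_SIZE_4K};

fn main() {
    let addr = 0x12345678usize;
    assert_eq!(align_down(addr, PAGE_SIZE_4K), 0x12345000);
    assert_eq!(align_up(addr, PAGE_SIZE_4K), 0x12346000);
    assert_eq!(align_offset(addr, PAGE_SIZE_4K), 0x678);
    // Aligning up always yields an aligned address.
    assert!(is_aligned(align_up(addr, PAGE_SIZE_4K), PAGE_SIZE_4K));
}
```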
+Wrappers and helper functions for physical and virtual memory addresses.
+use memory_addr::{pa, va, va_range, PhysAddr, VirtAddr, MemoryAddr};
+
+let phys_addr = PhysAddr::from(0x12345678);
+let virt_addr = VirtAddr::from(0x87654321);
+
+assert_eq!(phys_addr.align_down(0x1000usize), pa!(0x12345000));
+assert_eq!(phys_addr.align_offset_4k(), 0x678);
+assert_eq!(virt_addr.align_up_4k(), va!(0x87655000));
+assert!(!virt_addr.is_aligned_4k());
+assert!(va!(0xabcedf0).is_aligned(16usize));
+
+let va_range = va_range!(0x87654000..0x87655000);
+assert_eq!(va_range.start, va!(0x87654000));
+assert_eq!(va_range.size(), 0x1000);
+assert!(va_range.contains(virt_addr));
+assert!(va_range.contains_range(va_range!(virt_addr..virt_addr + 0x100)));
+assert!(!va_range.contains_range(va_range!(virt_addr..virt_addr + 0x1000)));
Macros provided by this crate:
addr_range: Converts the given range expression into AddrRange. Panics if the range is invalid.
def_usize_addr: Creates a new address type by wrapping a usize.
def_usize_addr_formatter: Creates implementations for the Debug, LowerHex, and UpperHex traits for address types defined by def_usize_addr.
pa: Alias for PhysAddr::from_usize.
pa_range: Converts the given range expression into PhysAddrRange. Panics if the range is invalid.
va: Alias for VirtAddr::from_usize.
va_range: Converts the given range expression into VirtAddrRange. Panics if the range is invalid.

macro_rules! addr_range {
    ($range:expr) => { ... };
}
Converts the given range expression into AddrRange. Panics if the range is invalid.
The concrete address type is inferred from the context.
+use memory_addr::{addr_range, AddrRange};
+
+let range: AddrRange<usize> = addr_range!(0x1000usize..0x2000);
+assert_eq!(range.start, 0x1000usize);
+assert_eq!(range.end, 0x2000usize);
And this will panic:
+ +let _: AddrRange<usize> = addr_range!(0x2000usize..0x1000);
macro_rules! def_usize_addr {
    (
        $(#[$meta:meta])*
        $vis:vis type $name:ident;

        $($tt:tt)*
    ) => { ... };
    () => { ... };
}
Creates a new address type by wrapping a usize.

For each $vis type $name;, this macro generates the following items:
- Definition of the new address type $name, which contains a single private unnamed field of type usize.
- Default implementations (i.e. derived implementations) for the following traits: Copy, Clone, Default, Ord, PartialOrd, Eq, and PartialEq.
- Implementations for the following traits: From<usize>, Into<usize> (by implementing From<$name> for usize), Add<usize>, AddAssign<usize>, Sub<usize>, SubAssign<usize>, and Sub<$name>.
- Two const methods to convert between the address type and usize: from_usize, which converts a usize to the address type, and as_usize, which converts the address type to a usize.

use memory_addr::{def_usize_addr, MemoryAddr};
+
+def_usize_addr! {
+    /// An example address type.
+ #[derive(Debug)]
+ pub type ExampleAddr;
+}
+
+const EXAMPLE: ExampleAddr = ExampleAddr::from_usize(0x1234);
+const EXAMPLE_USIZE: usize = EXAMPLE.as_usize();
+assert_eq!(EXAMPLE_USIZE, 0x1234);
+assert_eq!(EXAMPLE.align_down(0x10usize), ExampleAddr::from_usize(0x1230));
+assert_eq!(EXAMPLE.align_up_4k(), ExampleAddr::from_usize(0x2000));
macro_rules! def_usize_addr_formatter {
    (
        $name:ident = $format:literal;

        $($tt:tt)*
    ) => { ... };
    () => { ... };
}
Creates implementations for the Debug, LowerHex, and UpperHex traits for the given address types defined by def_usize_addr.

For each $name = $format;, this macro generates the following items:
- An implementation of core::fmt::Debug for the address type $name, which formats the address with format_args!($format, format_args!("{:#x}", self.0)),
- An implementation of core::fmt::LowerHex for the address type $name, which formats the address in the same way as core::fmt::Debug,
- An implementation of core::fmt::UpperHex for the address type $name, which formats the address with format_args!($format, format_args!("{:#X}", self.0)).

use memory_addr::{PhysAddr, VirtAddr, def_usize_addr, def_usize_addr_formatter};
+
+def_usize_addr! {
+ /// An example address type.
+ pub type ExampleAddr;
+}
+
+def_usize_addr_formatter! {
+ ExampleAddr = "EA:{}";
+}
+
+assert_eq!(format!("{:?}", PhysAddr::from(0x1abc)), "PA:0x1abc");
+assert_eq!(format!("{:x}", VirtAddr::from(0x1abc)), "VA:0x1abc");
+assert_eq!(format!("{:X}", ExampleAddr::from(0x1abc)), "EA:0x1ABC");
macro_rules! pa {
    ($addr:expr) => { ... };
}
Alias for PhysAddr::from_usize.

macro_rules! pa_range {
    ($range:expr) => { ... };
}
Converts the given range expression into PhysAddrRange. Panics if the range is invalid.
use memory_addr::pa_range;
+
+let range = pa_range!(0x1000..0x2000);
+assert_eq!(range.start, 0x1000.into());
+assert_eq!(range.end, 0x2000.into());
And this will panic:
+ +let _ = pa_range!(0x2000..0x1000);
macro_rules! va {
    ($addr:expr) => { ... };
}
Alias for VirtAddr::from_usize.

macro_rules! va_range {
    ($range:expr) => { ... };
}
Converts the given range expression into VirtAddrRange. Panics if the range is invalid.
use memory_addr::va_range;
+
+let range = va_range!(0x1000..0x2000);
+assert_eq!(range.start, 0x1000.into());
+assert_eq!(range.end, 0x2000.into());
And this will panic:
+ +let _ = va_range!(0x2000..0x1000);
pub struct AddrRange<A: MemoryAddr> {
+ pub start: A,
+ pub end: A,
+}
A range of a given memory address type A.

The range is inclusive on the start and exclusive on the end. A range is considered empty iff start == end, and invalid iff start > end. An invalid range should not be created and cannot be obtained without unsafe operations; calling methods on an invalid range will cause unexpected consequences.
use memory_addr::AddrRange;
+
+let range = AddrRange::<usize>::new(0x1000, 0x2000);
+assert_eq!(range.start, 0x1000);
+assert_eq!(range.end, 0x2000);
start: A
The lower bound of the range (inclusive).

end: A
The upper bound of the range (exclusive).

Methods for AddrRange.
Creates a new address range from the start and end addresses.
+Panics if start > end
.
use memory_addr::AddrRange;
+
+let range = AddrRange::new(0x1000usize, 0x2000);
+assert_eq!(range.start, 0x1000);
+assert_eq!(range.end, 0x2000);
And this will panic:
+ +let _ = AddrRange::new(0x2000usize, 0x1000);
Creates a new address range from the given range.
+Returns None
if start > end
.
use memory_addr::AddrRange;
+
+let range = AddrRange::try_new(0x1000usize, 0x2000).unwrap();
+assert_eq!(range.start, 0x1000);
+assert_eq!(range.end, 0x2000);
+assert!(AddrRange::try_new(0x2000usize, 0x1000).is_none());
Creates a new address range from the given range without checking the +validity.
+The caller must ensure that start <= end
, otherwise the range will be
+invalid and unexpected consequences will occur.
use memory_addr::AddrRange;
+
+let range = unsafe { AddrRange::new_unchecked(0x1000usize, 0x2000) };
+assert_eq!(range.start, 0x1000);
+assert_eq!(range.end, 0x2000);
Creates a new address range from the start address and the size.
Panics if size is too large and causes overflow when computing the end address.
use memory_addr::AddrRange;
+
+let range = AddrRange::from_start_size(0x1000usize, 0x1000);
+assert_eq!(range.start, 0x1000);
+assert_eq!(range.end, 0x2000);
And this will panic:
+ +let _ = AddrRange::from_start_size(0x1000usize, usize::MAX);
Creates a new address range from the start address and the size.
Returns None if size is too large and causes overflow when computing the end address.
use memory_addr::AddrRange;
+
+let range = AddrRange::try_from_start_size(0x1000usize, 0x1000).unwrap();
+assert_eq!(range.start, 0x1000);
+assert_eq!(range.end, 0x2000);
+assert!(AddrRange::try_from_start_size(0x1000usize, usize::MAX).is_none());
Creates a new address range from the start address and the size without +checking the validity.
The caller must ensure that size is not too large and won't cause overflow when computing the end address. Failing to do so will create an invalid range and cause unexpected consequences.
use memory_addr::AddrRange;
+
+let range = unsafe { AddrRange::from_start_size_unchecked(0x1000usize, 0x1000) };
+assert_eq!(range.start, 0x1000);
+assert_eq!(range.end, 0x2000);
Returns true
if the range is empty.
It’s also guaranteed that false
will be returned if the range is
+invalid (i.e., start > end
).
use memory_addr::AddrRange;
+
+assert!(AddrRange::new(0x1000usize, 0x1000).is_empty());
+assert!(!AddrRange::new(0x1000usize, 0x2000).is_empty());
Returns the size of the range.
+use memory_addr::AddrRange;
+
+assert_eq!(AddrRange::new(0x1000usize, 0x1000).size(), 0);
+assert_eq!(AddrRange::new(0x1000usize, 0x2000).size(), 0x1000);
Checks if the range contains the given address.
+use memory_addr::AddrRange;
+
+let range = AddrRange::new(0x1000usize, 0x2000);
+assert!(!range.contains(0x0fff));
+assert!(range.contains(0x1000));
+assert!(range.contains(0x1fff));
+assert!(!range.contains(0x2000));
Checks if the range contains the given address range.
+use memory_addr::{addr_range, AddrRange};
+
+let range = AddrRange::new(0x1000usize, 0x2000);
+assert!(!range.contains_range(addr_range!(0x0usize..0xfff)));
+assert!(!range.contains_range(addr_range!(0x0fffusize..0x1fff)));
+assert!(range.contains_range(addr_range!(0x1001usize..0x1fff)));
+assert!(range.contains_range(addr_range!(0x1000usize..0x2000)));
+assert!(!range.contains_range(addr_range!(0x1001usize..0x2001)));
+assert!(!range.contains_range(addr_range!(0x2001usize..0x3001)));
Checks if the range is contained in the given address range.
+use memory_addr::{addr_range, AddrRange};
+
+let range = AddrRange::new(0x1000usize, 0x2000);
+assert!(!range.contained_in(addr_range!(0xfffusize..0x1fff)));
+assert!(!range.contained_in(addr_range!(0x1001usize..0x2001)));
+assert!(range.contained_in(addr_range!(0xfffusize..0x2001)));
+assert!(range.contained_in(addr_range!(0x1000usize..0x2000)));
Checks if the range overlaps with the given address range.
+use memory_addr::{addr_range, AddrRange};
+
+let range = AddrRange::new(0x1000usize, 0x2000usize);
+assert!(!range.overlaps(addr_range!(0xfffusize..0xfff)));
+assert!(!range.overlaps(addr_range!(0x2000usize..0x2000)));
+assert!(!range.overlaps(addr_range!(0xfffusize..0x1000)));
+assert!(range.overlaps(addr_range!(0xfffusize..0x1001)));
+assert!(range.overlaps(addr_range!(0x1fffusize..0x2001)));
+assert!(range.overlaps(addr_range!(0xfffusize..0x2001)));
pub struct PageIter<const PAGE_SIZE: usize, A>
where
    A: MemoryAddr,
{ /* private fields */ }
A page-by-page iterator.
The page size is specified by the generic parameter PAGE_SIZE, which must be a power of 2.
The address type is specified by the type parameter A.
use memory_addr::PageIter;
+
+let mut iter = PageIter::<0x1000, usize>::new(0x1000, 0x3000).unwrap();
+assert_eq!(iter.next(), Some(0x1000));
+assert_eq!(iter.next(), Some(0x2000));
+assert_eq!(iter.next(), None);
+
+assert!(PageIter::<0x1000, usize>::new(0x1000, 0x3001).is_none());
pub struct PhysAddr(/* private fields */);
A physical memory address.
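A minimal usage sketch (the address value is illustrative):

```rust
use memory_addr::{pa, MemoryAddr, PhysAddr};

fn main() {
    let paddr = PhysAddr::from(0x7fff_e678usize);
    // Debug formatting follows the "PA:{:#x}" style shown above.
    assert_eq!(format!("{:?}", paddr), "PA:0x7fffe678");
    assert_eq!(paddr.align_down_4k(), pa!(0x7fff_e000));
    assert_eq!(paddr.align_offset_4k(), 0x678);
}
```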
pub struct VirtAddr(/* private fields */);
A virtual memory address.
from_ptr_of: Creates a new virtual address from a raw pointer.
from_mut_ptr_of: Creates a new virtual address from a mutable raw pointer.
as_ptr: Converts the virtual address to a raw pointer.
as_ptr_of: Converts the virtual address to a raw pointer of a specific type.
as_mut_ptr: Converts the virtual address to a mutable raw pointer.
as_mut_ptr_of: Converts the virtual address to a mutable raw pointer of a specific type.
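A small sketch of the pointer round-trip; the local variable stands in for any mapped memory:

```rust
use memory_addr::VirtAddr;

fn main() {
    let value: u64 = 42;
    let vaddr = VirtAddr::from_ptr_of(&value); // *const u64 -> VirtAddr
    let ptr: *const u64 = vaddr.as_ptr_of();   // VirtAddr -> *const u64
    assert_eq!(unsafe { *ptr }, 42);
    assert_eq!(vaddr.as_usize(), &value as *const u64 as usize);
}
```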
pub trait MemoryAddr:
+ Copy
+ + From<usize>
+ + Into<usize>
+ + Ord {
// Provided methods
+ fn align_down<U>(self, align: U) -> Self
+ where U: Into<usize> { ... }
+ fn align_up<U>(self, align: U) -> Self
+ where U: Into<usize> { ... }
+ fn align_offset<U>(self, align: U) -> usize
+ where U: Into<usize> { ... }
+ fn is_aligned<U>(self, align: U) -> bool
+ where U: Into<usize> { ... }
+ fn align_down_4k(self) -> Self { ... }
+ fn align_up_4k(self) -> Self { ... }
+ fn align_offset_4k(self) -> usize { ... }
+ fn is_aligned_4k(self) -> bool { ... }
+ fn offset(self, offset: isize) -> Self { ... }
+ fn wrapping_offset(self, offset: isize) -> Self { ... }
+ fn offset_from(self, base: Self) -> isize { ... }
+ fn add(self, rhs: usize) -> Self { ... }
+ fn wrapping_add(self, rhs: usize) -> Self { ... }
+ fn overflowing_add(self, rhs: usize) -> (Self, bool) { ... }
+ fn checked_add(self, rhs: usize) -> Option<Self> { ... }
+ fn sub(self, rhs: usize) -> Self { ... }
+ fn wrapping_sub(self, rhs: usize) -> Self { ... }
+ fn overflowing_sub(self, rhs: usize) -> (Self, bool) { ... }
+ fn checked_sub(self, rhs: usize) -> Option<Self> { ... }
+ fn sub_addr(self, rhs: Self) -> usize { ... }
+ fn wrapping_sub_addr(self, rhs: Self) -> usize { ... }
+ fn overflowing_sub_addr(self, rhs: Self) -> (usize, bool) { ... }
+ fn checked_sub_addr(self, rhs: Self) -> Option<usize> { ... }
+}
A trait for memory address types.
+Memory address types here include both physical and virtual addresses, as +well as any other similar types like guest physical addresses in a +hypervisor.
This trait is automatically implemented for any type that is Copy, From<usize>, Into<usize>, and Ord, providing a set of utility methods for address alignment and arithmetic.
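For instance, a hand-rolled address type picks up all of these methods for free once it satisfies the four bounds. GuestPhysAddr below is a hypothetical type for illustration, not part of this crate:

```rust
use memory_addr::MemoryAddr;

// A hypothetical guest-physical address type; the blanket impl applies
// because it is Copy + From<usize> + Into<usize> + Ord.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct GuestPhysAddr(usize);

impl From<usize> for GuestPhysAddr {
    fn from(v: usize) -> Self { Self(v) }
}
impl From<GuestPhysAddr> for usize {
    fn from(a: GuestPhysAddr) -> usize { a.0 }
}

fn main() {
    let gpa = GuestPhysAddr(0x1234);
    assert_eq!(gpa.align_up_4k(), GuestPhysAddr(0x2000));
    assert_eq!(gpa.align_offset_4k(), 0x234);
}
```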
Aligns the address downwards to the given alignment.
+Returns the offset of the address within the given alignment.
+Checks whether the address has the demanded alignment.
+Aligns the address downwards to 4096 (bytes).
+Aligns the address upwards to 4096 (bytes).
+Returns the offset of the address within a 4K-sized page.
+Checks whether the address is 4K-aligned.
+Adds a given offset to the address to get a new address.
+Panics if the result overflows.
+Adds a given offset to the address to get a new address.
+Unlike offset
, this method always wraps around on overflow.
Gets the distance between two addresses.
+Panics if the result is not representable by isize
.
Adds a given unsigned offset to the address to get a new address.
+This method is similar to offset
, but it takes an unsigned offset.
Panics if the result overflows.
+Adds a given unsigned offset to the address to get a new address.
+Unlike add
, this method always wraps around on overflow.
Adds a given unsigned offset to the address to get a new address.
+Unlike add
, this method returns a tuple of the new address and a boolean indicating
+whether the addition has overflowed.
Adds a given unsigned offset to the address to get a new address.
+Unlike add
, this method returns None
on overflow.
Subtracts a given unsigned offset from the address to get a new address.
+This method is similar to offset(-rhs)
, but it takes an unsigned offset.
Panics if the result overflows.
+Subtracts a given unsigned offset from the address to get a new address.
+Unlike sub
, this method always wraps around on overflowed.
Subtracts a given unsigned offset from the address to get a new address.
+Unlike sub
, this method returns a tuple of the new address and a boolean indicating
+whether the subtraction has overflowed.
Subtracts a given unsigned offset from the address to get a new address.
+Unlike sub
, this method returns None
on overflow.
Subtracts another address from the address to get the offset between them.
+Panics if the result overflows.
+Subtracts another address from the address to get the offset between them.
+Unlike sub_addr
, this method always wraps around on overflow.
Subtracts another address from the address to get the offset between them.
+Unlike sub_addr
, this method returns a tuple of the offset and a boolean indicating
+whether the subtraction has overflowed.
Subtracts another address from the address to get the offset between them.
+Unlike sub_addr
, this method returns None
on overflow.
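A compact sketch of the plain, checked, wrapping, and overflowing variants on the provided VirtAddr type (values chosen for illustration):

```rust
use memory_addr::{va, MemoryAddr};

fn main() {
    let addr = va!(0x2000);
    assert_eq!(addr.offset(-0x1000), va!(0x1000));   // signed offset
    assert_eq!(addr.add(0x500), va!(0x2500));        // unsigned offset
    assert_eq!(addr.checked_sub(0x3000), None);      // would underflow
    assert_eq!(addr.wrapping_sub(0x3000), va!(usize::MAX - 0xfff));
    assert!(addr.overflowing_add(usize::MAX).1);     // overflow flag set
    assert_eq!(addr.sub_addr(va!(0x1800)), 0x800);   // distance between addresses
}
```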
pub type PageIter4K<A> = PageIter<PAGE_SIZE_4K, A>;
A PageIter
for 4K pages.
struct PageIter4K<A> { /* private fields */ }
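A small usage sketch of the alias, equivalent to PageIter::<0x1000, usize>:

```rust
use memory_addr::PageIter4K;

fn main() {
    // Iterate the 4K pages in [0x8000, 0xb000).
    let pages: Vec<usize> = PageIter4K::<usize>::new(0x8000, 0xb000).unwrap().collect();
    assert_eq!(pages, [0x8000, 0x9000, 0xa000]);
}
```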
pub type PhysAddrRange = AddrRange<PhysAddr>;
A range of physical addresses PhysAddr
.
struct PhysAddrRange {
+ pub start: PhysAddr,
+ pub end: PhysAddr,
+}
start: PhysAddr
The lower bound of the range (inclusive).
+end: PhysAddr
The upper bound of the range (exclusive).
+pub type VirtAddrRange = AddrRange<VirtAddr>;
A range of virtual addresses VirtAddr
.
struct VirtAddrRange {
+ pub start: VirtAddr,
+ pub end: VirtAddr,
+}
start: VirtAddr
The lower bound of the range (inclusive).
+end: VirtAddr
The upper bound of the range (exclusive).
pub enum MappingError {
+ InvalidParam,
+ AlreadyExists,
+ BadState,
+}
Error type for memory mapping operations.
InvalidParam: Invalid parameter (e.g., addr, size, flags, etc.)
AlreadyExists: The given range overlaps with an existing mapping.
BadState: The backend page table is in a bad state.
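A minimal sketch of matching on these errors (the describe function is illustrative, assuming the three variants above are exhaustive):

```rust
use memory_set::{MappingError, MappingResult};

fn describe(result: MappingResult) -> &'static str {
    match result {
        Ok(()) => "mapping succeeded",
        Err(MappingError::InvalidParam) => "bad addr, size, or flags",
        Err(MappingError::AlreadyExists) => "range overlaps an existing mapping",
        Err(MappingError::BadState) => "page table is in a bad state",
    }
}
```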
+Data structures and operations for managing memory mappings.
It is useful for implementing mmap, munmap, and mprotect.
use memory_addr::{va, va_range, VirtAddr};
+use memory_set::{MappingBackend, MemoryArea, MemorySet};
+
+const MAX_ADDR: usize = 0x10000;
+
+/// Mock memory flags.
+type MockFlags = u8;
+/// A mock page table, which is a simple array that maps addresses to flags.
+type MockPageTable = [MockFlags; MAX_ADDR];
+
+/// A mock mapping backend that manipulates the page table on `map` and `unmap`.
+#[derive(Clone)]
+struct MockBackend;
+
+let mut pt = [0; MAX_ADDR];
+let mut memory_set = MemorySet::<MockBackend>::new();
+
+// Map [0x1000..0x5000).
+memory_set.map(
+ /* area: */ MemoryArea::new(va!(0x1000), 0x4000, 1, MockBackend),
+ /* page_table: */ &mut pt,
+ /* unmap_overlap */ false,
+).unwrap();
+// Unmap [0x2000..0x4000), will split the area into two parts.
+memory_set.unmap(va!(0x2000), 0x2000, &mut pt).unwrap();
+
+let areas = memory_set.iter().collect::<Vec<_>>();
+assert_eq!(areas.len(), 2);
+assert_eq!(areas[0].va_range(), va_range!(0x1000..0x2000));
+assert_eq!(areas[1].va_range(), va_range!(0x4000..0x5000));
+
+// Underlying operations to do when manipulating mappings.
+impl MappingBackend for MockBackend {
+ type Addr = VirtAddr;
+ type Flags = MockFlags;
+ type PageTable = MockPageTable;
+
+ fn map(&self, start: VirtAddr, size: usize, flags: MockFlags, pt: &mut MockPageTable) -> bool {
+ for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
+ if *entry != 0 {
+ return false;
+ }
+ *entry = flags;
+ }
+ true
+ }
+
+ fn unmap(&self, start: VirtAddr, size: usize, pt: &mut MockPageTable) -> bool {
+ for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
+ if *entry == 0 {
+ return false;
+ }
+ *entry = 0;
+ }
+ true
+ }
+
+ fn protect(
+ &self,
+ start: VirtAddr,
+ size: usize,
+ new_flags: MockFlags,
+ pt: &mut MockPageTable,
+ ) -> bool {
+ for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
+ if *entry == 0 {
+ return false;
+ }
+ *entry = new_flags;
+ }
+ true
+ }
+}
pub struct MemoryArea<B: MappingBackend> { /* private fields */ }
A memory area represents a continuous range of virtual memory with the same +flags.
The target physical memory frames are determined by the MappingBackend and may not be contiguous.
pub struct MemorySet<B: MappingBackend> { /* private fields */ }
A container that maintains memory mappings (MemoryArea
).
Returns the iterator over all memory areas.
+Returns whether the given address range overlaps with any existing area.
+Finds the memory area that contains the given address.
+Finds a free area that can accommodate the given size.
+The search starts from the given hint
address, and the area should be
+within the given limit
range.
Returns the start address of the free area. Returns None
if no such
+area is found.
Add a new memory mapping.
+The mapping is represented by a MemoryArea
.
If the new area overlaps with any existing area, the behavior is
+determined by the unmap_overlap
parameter. If it is true
, the
+overlapped regions will be unmapped first. Otherwise, it returns an
+error.
Remove memory mappings within the given address range.
All memory areas that are fully contained in the range will be removed directly. If an area intersects the boundary, it will be shrunk. If the unmapped range is in the middle of an existing area, it will be split into two areas.
+Remove all memory areas and the underlying mappings.
+Change the flags of memory mappings within the given address range.
+update_flags
is a function that receives old flags and processes
+new flags (e.g., some flags can not be changed through this interface).
+It returns None
if there is no bit to change.
Memory areas will be skipped according to update_flags
. Memory areas
+that are fully contained in the range or contains the range or
+intersects with the boundary will be handled similarly to munmap
.
pub trait MappingBackend: Clone {
+ type Addr: MemoryAddr;
+ type Flags: Copy;
+ type PageTable;
+
+ // Required methods
+ fn map(
+ &self,
+ start: Self::Addr,
+ size: usize,
+ flags: Self::Flags,
+ page_table: &mut Self::PageTable,
+ ) -> bool;
+ fn unmap(
+ &self,
+ start: Self::Addr,
+ size: usize,
+ page_table: &mut Self::PageTable,
+ ) -> bool;
+ fn protect(
+ &self,
+ start: Self::Addr,
+ size: usize,
+ new_flags: Self::Flags,
+ page_table: &mut Self::PageTable,
+ ) -> bool;
+}
Underlying operations to do when manipulating mappings within the specific
+MemoryArea
.
The backend can be different for different memory areas. For example, for linear mappings, the target physical address is known when the mapping is added to the page table, while for lazy mappings, an empty mapping needs to be added to the page table to trigger a page fault.
Addr: The address type used in the memory area.
Flags: The flags type used in the memory area.
PageTable: The page table type used in the memory area.
map: What to do when mapping a region within the area with the given flags.
unmap: What to do when unmapping a memory region within the area.
protect: What to do when changing access flags.
+pub type MappingResult<T = ()> = Result<T, MappingError>;
A Result
type with MappingError
as the error type.
enum MappingResult<T = ()> {
+ Ok(T),
+ Err(MappingError),
+}
use core::cmp::Ord;
+
+/// A trait for memory address types.
+///
+/// Memory address types here include both physical and virtual addresses, as
+/// well as any other similar types like guest physical addresses in a
+/// hypervisor.
+///
+/// This trait is automatically implemented for any type that is `Copy`,
+/// `From<usize>`, `Into<usize>`, and `Ord`, providing a set of utility methods
+/// for address alignment and arithmetic.
+pub trait MemoryAddr:
+ // The address type should be trivially copyable. This implies `Clone`.
+ Copy
+ // The address type should be convertible to and from `usize`.
+ + From<usize>
+ + Into<usize>
+ // The address type should be comparable.
+ + Ord
+{
+ // No required methods for now. Following are some utility methods.
+
+ //
+ // This section contains utility methods for address alignment.
+ //
+
+ /// Aligns the address downwards to the given alignment.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn align_down<U>(self, align: U) -> Self
+ where
+ U: Into<usize>,
+ {
+ Self::from(crate::align_down(self.into(), align.into()))
+ }
+
+ /// Aligns the address upwards to the given alignment.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn align_up<U>(self, align: U) -> Self
+ where
+ U: Into<usize>,
+ {
+ Self::from(crate::align_up(self.into(), align.into()))
+ }
+
+ /// Returns the offset of the address within the given alignment.
+ #[inline]
+ #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+ fn align_offset<U>(self, align: U) -> usize
+ where
+ U: Into<usize>,
+ {
+ crate::align_offset(self.into(), align.into())
+ }
+
+ /// Checks whether the address has the demanded alignment.
+ #[inline]
+ #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+ fn is_aligned<U>(self, align: U) -> bool
+ where
+ U: Into<usize>,
+ {
+ crate::is_aligned(self.into(), align.into())
+ }
+
+ /// Aligns the address downwards to 4096 (bytes).
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn align_down_4k(self) -> Self {
+ Self::from(crate::align_down(self.into(), crate::PAGE_SIZE_4K))
+ }
+
+ /// Aligns the address upwards to 4096 (bytes).
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn align_up_4k(self) -> Self {
+ Self::from(crate::align_up(self.into(), crate::PAGE_SIZE_4K))
+ }
+
+ /// Returns the offset of the address within a 4K-sized page.
+ #[inline]
+ #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+ fn align_offset_4k(self) -> usize {
+ crate::align_offset(self.into(), crate::PAGE_SIZE_4K)
+ }
+
+ /// Checks whether the address is 4K-aligned.
+ #[inline]
+ #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+ fn is_aligned_4k(self) -> bool {
+ crate::is_aligned(self.into(), crate::PAGE_SIZE_4K)
+ }
+
+ //
+ // This section contains utility methods for address arithmetic.
+ //
+
+ /// Adds a given offset to the address to get a new address.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the result overflows.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn offset(self, offset: isize) -> Self {
+ // todo: use `strict_add_signed` when it's stable.
+ Self::from(usize::checked_add_signed(self.into(), offset).expect("overflow in `MemoryAddr::offset`"))
+ }
+
+ /// Adds a given offset to the address to get a new address.
+ ///
+ /// Unlike `offset`, this method always wraps around on overflow.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn wrapping_offset(self, offset: isize) -> Self {
+ Self::from(usize::wrapping_add_signed(self.into(), offset))
+ }
+
+ /// Gets the distance between two addresses.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the result is not representable by `isize`.
+ #[inline]
+ #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+ fn offset_from(self, base: Self) -> isize {
+ let result = usize::wrapping_sub(self.into(), base.into()) as isize;
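+        // If the distance fits in `isize`, the sign of `result` agrees with the
+        // ordering: `result > 0` exactly when `base < self`, and `result <= 0`
+        // otherwise. A mismatch means the wrapped subtraction overflowed.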
+ if (result > 0) ^ (base < self) {
+ // The result has overflowed.
+ panic!("overflow in `MemoryAddr::offset_from`");
+ } else {
+ result
+ }
+ }
+
+ /// Adds a given **unsigned** offset to the address to get a new address.
+ ///
+ /// This method is similar to `offset`, but it takes an unsigned offset.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the result overflows.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn add(self, rhs: usize) -> Self {
+ Self::from(usize::checked_add(self.into(), rhs).expect("overflow in `MemoryAddr::add`"))
+ }
+
+ /// Adds a given **unsigned** offset to the address to get a new address.
+ ///
+ /// Unlike `add`, this method always wraps around on overflow.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn wrapping_add(self, rhs: usize) -> Self {
+ Self::from(usize::wrapping_add(self.into(), rhs))
+ }
+
+ /// Adds a given **unsigned** offset to the address to get a new address.
+ ///
+ /// Unlike `add`, this method returns a tuple of the new address and a boolean indicating
+ /// whether the addition has overflowed.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn overflowing_add(self, rhs: usize) -> (Self, bool) {
+ let (result, overflow) = self.into().overflowing_add(rhs);
+ (Self::from(result), overflow)
+ }
+
+ /// Adds a given **unsigned** offset to the address to get a new address.
+ ///
+ /// Unlike `add`, this method returns `None` on overflow.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn checked_add(self, rhs: usize) -> Option<Self> {
+ usize::checked_add(self.into(), rhs).map(Self::from)
+ }
+
+ /// Subtracts a given **unsigned** offset from the address to get a new address.
+ ///
+ /// This method is similar to `offset(-rhs)`, but it takes an unsigned offset.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the result overflows.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn sub(self, rhs: usize) -> Self {
+ Self::from(usize::checked_sub(self.into(), rhs).expect("overflow in `MemoryAddr::sub`"))
+ }
+
+ /// Subtracts a given **unsigned** offset from the address to get a new address.
+ ///
+    /// Unlike `sub`, this method always wraps around on overflow.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn wrapping_sub(self, rhs: usize) -> Self {
+ Self::from(usize::wrapping_sub(self.into(), rhs))
+ }
+
+ /// Subtracts a given **unsigned** offset from the address to get a new address.
+ ///
+ /// Unlike `sub`, this method returns a tuple of the new address and a boolean indicating
+ /// whether the subtraction has overflowed.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn overflowing_sub(self, rhs: usize) -> (Self, bool) {
+ let (result, overflow) = self.into().overflowing_sub(rhs);
+ (Self::from(result), overflow)
+ }
+
+ /// Subtracts a given **unsigned** offset from the address to get a new address.
+ ///
+ /// Unlike `sub`, this method returns `None` on overflow.
+ #[inline]
+ #[must_use = "this returns a new address, without modifying the original"]
+ fn checked_sub(self, rhs: usize) -> Option<Self> {
+ usize::checked_sub(self.into(), rhs).map(Self::from)
+ }
+
+ /// Subtracts another address from the address to get the offset between them.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the result overflows.
+ #[inline]
+ #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+ fn sub_addr(self, rhs: Self) -> usize {
+ usize::checked_sub(self.into(), rhs.into()).expect("overflow in `MemoryAddr::sub_addr`")
+ }
+
+ /// Subtracts another address from the address to get the offset between them.
+ ///
+ /// Unlike `sub_addr`, this method always wraps around on overflow.
+ #[inline]
+ #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+ fn wrapping_sub_addr(self, rhs: Self) -> usize {
+ usize::wrapping_sub(self.into(), rhs.into())
+ }
+
+ /// Subtracts another address from the address to get the offset between them.
+ ///
+ /// Unlike `sub_addr`, this method returns a tuple of the offset and a boolean indicating
+ /// whether the subtraction has overflowed.
+ #[inline]
+ #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+ fn overflowing_sub_addr(self, rhs: Self) -> (usize, bool) {
+ usize::overflowing_sub(self.into(), rhs.into())
+ }
+
+ /// Subtracts another address from the address to get the offset between them.
+ ///
+ /// Unlike `sub_addr`, this method returns `None` on overflow.
+ #[inline]
+ #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+ fn checked_sub_addr(self, rhs: Self) -> Option<usize> {
+ usize::checked_sub(self.into(), rhs.into())
+ }
+}
+
+/// Implement the `MemoryAddr` trait for any type that is `Copy`, `From<usize>`,
+/// `Into<usize>`, and `Ord`.
+impl<T> MemoryAddr for T where T: Copy + From<usize> + Into<usize> + Ord {}
+
+/// Creates a new address type by wrapping an `usize`.
+///
+/// For each `$vis type $name;`, this macro generates the following items:
+/// - Definition of the new address type `$name`, which contains a single
+/// private unnamed field of type `usize`.
+/// - Default implementations (i.e. derived implementations) for the following
+/// traits:
+/// - `Copy`, `Clone`,
+/// - `Default`,
+/// - `Ord`, `PartialOrd`, `Eq`, and `PartialEq`.
+/// - Implementations for the following traits:
+/// - `From<usize>`, `Into<usize>` (by implementing `From<$name> for usize`),
+/// - `Add<usize>`, `AddAssign<usize>`, `Sub<usize>`, `SubAssign<usize>`, and
+/// - `Sub<$name>`.
+/// - Two `const` methods to convert between the address type and `usize`:
+/// - `from_usize`, which converts an `usize` to the address type, and
+/// - `as_usize`, which converts the address type to an `usize`.
+///
+/// # Example
+///
+/// ```
+/// use memory_addr::{def_usize_addr, MemoryAddr};
+///
+/// def_usize_addr! {
+/// An example address type.
+/// #[derive(Debug)]
+/// pub type ExampleAddr;
+/// }
+///
+/// # fn main() {
+/// const EXAMPLE: ExampleAddr = ExampleAddr::from_usize(0x1234);
+/// const EXAMPLE_USIZE: usize = EXAMPLE.as_usize();
+/// assert_eq!(EXAMPLE_USIZE, 0x1234);
+/// assert_eq!(EXAMPLE.align_down(0x10usize), ExampleAddr::from_usize(0x1230));
+/// assert_eq!(EXAMPLE.align_up_4k(), ExampleAddr::from_usize(0x2000));
+/// # }
+/// ```
+#[macro_export]
+macro_rules! def_usize_addr {
+ (
+ $(#[$meta:meta])*
+ $vis:vis type $name:ident;
+
+ $($tt:tt)*
+ ) => {
+ #[repr(transparent)]
+ #[derive(Copy, Clone, Default, Ord, PartialOrd, Eq, PartialEq)]
+ $(#[$meta])*
+ pub struct $name(usize);
+
+ impl $name {
+ #[doc = concat!("Converts an `usize` to an [`", stringify!($name), "`].")]
+ #[inline]
+ pub const fn from_usize(addr: usize) -> Self {
+ Self(addr)
+ }
+
+ #[doc = concat!("Converts an [`", stringify!($name), "`] to an `usize`.")]
+ #[inline]
+ pub const fn as_usize(self) -> usize {
+ self.0
+ }
+ }
+
+ impl From<usize> for $name {
+ #[inline]
+ fn from(addr: usize) -> Self {
+ Self(addr)
+ }
+ }
+
+ impl From<$name> for usize {
+ #[inline]
+ fn from(addr: $name) -> usize {
+ addr.0
+ }
+ }
+
+ impl core::ops::Add<usize> for $name {
+ type Output = Self;
+ #[inline]
+ fn add(self, rhs: usize) -> Self {
+ Self(self.0 + rhs)
+ }
+ }
+
+ impl core::ops::AddAssign<usize> for $name {
+ #[inline]
+ fn add_assign(&mut self, rhs: usize) {
+ self.0 += rhs;
+ }
+ }
+
+ impl core::ops::Sub<usize> for $name {
+ type Output = Self;
+ #[inline]
+ fn sub(self, rhs: usize) -> Self {
+ Self(self.0 - rhs)
+ }
+ }
+
+ impl core::ops::SubAssign<usize> for $name {
+ #[inline]
+ fn sub_assign(&mut self, rhs: usize) {
+ self.0 -= rhs;
+ }
+ }
+
+ impl core::ops::Sub<$name> for $name {
+ type Output = usize;
+ #[inline]
+ fn sub(self, rhs: $name) -> usize {
+ self.0 - rhs.0
+ }
+ }
+
+ $crate::def_usize_addr!($($tt)*);
+ };
+ () => {};
+}
+
+/// Creates implementations for the [`Debug`](core::fmt::Debug),
+/// [`LowerHex`](core::fmt::LowerHex), and [`UpperHex`](core::fmt::UpperHex)
+/// traits for the given address types defined by the [`def_usize_addr`].
+///
+/// For each `$name = $format;`, this macro generates the following items:
+/// - An implementation of [`core::fmt::Debug`] for the address type `$name`,
+/// which formats the address with `format_args!($format,
+/// format_args!("{:#x}", self.0))`,
+/// - An implementation of [`core::fmt::LowerHex`] for the address type `$name`,
+/// which formats the address in the same way as [`core::fmt::Debug`],
+/// - An implementation of [`core::fmt::UpperHex`] for the address type `$name`,
+/// which formats the address with `format_args!($format,
+/// format_args!("{:#X}", self.0))`.
+///
+/// # Example
+///
+/// ```
+/// use memory_addr::{PhysAddr, VirtAddr, def_usize_addr, def_usize_addr_formatter};
+///
+/// def_usize_addr! {
+/// /// An example address type.
+/// pub type ExampleAddr;
+/// }
+///
+/// def_usize_addr_formatter! {
+/// ExampleAddr = "EA:{}";
+/// }
+///
+/// # fn main() {
+/// assert_eq!(format!("{:?}", PhysAddr::from(0x1abc)), "PA:0x1abc");
+/// assert_eq!(format!("{:x}", VirtAddr::from(0x1abc)), "VA:0x1abc");
+/// assert_eq!(format!("{:X}", ExampleAddr::from(0x1abc)), "EA:0x1ABC");
+/// # }
+/// ```
+#[macro_export]
+macro_rules! def_usize_addr_formatter {
+ (
+ $name:ident = $format:literal;
+
+ $($tt:tt)*
+ ) => {
+ impl core::fmt::Debug for $name {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ f.write_fmt(format_args!($format, format_args!("{:#x}", self.0)))
+ }
+ }
+
+ impl core::fmt::LowerHex for $name {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ f.write_fmt(format_args!($format, format_args!("{:#x}", self.0)))
+ }
+ }
+
+ impl core::fmt::UpperHex for $name {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ f.write_fmt(format_args!($format, format_args!("{:#X}", self.0)))
+ }
+ }
+
+ $crate::def_usize_addr_formatter!($($tt)*);
+ };
+ () => {};
+}
+
+def_usize_addr! {
+ /// A physical memory address.
+ pub type PhysAddr;
+
+ /// A virtual memory address.
+ pub type VirtAddr;
+}
+
+def_usize_addr_formatter! {
+ PhysAddr = "PA:{}";
+ VirtAddr = "VA:{}";
+}
+
+impl VirtAddr {
+ /// Creates a new virtual address from a raw pointer.
+ #[inline]
+ pub fn from_ptr_of<T>(ptr: *const T) -> Self {
+ Self(ptr as usize)
+ }
+
+ /// Creates a new virtual address from a mutable raw pointer.
+ #[inline]
+ pub fn from_mut_ptr_of<T>(ptr: *mut T) -> Self {
+ Self(ptr as usize)
+ }
+
+ /// Converts the virtual address to a raw pointer.
+ #[inline]
+ pub const fn as_ptr(self) -> *const u8 {
+ self.0 as *const u8
+ }
+
+ /// Converts the virtual address to a raw pointer of a specific type.
+ #[inline]
+ pub const fn as_ptr_of<T>(self) -> *const T {
+ self.0 as *const T
+ }
+
+ /// Converts the virtual address to a mutable raw pointer.
+ #[inline]
+ pub const fn as_mut_ptr(self) -> *mut u8 {
+ self.0 as *mut u8
+ }
+
+ /// Converts the virtual address to a mutable raw pointer of a specific
+ /// type.
+ #[inline]
+ pub const fn as_mut_ptr_of<T>(self) -> *mut T {
+ self.0 as *mut T
+ }
+}
+
+/// Alias for [`PhysAddr::from_usize`].
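+///
+/// # Example
+///
+/// A quick sketch showing that the macro is a thin wrapper:
+///
+/// ```
+/// use memory_addr::{pa, PhysAddr};
+///
+/// assert_eq!(pa!(0x1000), PhysAddr::from_usize(0x1000));
+/// ```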
+#[macro_export]
+macro_rules! pa {
+ ($addr:expr) => {
+ $crate::PhysAddr::from_usize($addr)
+ };
+}
+
+/// Alias for [`VirtAddr::from_usize`].
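+///
+/// # Example
+///
+/// A quick sketch showing that the macro is a thin wrapper:
+///
+/// ```
+/// use memory_addr::{va, VirtAddr};
+///
+/// assert_eq!(va!(0x1000), VirtAddr::from_usize(0x1000));
+/// ```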
+#[macro_export]
+macro_rules! va {
+ ($addr:expr) => {
+ $crate::VirtAddr::from_usize($addr)
+ };
+}
+
+#[cfg(test)]
+mod test {
+ use core::mem::size_of;
+
+ use super::*;
+
+ def_usize_addr! {
+ /// An example address type.
+ pub type ExampleAddr;
+ /// Another example address type.
+ pub type AnotherAddr;
+ }
+
+ def_usize_addr_formatter! {
+ ExampleAddr = "EA:{}";
+ AnotherAddr = "AA:{}";
+ }
+
+ #[test]
+ fn test_addr() {
+ let addr = va!(0x2000);
+ assert!(addr.is_aligned_4k());
+ assert!(!addr.is_aligned(0x10000usize));
+ assert_eq!(addr.align_offset_4k(), 0);
+ assert_eq!(addr.align_down_4k(), va!(0x2000));
+ assert_eq!(addr.align_up_4k(), va!(0x2000));
+
+ let addr = va!(0x2fff);
+ assert!(!addr.is_aligned_4k());
+ assert_eq!(addr.align_offset_4k(), 0xfff);
+ assert_eq!(addr.align_down_4k(), va!(0x2000));
+ assert_eq!(addr.align_up_4k(), va!(0x3000));
+
+ let align = 0x100000;
+ let addr = va!(align * 5) + 0x2000;
+ assert!(addr.is_aligned_4k());
+ assert!(!addr.is_aligned(align));
+ assert_eq!(addr.align_offset(align), 0x2000);
+ assert_eq!(addr.align_down(align), va!(align * 5));
+ assert_eq!(addr.align_up(align), va!(align * 6));
+ }
+
+ #[test]
+ pub fn test_addr_convert_and_comparison() {
+ let example1 = ExampleAddr::from_usize(0x1234);
+ let example2 = ExampleAddr::from(0x5678);
+ let another1 = AnotherAddr::from_usize(0x9abc);
+ let another2 = AnotherAddr::from(0xdef0);
+
+ assert_eq!(example1.as_usize(), 0x1234);
+ assert_eq!(Into::<usize>::into(example2), 0x5678);
+ assert_eq!(Into::<usize>::into(another1), 0x9abc);
+ assert_eq!(another2.as_usize(), 0xdef0);
+
+ assert_eq!(example1, ExampleAddr::from(0x1234));
+ assert_eq!(example2, ExampleAddr::from_usize(0x5678));
+ assert_eq!(another1, AnotherAddr::from_usize(0x9abc));
+ assert_eq!(another2, AnotherAddr::from(0xdef0));
+
+ assert!(example1 < example2);
+ assert!(example1 <= example2);
+ assert!(example2 > example1);
+ assert!(example2 >= example1);
+ assert!(example1 != example2);
+ }
+
+ #[test]
+ pub fn test_addr_fmt() {
+ assert_eq!(format!("{:?}", ExampleAddr::from(0x1abc)), "EA:0x1abc");
+ assert_eq!(format!("{:x}", AnotherAddr::from(0x1abc)), "AA:0x1abc");
+ assert_eq!(format!("{:X}", ExampleAddr::from(0x1abc)), "EA:0x1ABC");
+ }
+
+ #[test]
+ pub fn test_alignment() {
+ let alignment = 0x1000usize;
+ let base = alignment * 2;
+ let offset = 0x123usize;
+ let addr = ExampleAddr::from_usize(base + offset);
+
+ assert_eq!(addr.align_down(alignment), ExampleAddr::from_usize(base));
+ assert_eq!(
+ addr.align_up(alignment),
+ ExampleAddr::from_usize(base + alignment)
+ );
+ assert_eq!(addr.align_offset(alignment), offset);
+ assert!(!addr.is_aligned(alignment));
+ assert!(ExampleAddr::from_usize(base).is_aligned(alignment));
+ assert_eq!(
+ ExampleAddr::from_usize(base).align_up(alignment),
+ ExampleAddr::from_usize(base)
+ );
+ }
+
+ #[test]
+ pub fn test_addr_arithmetic() {
+ let base = 0x1234usize;
+ let offset = 0x100usize;
+ let with_offset = base + offset;
+
+ let addr = ExampleAddr::from_usize(base);
+ let offset_addr = ExampleAddr::from_usize(with_offset);
+
+ assert_eq!(addr.offset(offset as isize), offset_addr);
+ assert_eq!(addr.wrapping_offset(offset as isize), offset_addr);
+ assert_eq!(offset_addr.offset_from(addr), offset as isize);
+ assert_eq!(addr.add(offset), offset_addr);
+ assert_eq!(addr.wrapping_add(offset), offset_addr);
+ assert_eq!(offset_addr.sub(offset), addr);
+ assert_eq!(offset_addr.wrapping_sub(offset), addr);
+ assert_eq!(offset_addr.sub_addr(addr), offset);
+ assert_eq!(offset_addr.wrapping_sub_addr(addr), offset);
+
+ assert_eq!(addr + offset, offset_addr);
+ assert_eq!(offset_addr - offset, addr);
+ assert_eq!(offset_addr - addr, offset);
+ }
+
+ #[test]
+ pub fn test_addr_wrapping_arithmetic() {
+ let base = usize::MAX - 0x100usize;
+ let offset = 0x200usize;
+ let with_offset = base.wrapping_add(offset);
+
+ let addr = ExampleAddr::from_usize(base);
+ let offset_addr = ExampleAddr::from_usize(with_offset);
+
+ assert_eq!(addr.wrapping_offset(offset as isize), offset_addr);
+ assert_eq!(offset_addr.wrapping_offset(-(offset as isize)), addr);
+ assert_eq!(addr.wrapping_add(offset), offset_addr);
+ assert_eq!(offset_addr.wrapping_sub(offset), addr);
+ assert_eq!(offset_addr.wrapping_sub_addr(addr), offset);
+ }
+
+ #[test]
+ pub fn test_addr_checked_arithmetic() {
+ let low_addr = ExampleAddr::from_usize(0x100usize);
+ let high_addr = ExampleAddr::from_usize(usize::MAX - 0x100usize);
+ let small_offset = 0x50usize;
+ let large_offset = 0x200usize;
+
+ assert_eq!(
+ low_addr.checked_sub(small_offset),
+ Some(low_addr.wrapping_sub(small_offset))
+ );
+ assert_eq!(low_addr.checked_sub(large_offset), None);
+ assert_eq!(
+ high_addr.checked_add(small_offset),
+ Some(high_addr.wrapping_add(small_offset))
+ );
+ assert_eq!(high_addr.checked_add(large_offset), None);
+
+ assert_eq!(
+ high_addr.checked_sub_addr(low_addr),
+ Some(usize::MAX - 0x200usize)
+ );
+ assert_eq!(low_addr.checked_sub_addr(high_addr), None);
+ }
+
+ #[test]
+ pub fn test_addr_overflowing_arithmetic() {
+ let low_addr = ExampleAddr::from_usize(0x100usize);
+ let high_addr = ExampleAddr::from_usize(usize::MAX - 0x100usize);
+ let small_offset = 0x50usize;
+ let large_offset = 0x200usize;
+
+ assert_eq!(
+ low_addr.overflowing_sub(small_offset),
+ (low_addr.wrapping_sub(small_offset), false)
+ );
+ assert_eq!(
+ low_addr.overflowing_sub(large_offset),
+ (low_addr.wrapping_sub(large_offset), true)
+ );
+ assert_eq!(
+ high_addr.overflowing_add(small_offset),
+ (high_addr.wrapping_add(small_offset), false)
+ );
+ assert_eq!(
+ high_addr.overflowing_add(large_offset),
+ (high_addr.wrapping_add(large_offset), true)
+ );
+ assert_eq!(
+ high_addr.overflowing_sub_addr(low_addr),
+ (high_addr.wrapping_sub_addr(low_addr), false)
+ );
+ assert_eq!(
+ low_addr.overflowing_sub_addr(high_addr),
+ (low_addr.wrapping_sub_addr(high_addr), true)
+ );
+ }
+
+ #[test]
+ #[should_panic]
+ pub fn test_addr_offset_overflow() {
+ let addr = ExampleAddr::from_usize(usize::MAX);
+ let _ = addr.offset(1);
+ }
+
+ #[test]
+ #[should_panic]
+ pub fn test_addr_offset_from_overflow() {
+ let addr = ExampleAddr::from_usize(usize::MAX);
+ let _ = addr.offset_from(ExampleAddr::from_usize(0));
+ }
+
+ #[test]
+ #[should_panic]
+ pub fn test_addr_offset_from_underflow() {
+ let addr = ExampleAddr::from_usize(0);
+ let _ = addr.offset_from(ExampleAddr::from_usize(usize::MAX));
+ }
+
+ #[test]
+ #[should_panic]
+ pub fn test_addr_add_overflow() {
+ let addr = ExampleAddr::from_usize(usize::MAX);
+ let _ = addr.add(1);
+ }
+
+ #[test]
+ #[should_panic]
+ pub fn test_addr_sub_underflow() {
+ let addr = ExampleAddr::from_usize(0);
+ let _ = addr.sub(1);
+ }
+
+ #[test]
+ #[should_panic]
+ pub fn test_addr_sub_addr_overflow() {
+ let addr = ExampleAddr::from_usize(0);
+ let _ = addr.sub_addr(ExampleAddr::from_usize(1));
+ }
+
+ #[test]
+ pub fn test_virt_addr_ptr() {
+        // `a` must be mutable, and the address derived from `&mut a`, so that
+        // the raw-pointer writes below are sound.
+        let mut a: [usize; 4] = [0x1234, 0x5678, 0x9abc, 0xdef0];
+
+        let va0 = VirtAddr::from_mut_ptr_of(&mut a as *mut usize);
+ let va1 = va0.add(size_of::<usize>());
+ let va2 = va1.add(size_of::<usize>());
+ let va3 = va2.add(size_of::<usize>());
+
+ let p0 = va0.as_ptr() as *const usize;
+ let p1 = va1.as_ptr_of::<usize>();
+ let p2 = va2.as_mut_ptr() as *mut usize;
+ let p3 = va3.as_mut_ptr_of::<usize>();
+
+ // testing conversion back to virt addr
+ assert_eq!(va0, VirtAddr::from_ptr_of(p0));
+ assert_eq!(va1, VirtAddr::from_ptr_of(p1));
+ assert_eq!(va2, VirtAddr::from_mut_ptr_of(p2));
+ assert_eq!(va3, VirtAddr::from_mut_ptr_of(p3));
+
+ // testing pointer read/write
+ assert!(unsafe { *p0 } == a[0]);
+ assert!(unsafe { *p1 } == a[1]);
+ assert!(unsafe { *p2 } == a[2]);
+ assert!(unsafe { *p3 } == a[3]);
+
+ unsafe {
+ *p2 = 0xdeadbeef;
+ }
+ unsafe {
+ *p3 = 0xcafebabe;
+ }
+ assert_eq!(a[2], 0xdeadbeef);
+ assert_eq!(a[3], 0xcafebabe);
+ }
+}
+
+use crate::MemoryAddr;
+
+/// A page-by-page iterator.
+///
+/// The page size is specified by the generic parameter `PAGE_SIZE`, which must
+/// be a power of 2.
+///
+/// The address type is specified by the type parameter `A`.
+///
+/// # Examples
+///
+/// ```
+/// use memory_addr::PageIter;
+///
+/// let mut iter = PageIter::<0x1000, usize>::new(0x1000, 0x3000).unwrap();
+/// assert_eq!(iter.next(), Some(0x1000));
+/// assert_eq!(iter.next(), Some(0x2000));
+/// assert_eq!(iter.next(), None);
+///
+/// assert!(PageIter::<0x1000, usize>::new(0x1000, 0x3001).is_none());
+/// ```
+pub struct PageIter<const PAGE_SIZE: usize, A>
+where
+ A: MemoryAddr,
+{
+ start: A,
+ end: A,
+}
+
+impl<A, const PAGE_SIZE: usize> PageIter<PAGE_SIZE, A>
+where
+ A: MemoryAddr,
+{
+ /// Creates a new [`PageIter`].
+ ///
+    /// Returns `None` if `PAGE_SIZE` is not a power of 2, or if `start` or
+    /// `end` is not aligned to `PAGE_SIZE`.
+ pub fn new(start: A, end: A) -> Option<Self> {
+ if !PAGE_SIZE.is_power_of_two()
+ || !start.is_aligned(PAGE_SIZE)
+ || !end.is_aligned(PAGE_SIZE)
+ {
+ None
+ } else {
+ Some(Self { start, end })
+ }
+ }
+}
+
+impl<A, const PAGE_SIZE: usize> Iterator for PageIter<PAGE_SIZE, A>
+where
+ A: MemoryAddr,
+{
+ type Item = A;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.start < self.end {
+ let ret = self.start;
+ self.start = self.start.add(PAGE_SIZE);
+ Some(ret)
+ } else {
+ None
+ }
+ }
+}
+
+#![cfg_attr(not(test), no_std)]
+#![doc = include_str!("../README.md")]
+
+mod addr;
+mod iter;
+mod range;
+
+pub use self::addr::{MemoryAddr, PhysAddr, VirtAddr};
+pub use self::iter::PageIter;
+pub use self::range::{AddrRange, PhysAddrRange, VirtAddrRange};
+
+/// The size of a 4K page (4096 bytes).
+pub const PAGE_SIZE_4K: usize = 0x1000;
+
+/// A [`PageIter`] for 4K pages.
+pub type PageIter4K<A> = PageIter<PAGE_SIZE_4K, A>;
+
+/// Align address downwards.
+///
+/// Returns the greatest `x` with alignment `align` so that `x <= addr`.
+///
+/// The alignment must be a power of two.
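+///
+/// # Example
+///
+/// For example, aligning `0x1234` down to a 4K boundary:
+///
+/// ```
+/// assert_eq!(memory_addr::align_down(0x1234, 0x1000), 0x1000);
+/// ```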
+#[inline]
+pub const fn align_down(addr: usize, align: usize) -> usize {
+ addr & !(align - 1)
+}
+
+/// Align address upwards.
+///
+/// Returns the smallest `x` with alignment `align` so that `x >= addr`.
+///
+/// The alignment must be a power of two.
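+///
+/// # Example
+///
+/// For example, aligning `0x1234` up to a 4K boundary:
+///
+/// ```
+/// assert_eq!(memory_addr::align_up(0x1234, 0x1000), 0x2000);
+/// ```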
+#[inline]
+pub const fn align_up(addr: usize, align: usize) -> usize {
+ (addr + align - 1) & !(align - 1)
+}
+
+/// Returns the offset of the address within the alignment.
+///
+/// Equivalent to `addr % align`, but the alignment must be a power of two.
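+///
+/// # Example
+///
+/// ```
+/// assert_eq!(memory_addr::align_offset(0x1234, 0x1000), 0x234);
+/// ```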
+#[inline]
+pub const fn align_offset(addr: usize, align: usize) -> usize {
+ addr & (align - 1)
+}
+
+/// Checks whether the address has the demanded alignment.
+///
+/// Equivalent to `addr % align == 0`, but the alignment must be a power of two.
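+///
+/// # Example
+///
+/// ```
+/// assert!(memory_addr::is_aligned(0x1000, 0x1000));
+/// assert!(!memory_addr::is_aligned(0x1234, 0x1000));
+/// ```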
+#[inline]
+pub const fn is_aligned(addr: usize, align: usize) -> bool {
+ align_offset(addr, align) == 0
+}
+
+/// Align address downwards to 4096 (bytes).
+#[inline]
+pub const fn align_down_4k(addr: usize) -> usize {
+ align_down(addr, PAGE_SIZE_4K)
+}
+
+/// Align address upwards to 4096 (bytes).
+#[inline]
+pub const fn align_up_4k(addr: usize) -> usize {
+ align_up(addr, PAGE_SIZE_4K)
+}
+
+/// Returns the offset of the address within a 4K-sized page.
+#[inline]
+pub const fn align_offset_4k(addr: usize) -> usize {
+ align_offset(addr, PAGE_SIZE_4K)
+}
+
+/// Checks whether the address is 4K-aligned.
+#[inline]
+pub const fn is_aligned_4k(addr: usize) -> bool {
+ is_aligned(addr, PAGE_SIZE_4K)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_align() {
+ assert_eq!(align_down(0x12345678, 0x1000), 0x12345000);
+ assert_eq!(align_up(0x12345678, 0x1000), 0x12346000);
+ assert_eq!(align_offset(0x12345678, 0x1000), 0x678);
+ assert!(is_aligned(0x12345000, 0x1000));
+ assert!(!is_aligned(0x12345678, 0x1000));
+
+ assert_eq!(align_down_4k(0x12345678), 0x12345000);
+ assert_eq!(align_up_4k(0x12345678), 0x12346000);
+ assert_eq!(align_offset_4k(0x12345678), 0x678);
+ assert!(is_aligned_4k(0x12345000));
+ assert!(!is_aligned_4k(0x12345678));
+ }
+}
+
+use core::{fmt, ops::Range};
+
+use crate::{MemoryAddr, PhysAddr, VirtAddr};
+
+/// A range of a given memory address type `A`.
+///
+/// The range is inclusive on the start and exclusive on the end. A range is
+/// considered **empty** iff `start == end`, and **invalid** iff `start > end`.
+/// An invalid range should not be created and cannot be obtained without
+/// unsafe operations; calling methods on an invalid range may cause unexpected
+/// consequences.
+///
+/// # Example
+///
+/// ```
+/// use memory_addr::AddrRange;
+///
+/// let range = AddrRange::<usize>::new(0x1000, 0x2000);
+/// assert_eq!(range.start, 0x1000);
+/// assert_eq!(range.end, 0x2000);
+/// ```
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct AddrRange<A: MemoryAddr> {
+ /// The lower bound of the range (inclusive).
+ pub start: A,
+ /// The upper bound of the range (exclusive).
+ pub end: A,
+}
+
+/// Methods for [`AddrRange`].
+impl<A> AddrRange<A>
+where
+ A: MemoryAddr,
+{
+ /// Creates a new address range from the start and end addresses.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `start > end`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::AddrRange;
+ ///
+ /// let range = AddrRange::new(0x1000usize, 0x2000);
+ /// assert_eq!(range.start, 0x1000);
+ /// assert_eq!(range.end, 0x2000);
+ /// ```
+ ///
+ /// And this will panic:
+ ///
+ /// ```should_panic
+ /// # use memory_addr::AddrRange;
+ /// let _ = AddrRange::new(0x2000usize, 0x1000);
+ /// ```
+ #[inline]
+ pub fn new(start: A, end: A) -> Self {
+ assert!(
+ start <= end,
+ "invalid `AddrRange`: {}..{}",
+ start.into(),
+ end.into()
+ );
+ Self { start, end }
+ }
+
+ /// Creates a new address range from the given range.
+ ///
+ /// Returns `None` if `start > end`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::AddrRange;
+ ///
+ /// let range = AddrRange::try_new(0x1000usize, 0x2000).unwrap();
+ /// assert_eq!(range.start, 0x1000);
+ /// assert_eq!(range.end, 0x2000);
+ /// assert!(AddrRange::try_new(0x2000usize, 0x1000).is_none());
+ /// ```
+ #[inline]
+ pub fn try_new(start: A, end: A) -> Option<Self> {
+ if start <= end {
+ Some(Self { start, end })
+ } else {
+ None
+ }
+ }
+
+ /// Creates a new address range from the given range without checking the
+ /// validity.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `start <= end`, otherwise the range will be
+ /// invalid and unexpected consequences will occur.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::AddrRange;
+ ///
+ /// let range = unsafe { AddrRange::new_unchecked(0x1000usize, 0x2000) };
+ /// assert_eq!(range.start, 0x1000);
+ /// assert_eq!(range.end, 0x2000);
+ /// ```
+ #[inline]
+ pub const unsafe fn new_unchecked(start: A, end: A) -> Self {
+ Self { start, end }
+ }
+
+ /// Creates a new address range from the start address and the size.
+ ///
+ /// # Panics
+ ///
+    /// Panics if `size` is so large that computing the end address would
+    /// overflow.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::AddrRange;
+ ///
+ /// let range = AddrRange::from_start_size(0x1000usize, 0x1000);
+ /// assert_eq!(range.start, 0x1000);
+ /// assert_eq!(range.end, 0x2000);
+ /// ```
+ ///
+ /// And this will panic:
+ ///
+ /// ```should_panic
+ /// # use memory_addr::AddrRange;
+ /// let _ = AddrRange::from_start_size(0x1000usize, usize::MAX);
+ /// ```
+ #[inline]
+ pub fn from_start_size(start: A, size: usize) -> Self {
+ if let Some(end) = start.checked_add(size) {
+ Self { start, end }
+ } else {
+ panic!(
+ "size too large for `AddrRange`: {} + {}",
+ start.into(),
+ size
+ );
+ }
+ }
+
+ /// Creates a new address range from the start address and the size.
+ ///
+    /// Returns `None` if `size` is so large that computing the end address
+    /// would overflow.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::AddrRange;
+ ///
+ /// let range = AddrRange::try_from_start_size(0x1000usize, 0x1000).unwrap();
+ /// assert_eq!(range.start, 0x1000);
+ /// assert_eq!(range.end, 0x2000);
+ /// assert!(AddrRange::try_from_start_size(0x1000usize, usize::MAX).is_none());
+ /// ```
+ #[inline]
+ pub fn try_from_start_size(start: A, size: usize) -> Option<Self> {
+ start.checked_add(size).map(|end| Self { start, end })
+ }
+
+ /// Creates a new address range from the start address and the size without
+ /// checking the validity.
+ ///
+ /// # Safety
+ ///
+    /// The caller must ensure that computing the end address from `start` and
+    /// `size` does not overflow. Failing to do so will create an invalid
+    /// range and cause unexpected consequences.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::AddrRange;
+ ///
+ /// let range = unsafe { AddrRange::from_start_size_unchecked(0x1000usize, 0x1000) };
+ /// assert_eq!(range.start, 0x1000);
+ /// assert_eq!(range.end, 0x2000);
+ /// ```
+ #[inline]
+ pub unsafe fn from_start_size_unchecked(start: A, size: usize) -> Self {
+ Self {
+ start,
+ end: start.wrapping_add(size),
+ }
+ }
+
+ /// Returns `true` if the range is empty.
+ ///
+    /// It's also guaranteed that `true` will be returned if the range is
+    /// invalid (i.e., `start > end`), since `start >= end` holds in that case.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::AddrRange;
+ ///
+ /// assert!(AddrRange::new(0x1000usize, 0x1000).is_empty());
+ /// assert!(!AddrRange::new(0x1000usize, 0x2000).is_empty());
+ /// ```
+ #[inline]
+ pub fn is_empty(self) -> bool {
+ self.start >= self.end
+ }
+
+ /// Returns the size of the range.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::AddrRange;
+ ///
+ /// assert_eq!(AddrRange::new(0x1000usize, 0x1000).size(), 0);
+ /// assert_eq!(AddrRange::new(0x1000usize, 0x2000).size(), 0x1000);
+ /// ```
+ #[inline]
+ pub fn size(self) -> usize {
+ self.end.wrapping_sub_addr(self.start)
+ }
+
+ /// Checks if the range contains the given address.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::AddrRange;
+ ///
+ /// let range = AddrRange::new(0x1000usize, 0x2000);
+ /// assert!(!range.contains(0x0fff));
+ /// assert!(range.contains(0x1000));
+ /// assert!(range.contains(0x1fff));
+ /// assert!(!range.contains(0x2000));
+ /// ```
+ #[inline]
+ pub fn contains(self, addr: A) -> bool {
+ self.start <= addr && addr < self.end
+ }
+
+ /// Checks if the range contains the given address range.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::{addr_range, AddrRange};
+ ///
+ /// let range = AddrRange::new(0x1000usize, 0x2000);
+ /// assert!(!range.contains_range(addr_range!(0x0usize..0xfff)));
+ /// assert!(!range.contains_range(addr_range!(0x0fffusize..0x1fff)));
+ /// assert!(range.contains_range(addr_range!(0x1001usize..0x1fff)));
+ /// assert!(range.contains_range(addr_range!(0x1000usize..0x2000)));
+ /// assert!(!range.contains_range(addr_range!(0x1001usize..0x2001)));
+ /// assert!(!range.contains_range(addr_range!(0x2001usize..0x3001)));
+ /// ```
+ #[inline]
+ pub fn contains_range(self, other: Self) -> bool {
+ self.start <= other.start && other.end <= self.end
+ }
+
+ /// Checks if the range is contained in the given address range.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::{addr_range, AddrRange};
+ ///
+ /// let range = AddrRange::new(0x1000usize, 0x2000);
+ /// assert!(!range.contained_in(addr_range!(0xfffusize..0x1fff)));
+ /// assert!(!range.contained_in(addr_range!(0x1001usize..0x2001)));
+ /// assert!(range.contained_in(addr_range!(0xfffusize..0x2001)));
+ /// assert!(range.contained_in(addr_range!(0x1000usize..0x2000)));
+ /// ```
+ #[inline]
+ pub fn contained_in(self, other: Self) -> bool {
+ other.contains_range(self)
+ }
+
+ /// Checks if the range overlaps with the given address range.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memory_addr::{addr_range, AddrRange};
+ ///
+ /// let range = AddrRange::new(0x1000usize, 0x2000usize);
+ /// assert!(!range.overlaps(addr_range!(0xfffusize..0xfff)));
+ /// assert!(!range.overlaps(addr_range!(0x2000usize..0x2000)));
+ /// assert!(!range.overlaps(addr_range!(0xfffusize..0x1000)));
+ /// assert!(range.overlaps(addr_range!(0xfffusize..0x1001)));
+ /// assert!(range.overlaps(addr_range!(0x1fffusize..0x2001)));
+ /// assert!(range.overlaps(addr_range!(0xfffusize..0x2001)));
+ /// ```
+ #[inline]
+ pub fn overlaps(self, other: Self) -> bool {
+ self.start < other.end && other.start < self.end
+ }
+}
+
+/// Conversion from [`Range`] to [`AddrRange`], provided that the type of the
+/// endpoints can be converted to the address type `A`.
+impl<A, T> TryFrom<Range<T>> for AddrRange<A>
+where
+ A: MemoryAddr + From<T>,
+{
+ type Error = ();
+
+ #[inline]
+ fn try_from(range: Range<T>) -> Result<Self, Self::Error> {
+ Self::try_new(range.start.into(), range.end.into()).ok_or(())
+ }
+}
+
+/// Implementations of [`Default`] for [`AddrRange`].
+///
+/// The default value is an empty range `AddrRange { start: 0, end: 0 }`.
+impl<A> Default for AddrRange<A>
+where
+ A: MemoryAddr,
+{
+ #[inline]
+ fn default() -> Self {
+ Self {
+ start: 0.into(),
+ end: 0.into(),
+ }
+ }
+}
+
+/// Implementations of [`Debug`](fmt::Debug) for [`AddrRange`].
+impl<A> fmt::Debug for AddrRange<A>
+where
+ A: MemoryAddr + fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{:?}..{:?}", self.start, self.end)
+ }
+}
+
+/// Implementations of [`LowerHex`](fmt::LowerHex) for [`AddrRange`].
+impl<A> fmt::LowerHex for AddrRange<A>
+where
+ A: MemoryAddr + fmt::LowerHex,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{:x}..{:x}", self.start, self.end)
+ }
+}
+
+/// Implementations of [`UpperHex`](fmt::UpperHex) for [`AddrRange`].
+impl<A> fmt::UpperHex for AddrRange<A>
+where
+ A: MemoryAddr + fmt::UpperHex,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{:X}..{:X}", self.start, self.end)
+ }
+}
+
+/// A range of virtual addresses [`VirtAddr`].
+pub type VirtAddrRange = AddrRange<VirtAddr>;
+/// A range of physical addresses [`PhysAddr`].
+pub type PhysAddrRange = AddrRange<PhysAddr>;
+
+/// Converts the given range expression into [`AddrRange`]. Panics if the range
+/// is invalid.
+///
+/// The concrete address type is inferred from the context.
+///
+/// # Example
+///
+/// ```
+/// use memory_addr::{addr_range, AddrRange};
+///
+/// let range: AddrRange<usize> = addr_range!(0x1000usize..0x2000);
+/// assert_eq!(range.start, 0x1000usize);
+/// assert_eq!(range.end, 0x2000usize);
+/// ```
+///
+/// And this will panic:
+///
+/// ```should_panic
+/// # use memory_addr::{addr_range, AddrRange};
+/// let _: AddrRange<usize> = addr_range!(0x2000usize..0x1000);
+/// ```
+#[macro_export]
+macro_rules! addr_range {
+ ($range:expr) => {
+ $crate::AddrRange::try_from($range).expect("invalid address range in `addr_range!`")
+ };
+}
+
+/// Converts the given range expression into [`VirtAddrRange`]. Panics if the
+/// range is invalid.
+///
+/// # Example
+///
+/// ```
+/// use memory_addr::va_range;
+///
+/// let range = va_range!(0x1000..0x2000);
+/// assert_eq!(range.start, 0x1000.into());
+/// assert_eq!(range.end, 0x2000.into());
+/// ```
+///
+/// And this will panic:
+///
+/// ```should_panic
+/// # use memory_addr::va_range;
+/// let _ = va_range!(0x2000..0x1000);
+/// ```
+#[macro_export]
+macro_rules! va_range {
+ ($range:expr) => {
+ $crate::VirtAddrRange::try_from($range).expect("invalid address range in `va_range!`")
+ };
+}
+
+/// Converts the given range expression into [`PhysAddrRange`]. Panics if the
+/// range is invalid.
+///
+/// # Example
+///
+/// ```
+/// use memory_addr::pa_range;
+///
+/// let range = pa_range!(0x1000..0x2000);
+/// assert_eq!(range.start, 0x1000.into());
+/// assert_eq!(range.end, 0x2000.into());
+/// ```
+///
+/// And this will panic:
+///
+/// ```should_panic
+/// # use memory_addr::pa_range;
+/// let _ = pa_range!(0x2000..0x1000);
+/// ```
+#[macro_export]
+macro_rules! pa_range {
+ ($range:expr) => {
+ $crate::PhysAddrRange::try_from($range).expect("invalid address range in `pa_range!`")
+ };
+}
+
+#[cfg(test)]
+mod test {
+ use crate::{va, va_range, VirtAddrRange};
+
+ #[test]
+ fn test_range_format() {
+ let range = va_range!(0xfec000..0xfff000usize);
+
+ assert_eq!(format!("{:?}", range), "VA:0xfec000..VA:0xfff000");
+ assert_eq!(format!("{:x}", range), "VA:0xfec000..VA:0xfff000");
+ assert_eq!(format!("{:X}", range), "VA:0xFEC000..VA:0xFFF000");
+ }
+
+ #[test]
+ fn test_range() {
+ let start = va!(0x1000);
+ let end = va!(0x2000);
+ let range = va_range!(start..end);
+
+ println!("range: {:?}", range);
+
+ assert!((0x1000..0x1000).is_empty());
+ assert!((0x1000..0xfff).is_empty());
+ assert!(!range.is_empty());
+
+ assert_eq!(range.start, start);
+ assert_eq!(range.end, end);
+ assert_eq!(range.size(), 0x1000);
+
+ assert!(range.contains(va!(0x1000)));
+ assert!(range.contains(va!(0x1080)));
+ assert!(!range.contains(va!(0x2000)));
+
+ assert!(!range.contains_range(addr_range!(0xfff..0x1fff)));
+ assert!(!range.contains_range(addr_range!(0xfff..0x2000)));
+ assert!(!range.contains_range(va_range!(0xfff..0x2001))); // test both `va_range!` and `addr_range!`
+ assert!(range.contains_range(va_range!(0x1000..0x1fff)));
+ assert!(range.contains_range(addr_range!(0x1000..0x2000)));
+ assert!(!range.contains_range(addr_range!(0x1000..0x2001)));
+ assert!(range.contains_range(va_range!(0x1001..0x1fff)));
+ assert!(range.contains_range(va_range!(0x1001..0x2000)));
+ assert!(!range.contains_range(va_range!(0x1001..0x2001)));
+ assert!(!range.contains_range(VirtAddrRange::from_start_size(0xfff.into(), 0x1)));
+ assert!(!range.contains_range(VirtAddrRange::from_start_size(0x2000.into(), 0x1)));
+
+ assert!(range.contained_in(addr_range!(0xfff..0x2000)));
+ assert!(range.contained_in(addr_range!(0x1000..0x2000)));
+ assert!(range.contained_in(va_range!(0x1000..0x2001)));
+
+ assert!(!range.overlaps(addr_range!(0x800..0x1000)));
+ assert!(range.overlaps(addr_range!(0x800..0x1001)));
+ assert!(range.overlaps(addr_range!(0x1800..0x2000)));
+ assert!(range.overlaps(va_range!(0x1800..0x2001)));
+ assert!(!range.overlaps(va_range!(0x2000..0x2800)));
+ assert!(range.overlaps(va_range!(0xfff..0x2001)));
+
+ let default_range: VirtAddrRange = Default::default();
+ assert!(default_range.is_empty());
+ assert_eq!(default_range.size(), 0);
+ assert_eq!(default_range.start, va!(0));
+ assert_eq!(default_range.end, va!(0));
+ }
+}
+
+use core::fmt;
+
+use memory_addr::{AddrRange, MemoryAddr};
+
+use crate::{MappingBackend, MappingError, MappingResult};
+
+/// A memory area represents a contiguous range of virtual memory with the same
+/// flags.
+///
+/// The target physical memory frames are determined by [`MappingBackend`] and
+/// may not be contiguous.
+pub struct MemoryArea<B: MappingBackend> {
+ va_range: AddrRange<B::Addr>,
+ flags: B::Flags,
+ backend: B,
+}
+
+impl<B: MappingBackend> MemoryArea<B> {
+ /// Creates a new memory area.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `start + size` overflows.
+ pub fn new(start: B::Addr, size: usize, flags: B::Flags, backend: B) -> Self {
+ Self {
+ va_range: AddrRange::from_start_size(start, size),
+ flags,
+ backend,
+ }
+ }
+
+ /// Returns the virtual address range.
+ pub const fn va_range(&self) -> AddrRange<B::Addr> {
+ self.va_range
+ }
+
+ /// Returns the memory flags, e.g., the permission bits.
+ pub const fn flags(&self) -> B::Flags {
+ self.flags
+ }
+
+ /// Returns the start address of the memory area.
+ pub const fn start(&self) -> B::Addr {
+ self.va_range.start
+ }
+
+ /// Returns the end address of the memory area.
+ pub const fn end(&self) -> B::Addr {
+ self.va_range.end
+ }
+
+ /// Returns the size of the memory area.
+ pub fn size(&self) -> usize {
+ self.va_range.size()
+ }
+
+ /// Returns the mapping backend of the memory area.
+ pub const fn backend(&self) -> &B {
+ &self.backend
+ }
+}
+
+impl<B: MappingBackend> MemoryArea<B> {
+ /// Changes the flags.
+ pub(crate) fn set_flags(&mut self, new_flags: B::Flags) {
+ self.flags = new_flags;
+ }
+
+ /// Changes the end address of the memory area.
+ pub(crate) fn set_end(&mut self, new_end: B::Addr) {
+ self.va_range.end = new_end;
+ }
+
+ /// Maps the whole memory area in the page table.
+ pub(crate) fn map_area(&self, page_table: &mut B::PageTable) -> MappingResult {
+ self.backend
+ .map(self.start(), self.size(), self.flags, page_table)
+ .then_some(())
+ .ok_or(MappingError::BadState)
+ }
+
+ /// Unmaps the whole memory area in the page table.
+ pub(crate) fn unmap_area(&self, page_table: &mut B::PageTable) -> MappingResult {
+ self.backend
+ .unmap(self.start(), self.size(), page_table)
+ .then_some(())
+ .ok_or(MappingError::BadState)
+ }
+
+ /// Changes the flags in the page table.
+ pub(crate) fn protect_area(
+ &mut self,
+ new_flags: B::Flags,
+ page_table: &mut B::PageTable,
+ ) -> MappingResult {
+ self.backend
+ .protect(self.start(), self.size(), new_flags, page_table);
+ Ok(())
+ }
+
+    /// Shrinks the memory area at the left (lower-address) side.
+    ///
+    /// The area is shrunk to `new_size` bytes: the leading `old_size -
+    /// new_size` bytes are unmapped, and the start address is increased by
+    /// the same amount.
+    ///
+    /// `new_size` must be greater than 0 and less than the current size.
+ pub(crate) fn shrink_left(
+ &mut self,
+ new_size: usize,
+ page_table: &mut B::PageTable,
+ ) -> MappingResult {
+ assert!(new_size > 0 && new_size < self.size());
+
+ let old_size = self.size();
+ let unmap_size = old_size - new_size;
+
+ if !self.backend.unmap(self.start(), unmap_size, page_table) {
+ return Err(MappingError::BadState);
+ }
+ // Use wrapping_add to avoid overflow check.
+ // Safety: `unmap_size` is less than the current size, so it will never
+ // overflow.
+ self.va_range.start = self.va_range.start.wrapping_add(unmap_size);
+ Ok(())
+ }
+
+    /// Shrinks the memory area at the right (higher-address) side.
+    ///
+    /// The area is shrunk to `new_size` bytes: the trailing `old_size -
+    /// new_size` bytes are unmapped, and the end address is decreased by
+    /// the same amount.
+    ///
+    /// `new_size` must be greater than 0 and less than the current size.
+ pub(crate) fn shrink_right(
+ &mut self,
+ new_size: usize,
+ page_table: &mut B::PageTable,
+ ) -> MappingResult {
+ assert!(new_size > 0 && new_size < self.size());
+ let old_size = self.size();
+ let unmap_size = old_size - new_size;
+
+ // Use wrapping_add to avoid overflow check.
+ // Safety: `new_size` is less than the current size, so it will never overflow.
+ let unmap_start = self.start().wrapping_add(new_size);
+
+ if !self.backend.unmap(unmap_start, unmap_size, page_table) {
+ return Err(MappingError::BadState);
+ }
+
+ // Use wrapping_sub to avoid overflow check, same as above.
+ self.va_range.end = self.va_range.end.wrapping_sub(unmap_size);
+ Ok(())
+ }
+
+ /// Splits the memory area at the given position.
+ ///
+ /// The original memory area is shrunk to the left part, and the right part
+ /// is returned.
+ ///
+    /// Returns `None` if the given position is not strictly inside the memory
+    /// area, i.e., if one of the parts would be empty after splitting.
+ pub(crate) fn split(&mut self, pos: B::Addr) -> Option<Self> {
+ if self.start() < pos && pos < self.end() {
+ let new_area = Self::new(
+ pos,
+ // Use wrapping_sub_addr to avoid overflow check. It is safe because
+ // `pos` is within the memory area.
+ self.end().wrapping_sub_addr(pos),
+ self.flags,
+ self.backend.clone(),
+ );
+ self.va_range.end = pos;
+ Some(new_area)
+ } else {
+ None
+ }
+ }
+}
+
+impl<B: MappingBackend> fmt::Debug for MemoryArea<B>
+where
+ B::Addr: fmt::Debug,
+ B::Flags: fmt::Debug + Copy,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("MemoryArea")
+ .field("va_range", &self.va_range)
+ .field("flags", &self.flags)
+ .finish()
+ }
+}
+
+use memory_addr::MemoryAddr;
+
+/// Underlying operations to do when manipulating mappings within the specific
+/// [`MemoryArea`](crate::MemoryArea).
+///
+/// The backend can be different for different memory areas. For example, for
+/// linear mappings, the target physical address is known when the mapping is
+/// added to the page table. For lazy mappings, an empty mapping needs to be
+/// added to the page table to trigger a page fault.
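+///
+/// # Example
+///
+/// A minimal sketch of a backend over a mock "page table" (here just an array
+/// of flag bytes, one per 4K page). The `MockBackend` type, the `PAGE_SIZE`
+/// constant, and the `u8` flag encoding are assumptions made for this
+/// illustration, not items provided by this crate:
+///
+/// ```
+/// use memory_set::MappingBackend;
+///
+/// const PAGE_SIZE: usize = 0x1000; // assumed 4K pages
+///
+/// // A non-zero entry means the page is mapped with these flags.
+/// type MockPageTable = [u8; 64];
+///
+/// #[derive(Clone)]
+/// struct MockBackend;
+///
+/// impl MappingBackend for MockBackend {
+///     type Addr = usize;
+///     type Flags = u8;
+///     type PageTable = MockPageTable;
+///
+///     fn map(&self, start: usize, size: usize, flags: u8, pt: &mut MockPageTable) -> bool {
+///         for entry in pt[start / PAGE_SIZE..(start + size) / PAGE_SIZE].iter_mut() {
+///             if *entry != 0 {
+///                 return false; // some page is already mapped
+///             }
+///             *entry = flags;
+///         }
+///         true
+///     }
+///
+///     fn unmap(&self, start: usize, size: usize, pt: &mut MockPageTable) -> bool {
+///         for entry in pt[start / PAGE_SIZE..(start + size) / PAGE_SIZE].iter_mut() {
+///             if *entry == 0 {
+///                 return false; // some page is not mapped
+///             }
+///             *entry = 0;
+///         }
+///         true
+///     }
+///
+///     fn protect(&self, start: usize, size: usize, new_flags: u8, pt: &mut MockPageTable) -> bool {
+///         for entry in pt[start / PAGE_SIZE..(start + size) / PAGE_SIZE].iter_mut() {
+///             *entry = new_flags;
+///         }
+///         true
+///     }
+/// }
+///
+/// let backend = MockBackend;
+/// let mut pt: MockPageTable = [0; 64];
+/// assert!(backend.map(0x1000, 0x2000, 1, &mut pt));
+/// assert!(!backend.map(0x2000, 0x1000, 1, &mut pt)); // overlaps the first mapping
+/// assert!(backend.unmap(0x1000, 0x2000, &mut pt));
+/// ```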
+pub trait MappingBackend: Clone {
+ /// The address type used in the memory area.
+ type Addr: MemoryAddr;
+ /// The flags type used in the memory area.
+ type Flags: Copy;
+ /// The page table type used in the memory area.
+ type PageTable;
+
+ /// What to do when mapping a region within the area with the given flags.
+ fn map(
+ &self,
+ start: Self::Addr,
+ size: usize,
+ flags: Self::Flags,
+ page_table: &mut Self::PageTable,
+ ) -> bool;
+
+    /// What to do when unmapping a memory region within the area.
+ fn unmap(&self, start: Self::Addr, size: usize, page_table: &mut Self::PageTable) -> bool;
+
+ /// What to do when changing access flags.
+ fn protect(
+ &self,
+ start: Self::Addr,
+ size: usize,
+ new_flags: Self::Flags,
+ page_table: &mut Self::PageTable,
+ ) -> bool;
+}
+
+#![cfg_attr(not(test), no_std)]
+#![doc = include_str!("../README.md")]
+
+extern crate alloc;
+
+mod area;
+mod backend;
+mod set;
+
+#[cfg(test)]
+mod tests;
+
+pub use self::area::MemoryArea;
+pub use self::backend::MappingBackend;
+pub use self::set::MemorySet;
+
+/// Error type for memory mapping operations.
+#[derive(Debug, Eq, PartialEq)]
+pub enum MappingError {
+ /// Invalid parameter (e.g., `addr`, `size`, `flags`, etc.)
+ InvalidParam,
+ /// The given range overlaps with an existing mapping.
+ AlreadyExists,
+ /// The backend page table is in a bad state.
+ BadState,
+}
+
+/// A [`Result`] type with [`MappingError`] as the error type.
+pub type MappingResult<T = ()> = Result<T, MappingError>;
+
+use alloc::collections::BTreeMap;
+#[allow(unused_imports)] // rustc reports a false-positive unused-import warning here
+use alloc::vec::Vec;
+use core::fmt;
+
+use memory_addr::{AddrRange, MemoryAddr};
+
+use crate::{MappingBackend, MappingError, MappingResult, MemoryArea};
+
+/// A container that maintains memory mappings ([`MemoryArea`]).
+pub struct MemorySet<B: MappingBackend> {
+ areas: BTreeMap<B::Addr, MemoryArea<B>>,
+}
+
+impl<B: MappingBackend> MemorySet<B> {
+ /// Creates a new memory set.
+ pub const fn new() -> Self {
+ Self {
+ areas: BTreeMap::new(),
+ }
+ }
+
+ /// Returns the number of memory areas in the memory set.
+ pub fn len(&self) -> usize {
+ self.areas.len()
+ }
+
+ /// Returns `true` if the memory set contains no memory areas.
+ pub fn is_empty(&self) -> bool {
+ self.areas.is_empty()
+ }
+
+    /// Returns an iterator over all memory areas.
+ pub fn iter(&self) -> impl Iterator<Item = &MemoryArea<B>> {
+ self.areas.values()
+ }
+
+ /// Returns whether the given address range overlaps with any existing area.
+ pub fn overlaps(&self, range: AddrRange<B::Addr>) -> bool {
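+        // Since the areas are sorted by start address and disjoint, only the
+        // nearest area starting below `range.start` and the nearest one
+        // starting at or above it can possibly overlap with `range`.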
+ if let Some((_, before)) = self.areas.range(..range.start).last() {
+ if before.va_range().overlaps(range) {
+ return true;
+ }
+ }
+ if let Some((_, after)) = self.areas.range(range.start..).next() {
+ if after.va_range().overlaps(range) {
+ return true;
+ }
+ }
+ false
+ }
+
+ /// Finds the memory area that contains the given address.
+ pub fn find(&self, addr: B::Addr) -> Option<&MemoryArea<B>> {
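+        // Since the areas are sorted by start address and disjoint, the last
+        // area starting at or below `addr` is the only possible candidate.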
+ let candidate = self.areas.range(..=addr).last().map(|(_, a)| a);
+ candidate.filter(|a| a.va_range().contains(addr))
+ }
+
+ /// Finds a free area that can accommodate the given size.
+ ///
+ /// The search starts from the given `hint` address, and the area should be
+ /// within the given `limit` range.
+ ///
+    /// Returns the start address of the free area, or `None` if no such area
+    /// is found.
+ pub fn find_free_area(
+ &self,
+ hint: B::Addr,
+ size: usize,
+ limit: AddrRange<B::Addr>,
+ ) -> Option<B::Addr> {
+        // Brute force: try to fit the requested size into each gap between
+        // the existing areas, starting from the hint.
+        let mut last_end = hint.max(limit.start);
+        for (&addr, area) in self.areas.iter() {
+            if last_end
+                .checked_add(size)
+                .is_some_and(|end| end <= addr && end <= limit.end)
+            {
+                return Some(last_end);
+            }
+            // Use `max` so that areas ending below the hint cannot move the
+            // candidate address backwards.
+            last_end = last_end.max(area.end());
+        }
+ if last_end
+ .checked_add(size)
+ .is_some_and(|end| end <= limit.end)
+ {
+ Some(last_end)
+ } else {
+ None
+ }
+ }
+
+    /// Adds a new memory mapping.
+ ///
+ /// The mapping is represented by a [`MemoryArea`].
+ ///
+ /// If the new area overlaps with any existing area, the behavior is
+ /// determined by the `unmap_overlap` parameter. If it is `true`, the
+ /// overlapped regions will be unmapped first. Otherwise, it returns an
+ /// error.
+ pub fn map(
+ &mut self,
+ area: MemoryArea<B>,
+ page_table: &mut B::PageTable,
+ unmap_overlap: bool,
+ ) -> MappingResult {
+ if area.va_range().is_empty() {
+ return Err(MappingError::InvalidParam);
+ }
+
+ if self.overlaps(area.va_range()) {
+ if unmap_overlap {
+ self.unmap(area.start(), area.size(), page_table)?;
+ } else {
+ return Err(MappingError::AlreadyExists);
+ }
+ }
+
+ area.map_area(page_table)?;
+ assert!(self.areas.insert(area.start(), area).is_none());
+ Ok(())
+ }
+
+    /// Removes memory mappings within the given address range.
+    ///
+    /// All memory areas that are fully contained in the range are removed
+    /// directly. If an area intersects a boundary, it is shrunk. If the
+    /// unmapped range lies in the middle of an existing area, the area is
+    /// split into two.
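+    ///
+    /// For example, unmapping a range in the middle of an area splits it:
+    ///
+    /// ```text
+    ///          [ unmap ]
+    /// [ left |          | right ]
+    /// ```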
+ pub fn unmap(
+ &mut self,
+ start: B::Addr,
+ size: usize,
+ page_table: &mut B::PageTable,
+ ) -> MappingResult {
+ let range =
+ AddrRange::try_from_start_size(start, size).ok_or(MappingError::InvalidParam)?;
+ if range.is_empty() {
+ return Ok(());
+ }
+
+ let end = range.end;
+
+ // Unmap entire areas that are contained by the range.
+ self.areas.retain(|_, area| {
+ if area.va_range().contained_in(range) {
+ area.unmap_area(page_table).unwrap();
+ false
+ } else {
+ true
+ }
+ });
+
+ // Shrink right if the area intersects with the left boundary.
+ if let Some((&before_start, before)) = self.areas.range_mut(..start).last() {
+ let before_end = before.end();
+ if before_end > start {
+ if before_end <= end {
+ // the unmapped area is at the end of `before`.
+ before.shrink_right(start.sub_addr(before_start), page_table)?;
+ } else {
+                    // the unmapped area is in the middle of `before`, need to split.
+ let right_part = before.split(end).unwrap();
+ before.shrink_right(start.sub_addr(before_start), page_table)?;
+ assert_eq!(right_part.start().into(), Into::<usize>::into(end));
+ self.areas.insert(end, right_part);
+ }
+ }
+ }
+
+ // Shrink left if the area intersects with the right boundary.
+ if let Some((&after_start, after)) = self.areas.range_mut(start..).next() {
+ let after_end = after.end();
+ if after_start < end {
+ // the unmapped area is at the start of `after`.
+ let mut new_area = self.areas.remove(&after_start).unwrap();
+ new_area.shrink_left(after_end.sub_addr(end), page_table)?;
+ assert_eq!(new_area.start().into(), Into::<usize>::into(end));
+ self.areas.insert(end, new_area);
+ }
+ }
+
+ Ok(())
+ }
+
+    /// Removes all memory areas and the underlying mappings.
+ pub fn clear(&mut self, page_table: &mut B::PageTable) -> MappingResult {
+ for (_, area) in self.areas.iter() {
+ area.unmap_area(page_table)?;
+ }
+ self.areas.clear();
+ Ok(())
+ }
+
+    /// Changes the flags of memory mappings within the given address range.
+    ///
+    /// `update_flags` is a function that takes the old flags and returns the
+    /// new flags, or [`None`] if the flags should not be changed (e.g., some
+    /// flags cannot be modified through this interface). Areas for which it
+    /// returns [`None`] are skipped.
+    ///
+    /// Memory areas that are fully contained in the range, contain the range,
+    /// or intersect a boundary are handled similarly to [`unmap`](Self::unmap):
+    /// they are updated in place, split, or shrunk as needed.
+ pub fn protect(
+ &mut self,
+ start: B::Addr,
+ size: usize,
+ update_flags: impl Fn(B::Flags) -> Option<B::Flags>,
+ page_table: &mut B::PageTable,
+ ) -> MappingResult {
+ let end = start.checked_add(size).ok_or(MappingError::InvalidParam)?;
+ let mut to_insert = Vec::new();
+ for (&area_start, area) in self.areas.iter_mut() {
+ let area_end = area.end();
+
+ if let Some(new_flags) = update_flags(area.flags()) {
+ if area_start >= end {
+ // [ prot ]
+ // [ area ]
+ break;
+ } else if area_end <= start {
+ // [ prot ]
+ // [ area ]
+ // Do nothing
+ } else if area_start >= start && area_end <= end {
+ // [ prot ]
+ // [ area ]
+ area.protect_area(new_flags, page_table)?;
+ area.set_flags(new_flags);
+ } else if area_start < start && area_end > end {
+ // [ prot ]
+ // [ left | area | right ]
+ let right_part = area.split(end).unwrap();
+ area.set_end(start);
+
+ let mut middle_part =
+ MemoryArea::new(start, size, area.flags(), area.backend().clone());
+ middle_part.protect_area(new_flags, page_table)?;
+ middle_part.set_flags(new_flags);
+
+ to_insert.push((right_part.start(), right_part));
+ to_insert.push((middle_part.start(), middle_part));
+ } else if area_end > end {
+ // [ prot ]
+ // [ area | right ]
+ let right_part = area.split(end).unwrap();
+ area.protect_area(new_flags, page_table)?;
+ area.set_flags(new_flags);
+
+ to_insert.push((right_part.start(), right_part));
+ } else {
+ // [ prot ]
+ // [ left | area ]
+ let mut right_part = area.split(start).unwrap();
+ right_part.protect_area(new_flags, page_table)?;
+ right_part.set_flags(new_flags);
+
+ to_insert.push((right_part.start(), right_part));
+ }
+ }
+ }
+ self.areas.extend(to_insert);
+ Ok(())
+ }
+}
+
+impl<B: MappingBackend> fmt::Debug for MemorySet<B>
+where
+ B::Addr: fmt::Debug,
+ B::Flags: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_list().entries(self.areas.values()).finish()
+ }
+}
+
fn:
) to \
+ restrict the search to a given item kind.","Accepted kinds are: fn
, mod
, struct
, \
+ enum
, trait
, type
, macro
, \
+ and const
.","Search functions by type signature (e.g., vec -> usize
or \
+ -> vec
or String, enum:Cow -> bool
)","You can look for items with an exact name by putting double quotes around \
+ your request: \"string\"
","Look for functions that accept or return \
+ slices and \
+ arrays by writing \
+ square brackets (e.g., -> [u8]
or [] -> Option
)","Look for items inside another one by searching for a path: vec::Vec
",].map(x=>""+x+"
").join("");const div_infos=document.createElement("div");addClass(div_infos,"infos");div_infos.innerHTML="${value.replaceAll(" ", " ")}
`}else{error[index]=value}});output+=`Takes each element in the Iterator
: if it is an Err
, no further\nelements are taken, and the Err
is returned. Should no Err
occur, a\ncontainer with the values of each Result
is returned.
Here is an example which increments every integer in a vector,\nchecking for overflow:
\n\nlet v = vec![1, 2];\nlet res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32|\n x.checked_add(1).ok_or(\"Overflow!\")\n).collect();\nassert_eq!(res, Ok(vec![2, 3]));
Here is another example that tries to subtract one from another list\nof integers, this time checking for underflow:
\n\nlet v = vec![1, 2, 0];\nlet res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32|\n x.checked_sub(1).ok_or(\"Underflow!\")\n).collect();\nassert_eq!(res, Err(\"Underflow!\"));
Here is a variation on the previous example, showing that no\nfurther elements are taken from iter
after the first Err
.
let v = vec![3, 2, 1, 10];\nlet mut shared = 0;\nlet res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32| {\n shared += x;\n x.checked_sub(2).ok_or(\"Underflow!\")\n}).collect();\nassert_eq!(res, Err(\"Underflow!\"));\nassert_eq!(shared, 6);
Since the third element caused an underflow, no further elements were taken,\nso the final value of shared
is 6 (= 3 + 2 + 1
), not 16.
try_trait_v2
)Residual
type. Read moreReturns a consuming iterator over the possibly contained value.
\nThe iterator yields one value if the result is Result::Ok
, otherwise none.
let x: Result<u32, &str> = Ok(5);\nlet v: Vec<u32> = x.into_iter().collect();\nassert_eq!(v, [5]);\n\nlet x: Result<u32, &str> = Err(\"nothing!\");\nlet v: Vec<u32> = x.into_iter().collect();\nassert_eq!(v, []);
Takes each element in the Iterator
: if it is an Err
, no further\nelements are taken, and the Err
is returned. Should no Err
\noccur, the product of all elements is returned.
This multiplies each number in a vector of strings,\nif a string could not be parsed the operation returns Err
:
let nums = vec![\"5\", \"10\", \"1\", \"2\"];\nlet total: Result<usize, _> = nums.iter().map(|w| w.parse::<usize>()).product();\nassert_eq!(total, Ok(100));\nlet nums = vec![\"5\", \"10\", \"one\", \"2\"];\nlet total: Result<usize, _> = nums.iter().map(|w| w.parse::<usize>()).product();\nassert!(total.is_err());
Maps a Result<&mut T, E>
to a Result<T, E>
by copying the contents of the\nOk
part.
let mut val = 12;\nlet x: Result<&mut i32, i32> = Ok(&mut val);\nassert_eq!(x, Ok(&mut 12));\nlet copied = x.copied();\nassert_eq!(copied, Ok(12));
Maps a Result<&mut T, E>
to a Result<T, E>
by cloning the contents of the\nOk
part.
let mut val = 12;\nlet x: Result<&mut i32, i32> = Ok(&mut val);\nassert_eq!(x, Ok(&mut 12));\nlet cloned = x.cloned();\nassert_eq!(cloned, Ok(12));
Transposes a Result
of an Option
into an Option
of a Result
.
Ok(None)
will be mapped to None
.\nOk(Some(_))
and Err(_)
will be mapped to Some(Ok(_))
and Some(Err(_))
.
#[derive(Debug, Eq, PartialEq)]\nstruct SomeErr;\n\nlet x: Result<Option<i32>, SomeErr> = Ok(Some(5));\nlet y: Option<Result<i32, SomeErr>> = Some(Ok(5));\nassert_eq!(x.transpose(), y);
result_flattening
)Converts from Result<Result<T, E>, E>
to Result<T, E>
#![feature(result_flattening)]\nlet x: Result<Result<&'static str, u32>, u32> = Ok(Ok(\"hello\"));\nassert_eq!(Ok(\"hello\"), x.flatten());\n\nlet x: Result<Result<&'static str, u32>, u32> = Ok(Err(6));\nassert_eq!(Err(6), x.flatten());\n\nlet x: Result<Result<&'static str, u32>, u32> = Err(6);\nassert_eq!(Err(6), x.flatten());
Flattening only removes one level of nesting at a time:
\n\n#![feature(result_flattening)]\nlet x: Result<Result<Result<&'static str, u32>, u32>, u32> = Ok(Ok(Ok(\"hello\")));\nassert_eq!(Ok(Ok(\"hello\")), x.flatten());\nassert_eq!(Ok(\"hello\"), x.flatten().flatten());
Returns true
if the result is Ok
and the value inside of it matches a predicate.
let x: Result<u32, &str> = Ok(2);\nassert_eq!(x.is_ok_and(|x| x > 1), true);\n\nlet x: Result<u32, &str> = Ok(0);\nassert_eq!(x.is_ok_and(|x| x > 1), false);\n\nlet x: Result<u32, &str> = Err(\"hey\");\nassert_eq!(x.is_ok_and(|x| x > 1), false);
Returns true
if the result is Err
and the value inside of it matches a predicate.
use std::io::{Error, ErrorKind};\n\nlet x: Result<u32, Error> = Err(Error::new(ErrorKind::NotFound, \"!\"));\nassert_eq!(x.is_err_and(|x| x.kind() == ErrorKind::NotFound), true);\n\nlet x: Result<u32, Error> = Err(Error::new(ErrorKind::PermissionDenied, \"!\"));\nassert_eq!(x.is_err_and(|x| x.kind() == ErrorKind::NotFound), false);\n\nlet x: Result<u32, Error> = Ok(123);\nassert_eq!(x.is_err_and(|x| x.kind() == ErrorKind::NotFound), false);
Converts from Result<T, E>
to Option<E>
.
Converts self
into an Option<E>
, consuming self
,\nand discarding the success value, if any.
let x: Result<u32, &str> = Ok(2);\nassert_eq!(x.err(), None);\n\nlet x: Result<u32, &str> = Err(\"Nothing here\");\nassert_eq!(x.err(), Some(\"Nothing here\"));
Converts from &Result<T, E>
to Result<&T, &E>
.
Produces a new Result
, containing a reference\ninto the original, leaving the original in place.
let x: Result<u32, &str> = Ok(2);\nassert_eq!(x.as_ref(), Ok(&2));\n\nlet x: Result<u32, &str> = Err(\"Error\");\nassert_eq!(x.as_ref(), Err(&\"Error\"));
Converts from &mut Result<T, E>
to Result<&mut T, &mut E>
.
fn mutate(r: &mut Result<i32, i32>) {\n match r.as_mut() {\n Ok(v) => *v = 42,\n Err(e) => *e = 0,\n }\n}\n\nlet mut x: Result<i32, i32> = Ok(2);\nmutate(&mut x);\nassert_eq!(x.unwrap(), 42);\n\nlet mut x: Result<i32, i32> = Err(13);\nmutate(&mut x);\nassert_eq!(x.unwrap_err(), 0);
Maps a Result<T, E>
to Result<U, E>
by applying a function to a\ncontained Ok
value, leaving an Err
value untouched.
This function can be used to compose the results of two functions.
\nPrint the numbers on each line of a string multiplied by two.
\n\nlet line = \"1\\n2\\n3\\n4\\n\";\n\nfor num in line.lines() {\n match num.parse::<i32>().map(|i| i * 2) {\n Ok(n) => println!(\"{n}\"),\n Err(..) => {}\n }\n}
Returns the provided default (if Err
), or\napplies a function to the contained value (if Ok
).
Arguments passed to map_or
are eagerly evaluated; if you are passing\nthe result of a function call, it is recommended to use map_or_else
,\nwhich is lazily evaluated.
let x: Result<_, &str> = Ok(\"foo\");\nassert_eq!(x.map_or(42, |v| v.len()), 3);\n\nlet x: Result<&str, _> = Err(\"bar\");\nassert_eq!(x.map_or(42, |v| v.len()), 42);
Maps a Result<T, E>
to U
by applying fallback function default
to\na contained Err
value, or function f
to a contained Ok
value.
This function can be used to unpack a successful result\nwhile handling an error.
\nlet k = 21;\n\nlet x : Result<_, &str> = Ok(\"foo\");\nassert_eq!(x.map_or_else(|e| k * 2, |v| v.len()), 3);\n\nlet x : Result<&str, _> = Err(\"bar\");\nassert_eq!(x.map_or_else(|e| k * 2, |v| v.len()), 42);
Maps a Result<T, E>
to Result<T, F>
by applying a function to a\ncontained Err
value, leaving an Ok
value untouched.
This function can be used to pass through a successful result while handling\nan error.
\nfn stringify(x: u32) -> String { format!(\"error code: {x}\") }\n\nlet x: Result<u32, u32> = Ok(2);\nassert_eq!(x.map_err(stringify), Ok(2));\n\nlet x: Result<u32, u32> = Err(13);\nassert_eq!(x.map_err(stringify), Err(\"error code: 13\".to_string()));
Converts from Result<T, E>
(or &Result<T, E>
) to Result<&<T as Deref>::Target, &E>
.
Coerces the Ok
variant of the original Result
via Deref
\nand returns the new Result
.
let x: Result<String, u32> = Ok(\"hello\".to_string());\nlet y: Result<&str, &u32> = Ok(\"hello\");\nassert_eq!(x.as_deref(), y);\n\nlet x: Result<String, u32> = Err(42);\nlet y: Result<&str, &u32> = Err(&42);\nassert_eq!(x.as_deref(), y);
Converts from Result<T, E>
(or &mut Result<T, E>
) to Result<&mut <T as DerefMut>::Target, &mut E>
.
Coerces the Ok
variant of the original Result
via DerefMut
\nand returns the new Result
.
let mut s = \"HELLO\".to_string();\nlet mut x: Result<String, u32> = Ok(\"hello\".to_string());\nlet y: Result<&mut str, &mut u32> = Ok(&mut s);\nassert_eq!(x.as_deref_mut().map(|x| { x.make_ascii_uppercase(); x }), y);\n\nlet mut i = 42;\nlet mut x: Result<String, u32> = Err(42);\nlet y: Result<&mut str, &mut u32> = Err(&mut i);\nassert_eq!(x.as_deref_mut().map(|x| { x.make_ascii_uppercase(); x }), y);
Returns an iterator over the possibly contained value.
\nThe iterator yields one value if the result is Result::Ok
, otherwise none.
let x: Result<u32, &str> = Ok(7);\nassert_eq!(x.iter().next(), Some(&7));\n\nlet x: Result<u32, &str> = Err(\"nothing!\");\nassert_eq!(x.iter().next(), None);
Returns a mutable iterator over the possibly contained value.
\nThe iterator yields one value if the result is Result::Ok
, otherwise none.
let mut x: Result<u32, &str> = Ok(7);\nmatch x.iter_mut().next() {\n Some(v) => *v = 40,\n None => {},\n}\nassert_eq!(x, Ok(40));\n\nlet mut x: Result<u32, &str> = Err(\"nothing!\");\nassert_eq!(x.iter_mut().next(), None);
Returns the contained `Ok` value, consuming the `self` value.

Because this function may panic, its use is generally discouraged. Instead, prefer to use pattern matching and handle the `Err` case explicitly, or call `unwrap_or`, `unwrap_or_else`, or `unwrap_or_default`.

Panics if the value is an `Err`, with a panic message including the passed message, and the content of the `Err`.
```rust
let x: Result<u32, &str> = Err("emergency failure");
x.expect("Testing expect"); // panics with `Testing expect: emergency failure`
```
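If panicking is not acceptable, the alternatives named above avoid it entirely; a minimal sketch of each (values chosen for illustration):

```rust
let x: Result<u32, &str> = Err("nothing here");

// Each alternative yields a value instead of panicking on `Err`.
assert_eq!(x.unwrap_or(0), 0);                        // fixed fallback
assert_eq!(x.unwrap_or_else(|e| e.len() as u32), 12); // computed from the error
assert_eq!(x.unwrap_or_default(), 0);                 // `u32::default()`
```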
We recommend that `expect` messages are used to describe the reason you expect the `Result` should be `Ok`.
```rust
let path = std::env::var("IMPORTANT_PATH")
    .expect("env variable `IMPORTANT_PATH` should be set by `wrapper_script.sh`");
```
Hint: If you’re having trouble remembering how to phrase `expect` error messages, remember to focus on the word “should”, as in “env variable should be set by blah” or “the given binary should be available and executable by the current user”.
For more detail on `expect` message styles and the reasoning behind our recommendation, please refer to the section on “Common Message Styles” in the `std::error` module docs.
Returns the contained `Ok` value, consuming the `self` value.

Because this function may panic, its use is generally discouraged. Instead, prefer to use pattern matching and handle the `Err` case explicitly, or call `unwrap_or`, `unwrap_or_else`, or `unwrap_or_default`.

Panics if the value is an `Err`, with a panic message provided by the `Err`’s value.
Basic usage:
```rust
let x: Result<u32, &str> = Ok(2);
assert_eq!(x.unwrap(), 2);
```
```rust
let x: Result<u32, &str> = Err("emergency failure");
x.unwrap(); // panics with `emergency failure`
```
Returns the contained `Ok` value or a default.

Consumes the `self` argument then, if `Ok`, returns the contained value, otherwise if `Err`, returns the default value for that type.
Converts a string to an integer, turning poorly-formed strings into 0 (the default value for integers). `parse` converts a string to any other type that implements `FromStr`, returning an `Err` on error.
```rust
let good_year_from_input = "1909";
let bad_year_from_input = "190blarg";
let good_year = good_year_from_input.parse().unwrap_or_default();
let bad_year = bad_year_from_input.parse().unwrap_or_default();

assert_eq!(1909, good_year);
assert_eq!(0, bad_year);
```
Returns the contained `Err` value, consuming the `self` value.

Panics if the value is an `Ok`, with a panic message including the passed message, and the content of the `Ok`.
```rust
let x: Result<u32, &str> = Ok(10);
x.expect_err("Testing expect_err"); // panics with `Testing expect_err: 10`
```
Returns the contained `Err` value, consuming the `self` value.

Panics if the value is an `Ok`, with a custom panic message provided by the `Ok`’s value.
```rust
let x: Result<u32, &str> = Ok(2);
x.unwrap_err(); // panics with `2`
```
```rust
let x: Result<u32, &str> = Err("emergency failure");
assert_eq!(x.unwrap_err(), "emergency failure");
```
(nightly `unwrap_infallible`) Returns the contained `Ok` value, but never panics.

Unlike `unwrap`, this method is known to never panic on the result types it is implemented for. Therefore, it can be used instead of `unwrap` as a maintainability safeguard that will fail to compile if the error type of the `Result` is later changed to an error that can actually occur.
```rust
fn only_good_news() -> Result<String, !> {
    Ok("this is fine".into())
}

let s: String = only_good_news().into_ok();
println!("{s}");
```
(nightly `unwrap_infallible`) Returns the contained `Err` value, but never panics.

Unlike `unwrap_err`, this method is known to never panic on the result types it is implemented for. Therefore, it can be used instead of `unwrap_err` as a maintainability safeguard that will fail to compile if the ok type of the `Result` is later changed to a type that can actually occur.
```rust
fn only_bad_news() -> Result<!, String> {
    Err("Oops, it failed".into())
}

let error: String = only_bad_news().into_err();
println!("{error}");
```
Returns `res` if the result is `Ok`, otherwise returns the `Err` value of `self`.

Arguments passed to `and` are eagerly evaluated; if you are passing the result of a function call, it is recommended to use `and_then`, which is lazily evaluated.
```rust
let x: Result<u32, &str> = Ok(2);
let y: Result<&str, &str> = Err("late error");
assert_eq!(x.and(y), Err("late error"));

let x: Result<u32, &str> = Err("early error");
let y: Result<&str, &str> = Ok("foo");
assert_eq!(x.and(y), Err("early error"));

let x: Result<u32, &str> = Err("not a 2");
let y: Result<&str, &str> = Err("late error");
assert_eq!(x.and(y), Err("not a 2"));

let x: Result<u32, &str> = Ok(2);
let y: Result<&str, &str> = Ok("different result type");
assert_eq!(x.and(y), Ok("different result type"));
```
Calls `op` if the result is `Ok`, otherwise returns the `Err` value of `self`.

This function can be used for control flow based on `Result` values.
```rust
fn sq_then_to_string(x: u32) -> Result<String, &'static str> {
    x.checked_mul(x).map(|sq| sq.to_string()).ok_or("overflowed")
}

assert_eq!(Ok(2).and_then(sq_then_to_string), Ok(4.to_string()));
assert_eq!(Ok(1_000_000).and_then(sq_then_to_string), Err("overflowed"));
assert_eq!(Err("not a number").and_then(sq_then_to_string), Err("not a number"));
```
Often used to chain fallible operations that may return `Err`.
```rust
use std::{io::ErrorKind, path::Path};

// Note: on Windows "/" maps to "C:\"
let root_modified_time = Path::new("/").metadata().and_then(|md| md.modified());
assert!(root_modified_time.is_ok());

let should_fail = Path::new("/bad/path").metadata().and_then(|md| md.modified());
assert!(should_fail.is_err());
assert_eq!(should_fail.unwrap_err().kind(), ErrorKind::NotFound);
```
Returns `res` if the result is `Err`, otherwise returns the `Ok` value of `self`.

Arguments passed to `or` are eagerly evaluated; if you are passing the result of a function call, it is recommended to use `or_else`, which is lazily evaluated.
```rust
let x: Result<u32, &str> = Ok(2);
let y: Result<u32, &str> = Err("late error");
assert_eq!(x.or(y), Ok(2));

let x: Result<u32, &str> = Err("early error");
let y: Result<u32, &str> = Ok(2);
assert_eq!(x.or(y), Ok(2));

let x: Result<u32, &str> = Err("not a 2");
let y: Result<u32, &str> = Err("late error");
assert_eq!(x.or(y), Err("late error"));

let x: Result<u32, &str> = Ok(2);
let y: Result<u32, &str> = Ok(100);
assert_eq!(x.or(y), Ok(2));
```
Calls `op` if the result is `Err`, otherwise returns the `Ok` value of `self`.
This function can be used for control flow based on result values.
```rust
fn sq(x: u32) -> Result<u32, u32> { Ok(x * x) }
fn err(x: u32) -> Result<u32, u32> { Err(x) }

assert_eq!(Ok(2).or_else(sq).or_else(sq), Ok(2));
assert_eq!(Ok(2).or_else(err).or_else(sq), Ok(2));
assert_eq!(Err(3).or_else(sq).or_else(err), Ok(9));
assert_eq!(Err(3).or_else(err).or_else(err), Err(3));
```
Returns the contained `Ok` value or a provided default.

Arguments passed to `unwrap_or` are eagerly evaluated; if you are passing the result of a function call, it is recommended to use `unwrap_or_else`, which is lazily evaluated.
```rust
let default = 2;
let x: Result<u32, &str> = Ok(9);
assert_eq!(x.unwrap_or(default), 9);

let x: Result<u32, &str> = Err("error");
assert_eq!(x.unwrap_or(default), default);
```
Returns the contained `Ok` value, consuming the `self` value, without checking that the value is not an `Err`.

Calling this method on an `Err` is undefined behavior.
```rust
let x: Result<u32, &str> = Ok(2);
assert_eq!(unsafe { x.unwrap_unchecked() }, 2);
```
```rust
let x: Result<u32, &str> = Err("emergency failure");
unsafe { x.unwrap_unchecked(); } // Undefined behavior!
```
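Such a call is only sound when the `Ok` case has already been established. A minimal sketch of a guarded use; the `get_checked` helper is hypothetical, written for illustration:

```rust
fn get_checked(x: Result<u32, &str>) -> Option<u32> {
    if x.is_ok() {
        // SAFETY: we just verified that `x` is `Ok`.
        Some(unsafe { x.unwrap_unchecked() })
    } else {
        None
    }
}

assert_eq!(get_checked(Ok(2)), Some(2));
assert_eq!(get_checked(Err("oops")), None);
```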
Returns the contained `Err` value, consuming the `self` value, without checking that the value is not an `Ok`.

Calling this method on an `Ok` is undefined behavior.
```rust
let x: Result<u32, &str> = Ok(2);
unsafe { x.unwrap_err_unchecked() }; // Undefined behavior!
```
```rust
let x: Result<u32, &str> = Err("emergency failure");
assert_eq!(unsafe { x.unwrap_err_unchecked() }, "emergency failure");
```
Takes each element in the `Iterator`: if it is an `Err`, no further elements are taken, and the `Err` is returned. Should no `Err` occur, the sum of all elements is returned.

This sums up every integer in a vector, rejecting the sum if a negative element is encountered:
```rust
let f = |&x: &i32| if x < 0 { Err("Negative element found") } else { Ok(x) };
let v = vec![1, 2];
let res: Result<i32, _> = v.iter().map(f).sum();
assert_eq!(res, Ok(3));
let v = vec![1, -2];
let res: Result<i32, _> = v.iter().map(f).sum();
assert_eq!(res, Err("Negative element found"));
```
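The same short-circuiting is available through `FromIterator`: an iterator of `Result`s can be collected into a `Result` of a collection, stopping at the first `Err`. A minimal sketch:

```rust
let v = vec!["1", "2", "3"];
let parsed: Result<Vec<i32>, _> = v.iter().map(|s| s.parse::<i32>()).collect();
assert_eq!(parsed, Ok(vec![1, 2, 3]));

// The first `Err` stops iteration and is returned as-is.
let v = vec!["1", "nope", "3"];
let parsed: Result<Vec<i32>, _> = v.iter().map(|s| s.parse::<i32>()).collect();
assert!(parsed.is_err());
```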
The `Try` implementation for `Result` (nightly `try_trait_v2`) provides:

- `Output`: the type of the value produced by `?` when not short-circuiting.
- `Residual`: the type of the value passed to `FromResidual::from_residual` as part of `?` when short-circuiting.
- `from_output`: constructs the type from its `Output` type.
- `branch`: used in `?` to decide whether the operator should produce a value (because this returned `ControlFlow::Continue`) or propagate a value back to the caller (because this returned `ControlFlow::Break`).
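On stable toolchains these hooks are exercised only through the `?` operator itself. A minimal sketch of the control flow they describe; the `double_first` helper is hypothetical, written for illustration:

```rust
fn double_first(v: &[&str]) -> Result<i32, std::num::ParseIntError> {
    // `?` uses the `Try` machinery: continue on `Ok`, early-return on `Err`.
    let first: i32 = v[0].parse()?;
    Ok(first * 2)
}

assert_eq!(double_first(&["21"]), Ok(42));
assert!(double_first(&["not a number"]).is_err());
```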
Methods for `AddrRange`.
Creates a new address range from the start and end addresses.

Panics if `start > end`.
```rust
use memory_addr::AddrRange;

let range = AddrRange::new(0x1000usize, 0x2000);
assert_eq!(range.start, 0x1000);
assert_eq!(range.end, 0x2000);
```
And this will panic:

```rust
let _ = AddrRange::new(0x2000usize, 0x1000);
```
Creates a new address range from the start and end addresses.

Returns `None` if `start > end`.
```rust
use memory_addr::AddrRange;

let range = AddrRange::try_new(0x1000usize, 0x2000).unwrap();
assert_eq!(range.start, 0x1000);
assert_eq!(range.end, 0x2000);
assert!(AddrRange::try_new(0x2000usize, 0x1000).is_none());
```
Creates a new address range from the start and end addresses, without checking their validity.

The caller must ensure that `start <= end`; otherwise the range will be invalid and unexpected consequences will occur.
```rust
use memory_addr::AddrRange;

let range = unsafe { AddrRange::new_unchecked(0x1000usize, 0x2000) };
assert_eq!(range.start, 0x1000);
assert_eq!(range.end, 0x2000);
```
Creates a new address range from the start address and the size.

Panics if `size` is so large that computing the end address overflows.
```rust
use memory_addr::AddrRange;

let range = AddrRange::from_start_size(0x1000usize, 0x1000);
assert_eq!(range.start, 0x1000);
assert_eq!(range.end, 0x2000);
```
And this will panic:

```rust
let _ = AddrRange::from_start_size(0x1000usize, usize::MAX);
```
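The constructors above are shown with plain `usize` addresses; the same calls should work with the crate's typed addresses, since `AddrRange` is generic over the address type. A minimal sketch, assuming the `va!` macro and `VirtAddr` from `memory_addr`:

```rust
use memory_addr::{va, AddrRange, VirtAddr};

// Illustration only: the address type `A` is `VirtAddr` instead of `usize`.
let range: AddrRange<VirtAddr> = AddrRange::from_start_size(va!(0x1000), 0x1000);
assert_eq!(range.start, va!(0x1000));
assert_eq!(range.end, va!(0x2000));
```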
Creates a new address range from the start address and the size.

Returns `None` if `size` is so large that computing the end address overflows.
```rust
use memory_addr::AddrRange;

let range = AddrRange::try_from_start_size(0x1000usize, 0x1000).unwrap();
assert_eq!(range.start, 0x1000);
assert_eq!(range.end, 0x2000);
assert!(AddrRange::try_from_start_size(0x1000usize, usize::MAX).is_none());
```
Creates a new address range from the start address and the size, without checking the validity.

The caller must ensure that `size` is not so large that computing the end address overflows. Failing to do so will create an invalid range and cause unexpected consequences.
```rust
use memory_addr::AddrRange;

let range = unsafe { AddrRange::from_start_size_unchecked(0x1000usize, 0x1000) };
assert_eq!(range.start, 0x1000);
assert_eq!(range.end, 0x2000);
```
Returns `true` if the range is empty.

It’s also guaranteed that `false` will be returned if the range is invalid (i.e., `start > end`).
```rust
use memory_addr::AddrRange;

assert!(AddrRange::new(0x1000usize, 0x1000).is_empty());
assert!(!AddrRange::new(0x1000usize, 0x2000).is_empty());
```
Returns the size of the range.

```rust
use memory_addr::AddrRange;

assert_eq!(AddrRange::new(0x1000usize, 0x1000).size(), 0);
assert_eq!(AddrRange::new(0x1000usize, 0x2000).size(), 0x1000);
```
Checks if the range contains the given address.

```rust
use memory_addr::AddrRange;

let range = AddrRange::new(0x1000usize, 0x2000);
assert!(!range.contains(0x0fff));
assert!(range.contains(0x1000));
assert!(range.contains(0x1fff));
assert!(!range.contains(0x2000));
```
Checks if the range contains the given address range.

```rust
use memory_addr::{addr_range, AddrRange};

let range = AddrRange::new(0x1000usize, 0x2000);
assert!(!range.contains_range(addr_range!(0x0usize..0xfff)));
assert!(!range.contains_range(addr_range!(0x0fffusize..0x1fff)));
assert!(range.contains_range(addr_range!(0x1001usize..0x1fff)));
assert!(range.contains_range(addr_range!(0x1000usize..0x2000)));
assert!(!range.contains_range(addr_range!(0x1001usize..0x2001)));
assert!(!range.contains_range(addr_range!(0x2001usize..0x3001)));
```
Checks if the range is contained in the given address range.

```rust
use memory_addr::{addr_range, AddrRange};

let range = AddrRange::new(0x1000usize, 0x2000);
assert!(!range.contained_in(addr_range!(0xfffusize..0x1fff)));
assert!(!range.contained_in(addr_range!(0x1001usize..0x2001)));
assert!(range.contained_in(addr_range!(0xfffusize..0x2001)));
assert!(range.contained_in(addr_range!(0x1000usize..0x2000)));
```
Checks if the range overlaps with the given address range.

```rust
use memory_addr::{addr_range, AddrRange};

let range = AddrRange::new(0x1000usize, 0x2000usize);
assert!(!range.overlaps(addr_range!(0xfffusize..0xfff)));
assert!(!range.overlaps(addr_range!(0x2000usize..0x2000)));
assert!(!range.overlaps(addr_range!(0xfffusize..0x1000)));
assert!(range.overlaps(addr_range!(0xfffusize..0x1001)));
assert!(range.overlaps(addr_range!(0x1fffusize..0x2001)));
assert!(range.overlaps(addr_range!(0xfffusize..0x2001)));
```
The remaining entries are the provided methods of the standard `Iterator` trait (`next_chunk`, `advance_by`, `nth`, `intersperse`, `map_windows`, `collect_into`, `is_partitioned`, `try_reduce`, `try_find`, `array_chunks`, `cmp_by`, `partial_cmp`, `partial_cmp_by`, `eq_by`, and the lexicographical comparisons `lt`, `le`, `gt`, `ge`), many of them gated behind nightly features such as `iter_next_chunk` and `iter_order_by`; see the standard library documentation for details.