ostd/io/io_mem/mod.rs
// SPDX-License-Identifier: MPL-2.0
//! I/O memory and its allocator, which allocates memory-mapped I/O (MMIO) regions to device drivers.
mod allocator;
use core::ops::{Deref, Range};
use align_ext::AlignExt;
pub(super) use self::allocator::init;
pub(crate) use self::allocator::IoMemAllocatorBuilder;
use crate::{
    mm::{
        kspace::kvirt_area::KVirtArea,
        page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
        FallibleVmRead, FallibleVmWrite, HasPaddr, Infallible, Paddr, PodOnce, VmIo, VmIoOnce,
        VmReader, VmWriter, PAGE_SIZE,
    },
    prelude::*,
    Error,
};
/// I/O memory.
#[derive(Debug, Clone)]
pub struct IoMem {
    kvirt_area: Arc<KVirtArea>,
    // The actual range used for MMIO is
    // `kvirt_area.start + offset..kvirt_area.start + offset + limit`.
    offset: usize,
    limit: usize,
    pa: Paddr,
}
impl HasPaddr for IoMem {
    fn paddr(&self) -> Paddr {
        self.pa
    }
}
impl IoMem {
    /// Acquires an `IoMem` instance for the given range.
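    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `0xfebf_0000..0xfebf_1000` is an MMIO range that the
    /// platform actually reports for some device (the address is illustrative only):
    ///
    /// ```ignore
    /// let io_mem = IoMem::acquire(0xfebf_0000..0xfebf_1000)?;
    /// assert_eq!(io_mem.paddr(), 0xfebf_0000);
    /// assert_eq!(io_mem.length(), 0x1000);
    /// ```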
    pub fn acquire(range: Range<Paddr>) -> Result<IoMem> {
        allocator::IO_MEM_ALLOCATOR
            .get()
            .unwrap()
            .acquire(range)
            .ok_or(Error::AccessDenied)
    }
    /// Returns the physical address of the I/O memory.
    pub fn paddr(&self) -> Paddr {
        self.pa
    }

    /// Returns the length of the I/O memory region.
    pub fn length(&self) -> usize {
        self.limit
    }
    /// Slices the `IoMem`, returning another `IoMem` representing the subslice.
    ///
    /// # Panics
    ///
    /// This method will panic if the range is empty or out of bounds.
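    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `io_mem` is an `IoMem` of at least `0x200` bytes (the
    /// offsets are illustrative only):
    ///
    /// ```ignore
    /// let sub = io_mem.slice(0x100..0x200);
    /// assert_eq!(sub.length(), 0x100);
    /// assert_eq!(sub.paddr(), io_mem.paddr() + 0x100);
    /// ```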
    pub fn slice(&self, range: Range<usize>) -> Self {
        // This ensures `range.start < range.end` and `range.end <= limit`.
        assert!(!range.is_empty() && range.end <= self.limit);

        // We've checked the range is in bounds, so we can construct the new `IoMem` safely.
        Self {
            kvirt_area: self.kvirt_area.clone(),
            offset: self.offset + range.start,
            limit: range.len(),
            pa: self.pa + range.start,
        }
    }
    /// Creates a new `IoMem`.
    ///
    /// # Safety
    ///
    /// - The given physical address range must be in the I/O memory region.
    /// - Reading from or writing to I/O memory regions may have side effects. Those side effects
    ///   must not cause soundness problems (e.g., they must not corrupt the kernel memory).
    pub(crate) unsafe fn new(range: Range<Paddr>, flags: PageFlags, cache: CachePolicy) -> Self {
        let first_page_start = range.start.align_down(PAGE_SIZE);
        let last_page_end = range.end.align_up(PAGE_SIZE);
        let frames_range = first_page_start..last_page_end;
        let area_size = frames_range.len();
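        // For example (illustrative addresses only), the unaligned range
        // `0xfee0_0123..0xfee0_0323` is backed by the single frame range
        // `0xfee0_0000..0xfee0_1000` (`area_size == PAGE_SIZE`); the `IoMem` constructed
        // below then records `offset == 0x123` and `limit == 0x200`.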
        #[cfg(target_arch = "x86_64")]
        let priv_flags = crate::arch::if_tdx_enabled!({
            assert!(
                first_page_start == range.start && last_page_end == range.end,
                "I/O memory is not page aligned, which cannot be unprotected in TDX: {:#x?}..{:#x?}",
                range.start,
                range.end,
            );

            let num_pages = area_size / PAGE_SIZE;
            // SAFETY:
            // - The range `first_page_start..last_page_end` is always page aligned.
            // - FIXME: We currently do not limit the I/O memory allocator with the maximum GPA,
            //   so the address range may not fall in the GPA limit.
            // - FIXME: The I/O memory can be at a high address, so it may not be contained in the
            //   linear mapping.
            // - The caller guarantees that operations on the I/O memory do not have any side
            //   effects that may cause soundness problems, so the pages can safely be viewed as
            //   untyped memory.
            unsafe {
                crate::arch::tdx_guest::unprotect_gpa_range(first_page_start, num_pages).unwrap()
            };

            PrivilegedPageFlags::SHARED
        } else {
            PrivilegedPageFlags::empty()
        });

        #[cfg(not(target_arch = "x86_64"))]
        let priv_flags = PrivilegedPageFlags::empty();
        let prop = PageProperty {
            flags,
            cache,
            priv_flags,
        };

        // SAFETY: The caller of `IoMem::new()` ensures that the given
        // physical address range is I/O memory, so it is safe to map.
        let kva = unsafe { KVirtArea::map_untracked_frames(area_size, 0, frames_range, prop) };

        Self {
            kvirt_area: Arc::new(kva),
            offset: range.start - first_page_start,
            limit: range.len(),
            pa: range.start,
        }
    }
}
// For now, we reuse `VmReader` and `VmWriter` to access I/O memory.
//
// Note that I/O memory is not normal typed or untyped memory. Strictly speaking, it is not
// "memory", but rather I/O ports that communicate directly with the hardware. However, this code
// is in OSTD, so we can rely on the implementation details of `VmReader` and `VmWriter`, which we
// know are also suitable for accessing I/O memory.
impl IoMem {
    fn reader(&self) -> VmReader<'_, Infallible> {
        // SAFETY: The constructor of the `IoMem` structure has already ensured the
        // safety of reading from the mapped physical address, and the mapping is valid.
        unsafe {
            VmReader::from_kernel_space(
                (self.kvirt_area.deref().start() + self.offset) as *mut u8,
                self.limit,
            )
        }
    }

    fn writer(&self) -> VmWriter<'_, Infallible> {
        // SAFETY: The constructor of the `IoMem` structure has already ensured the
        // safety of writing to the mapped physical address, and the mapping is valid.
        unsafe {
            VmWriter::from_kernel_space(
                (self.kvirt_area.deref().start() + self.offset) as *mut u8,
                self.limit,
            )
        }
    }
}
impl VmIo for IoMem {
    fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
        // `self.reader()` already starts at `self.offset` within the mapped area, so `offset`
        // here is relative to the start of the I/O memory region itself.
        if self
            .limit
            .checked_sub(offset)
            .is_none_or(|remain| remain < writer.avail())
        {
            return Err(Error::InvalidArgs);
        }

        self.reader()
            .skip(offset)
            .read_fallible(writer)
            .map_err(|(e, _)| e)?;
        debug_assert!(!writer.has_avail());
        Ok(())
    }
    fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
        // As in `read`, `self.writer()` already accounts for `self.offset`.
        if self
            .limit
            .checked_sub(offset)
            .is_none_or(|remain| remain < reader.remain())
        {
            return Err(Error::InvalidArgs);
        }

        self.writer()
            .skip(offset)
            .write_fallible(reader)
            .map_err(|(e, _)| e)?;
        debug_assert!(!reader.has_remain());
        Ok(())
    }
}
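// A minimal usage sketch (illustrative offsets and buffer size only), assuming the
// buffer-based convenience methods that `VmIo` provides on top of `read`/`write`,
// such as `read_bytes` and `write_bytes`:
//
//     let mut buf = [0u8; 16];
//     io_mem.read_bytes(0x100, &mut buf)?;
//     io_mem.write_bytes(0x100, &buf)?;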
impl VmIoOnce for IoMem {
    fn read_once<T: PodOnce>(&self, offset: usize) -> Result<T> {
        self.reader().skip(offset).read_once()
    }

    fn write_once<T: PodOnce>(&self, offset: usize, new_val: &T) -> Result<()> {
        self.writer().skip(offset).write_once(new_val)
    }
}
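// A minimal usage sketch (illustrative register layout only): accessing a 32-bit device
// register at offset 0x10 of an acquired region with the `VmIoOnce` methods above.
//
//     let status: u32 = io_mem.read_once(0x10)?;
//     io_mem.write_once(0x10, &(status | 0x1))?;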
impl Drop for IoMem {
    fn drop(&mut self) {
        // TODO: Multiple `IoMem` instances should not overlap; we should refactor the driver
        // code and remove `Clone` and `IoMem::slice`. After the refactoring, `Drop` can be
        // implemented to recycle the `IoMem`.
    }
}