|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 2 | + |
| 3 | +//! Kernel page allocation and management. |
| 4 | +
|
| 5 | +use crate::{bindings, error::code::*, error::Result}; |
| 6 | +use core::{ |
| 7 | + alloc::AllocError, |
| 8 | + ffi::c_void, |
| 9 | + ptr::{self, NonNull}, |
| 10 | +}; |
| 11 | + |
/// A bitwise shift for the page size.
pub const PAGE_SHIFT: usize = bindings::PAGE_SHIFT as usize;
/// The number of bytes in a page.
pub const PAGE_SIZE: usize = 1 << PAGE_SHIFT;
/// A bitwise mask for the page size.
///
/// NOTE(review): this is the low-bits mask (`PAGE_SIZE - 1`), i.e. it extracts
/// the *offset within a page*. The C `PAGE_MASK` macro is the complement,
/// `!(PAGE_SIZE - 1)` — confirm callers expect this polarity before relying on
/// the name matching the C side.
pub const PAGE_MASK: usize = PAGE_SIZE - 1;
| 18 | + |
/// A pointer to a page that owns the page allocation.
///
/// # Invariants
///
/// The pointer points at a page, and has ownership over the page.
pub struct Page {
    // Non-null pointer to the C `struct page`. Ownership is exclusive: the
    // allocation made in `Page::new` is released exactly once, in `Drop`.
    page: NonNull<bindings::page>,
}
| 27 | + |
// SAFETY: It is safe to transfer page allocations between threads: the type
// holds only a pointer to the page, and freeing a page is not tied to the
// thread that allocated it.
unsafe impl Send for Page {}

// SAFETY: Calling `&self` methods on this type in parallel is safe. It might
// allow you to perform a data race on bytes stored in the page, but we treat
// this like data races on user pointers.
unsafe impl Sync for Page {}
| 35 | + |
| 36 | +impl Page { |
| 37 | + /// Allocates a new set of contiguous pages. |
| 38 | + pub fn new() -> Result<Self, AllocError> { |
| 39 | + // SAFETY: These are the correct arguments to allocate a single page. |
| 40 | + let page = unsafe { |
| 41 | + bindings::alloc_pages( |
| 42 | + bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::__GFP_HIGHMEM, |
| 43 | + 0, |
| 44 | + ) |
| 45 | + }; |
| 46 | + |
| 47 | + match NonNull::new(page) { |
| 48 | + // INVARIANT: We checked that the allocation above succeeded. |
| 49 | + Some(page) => Ok(Self { page }), |
| 50 | + None => Err(AllocError), |
| 51 | + } |
| 52 | + } |
| 53 | + |
| 54 | + /// Returns a raw pointer to the page. |
| 55 | + pub fn as_ptr(&self) -> *mut bindings::page { |
| 56 | + self.page.as_ptr() |
| 57 | + } |
| 58 | + |
| 59 | + /// Runs a piece of code with this page mapped to an address. |
| 60 | + /// |
| 61 | + /// It is up to the caller to use the provided raw pointer correctly. |
| 62 | + pub fn with_page_mapped<T>(&self, f: impl FnOnce(*mut c_void) -> T) -> T { |
| 63 | + // SAFETY: `page` is valid due to the type invariants on `Page`. |
| 64 | + let mapped_addr = unsafe { bindings::kmap_local_page(self.as_ptr()) }; |
| 65 | + |
| 66 | + let res = f(mapped_addr); |
| 67 | + |
| 68 | + // SAFETY: This unmaps the page mapped above. |
| 69 | + // |
| 70 | + // Since this API takes the user code as a closure, it can only be used |
| 71 | + // in a manner where the pages are unmapped in reverse order. This is as |
| 72 | + // required by `kunmap_local`. |
| 73 | + // |
| 74 | + // In other words, if this call to `kunmap_local` happens when a |
| 75 | + // different page should be unmapped first, then there must necessarily |
| 76 | + // be a call to `kmap_local_page` other than the call just above in |
| 77 | + // `with_page_mapped` that made that possible. In this case, it is the |
| 78 | + // unsafe block that wraps that other call that is incorrect. |
| 79 | + unsafe { bindings::kunmap_local(mapped_addr) }; |
| 80 | + |
| 81 | + res |
| 82 | + } |
| 83 | + |
| 84 | + /// Runs a piece of code with a raw pointer to a slice of this page, with |
| 85 | + /// bounds checking. |
| 86 | + /// |
| 87 | + /// If `f` is called, then it will be called with a pointer that points at |
| 88 | + /// `off` bytes into the page, and the pointer will be valid for at least |
| 89 | + /// `len` bytes. The pointer is only valid on this task, as this method uses |
| 90 | + /// a local mapping. |
| 91 | + /// |
| 92 | + /// If `off` and `len` refers to a region outside of this page, then this |
| 93 | + /// method returns `EINVAL` and does not call `f`. |
| 94 | + pub fn with_pointer_into_page<T>( |
| 95 | + &self, |
| 96 | + off: usize, |
| 97 | + len: usize, |
| 98 | + f: impl FnOnce(*mut u8) -> Result<T>, |
| 99 | + ) -> Result<T> { |
| 100 | + let bounds_ok = off <= PAGE_SIZE && len <= PAGE_SIZE && (off + len) <= PAGE_SIZE; |
| 101 | + |
| 102 | + if bounds_ok { |
| 103 | + self.with_page_mapped(move |page_addr| { |
| 104 | + // SAFETY: The `off` integer is at most `PAGE_SIZE`, so this pointer offset will |
| 105 | + // result in a pointer that is in bounds or one off the end of the page. |
| 106 | + f(unsafe { page_addr.cast::<u8>().add(off) }) |
| 107 | + }) |
| 108 | + } else { |
| 109 | + Err(EINVAL) |
| 110 | + } |
| 111 | + } |
| 112 | + |
| 113 | + /// Maps the page and reads from it into the given buffer. |
| 114 | + /// |
| 115 | + /// # Safety |
| 116 | + /// |
| 117 | + /// Callers must ensure that `dest` is valid for writing `len` bytes. |
| 118 | + pub unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) -> Result { |
| 119 | + self.with_pointer_into_page(offset, len, move |from_ptr| { |
| 120 | + // SAFETY: If `with_pointer_into_page` calls into this closure, then |
| 121 | + // it has performed a bounds check and guarantees that `from_ptr` is |
| 122 | + // valid for `len` bytes. |
| 123 | + unsafe { ptr::copy(from_ptr, dest, len) }; |
| 124 | + Ok(()) |
| 125 | + }) |
| 126 | + } |
| 127 | + |
| 128 | + /// Maps the page and writes into it from the given buffer. |
| 129 | + /// |
| 130 | + /// # Safety |
| 131 | + /// |
| 132 | + /// Callers must ensure that `src` is valid for reading `len` bytes. |
| 133 | + pub unsafe fn write(&self, src: *const u8, offset: usize, len: usize) -> Result { |
| 134 | + self.with_pointer_into_page(offset, len, move |to_ptr| { |
| 135 | + // SAFETY: If `with_pointer_into_page` calls into this closure, then |
| 136 | + // it has performed a bounds check and guarantees that `to_ptr` is |
| 137 | + // valid for `len` bytes. |
| 138 | + unsafe { ptr::copy(src, to_ptr, len) }; |
| 139 | + Ok(()) |
| 140 | + }) |
| 141 | + } |
| 142 | + |
| 143 | + /// Maps the page and zeroes the given slice. |
| 144 | + pub fn fill_zero(&self, offset: usize, len: usize) -> Result { |
| 145 | + self.with_pointer_into_page(offset, len, move |to_ptr| { |
| 146 | + // SAFETY: If `with_pointer_into_page` calls into this closure, then |
| 147 | + // it has performed a bounds check and guarantees that `to_ptr` is |
| 148 | + // valid for `len` bytes. |
| 149 | + unsafe { ptr::write_bytes(to_ptr, 0u8, len) }; |
7688
td> | 150 | + Ok(()) |
| 151 | + }) |
| 152 | + } |
| 153 | +} |
| 154 | + |
impl Drop for Page {
    fn drop(&mut self) {
        // SAFETY: By the type invariants, we have ownership of the page and can
        // free it. The order argument of 0 matches the single-page (order-0)
        // allocation made in `Page::new`.
        unsafe { bindings::__free_pages(self.page.as_ptr(), 0) };
    }
}