LIST: rust: add abstraction for `struct page` · Rust-for-Linux/linux@0171513 · GitHub

Commit 0171513
Darksonn authored and metaspace committed
LIST: rust: add abstraction for `struct page`

Adds a new struct called `Page` that wraps a pointer to `struct page`. This struct is assumed to hold ownership over the page, so that Rust code can allocate and manage pages directly.

The page type has various methods for reading and writing into the page. These methods will temporarily map the page to allow the operation. All of these methods use a helper that takes an offset and length, performs bounds checks, and returns a pointer to the given offset in the page.

This patch only adds support for pages of order zero, as that is all Rust Binder needs. However, it is written to make it easy to add support for higher-order pages in the future. To do that, you would add a const generic parameter to `Page` that specifies the order. Most of the methods do not need to be adjusted, as the logic for dealing with mapping multiple pages at once can be isolated to just the `with_pointer_into_page` method. Finally, the struct can be renamed to `Pages<ORDER>`, and the type alias `Page = Pages<0>` can be introduced.

Rust Binder needs to manage pages directly as that is how transactions are delivered: Each process has an mmap'd region for incoming transactions. When an incoming transaction arrives, the Binder driver will choose a region in the mmap, allocate and map the relevant pages manually, and copy the incoming transaction directly into the page. This architecture allows the driver to copy transactions directly from the address space of one process to another, without an intermediate copy to a kernel buffer.

This code is based on Wedson's page abstractions from the old rust branch, but it has been modified by Alice by removing the incomplete support for higher-order pages, and by introducing the `with_*` helpers to consolidate the bounds-checking logic into a single place.

Co-developed-by: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
Source: https://lore.kernel.org/all/20240124-alice-mm-v1-3-d1abcec83c44@google.com/
1 parent e8f897f commit 0171513
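As a rough illustration of how a caller might use the new API described above, the sketch below allocates a page, writes a small buffer into it, reads it back, and zeroes the range again. It is not part of the patch: the `page_roundtrip` function and its buffers are made up for illustration, and it assumes the kernel crate's usual `AllocError`-to-`ENOMEM` conversion so that `?` works on `Page::new()`.

use kernel::page::{Page, PAGE_SIZE};
use kernel::prelude::*;

/// Hypothetical example, not part of the patch.
fn page_roundtrip() -> Result {
    // Allocate a single zeroed order-0 page.
    let page = Page::new()?;

    let data = [1u8, 2, 3, 4];
    let mut out = [0u8; 4];

    // SAFETY: `data.as_ptr()` is valid for reading `data.len()` bytes.
    unsafe { page.write(data.as_ptr(), 0, data.len())? };

    // SAFETY: `out.as_mut_ptr()` is valid for writing `out.len()` bytes.
    unsafe { page.read(out.as_mut_ptr(), 0, out.len())? };

    // Bounds-checked and safe: zero the same range again.
    page.fill_zero(0, data.len())?;

    // A range outside the page is rejected with EINVAL before the closure runs.
    assert!(page.fill_zero(PAGE_SIZE, 1).is_err());

    Ok(())
}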

File tree

4 files changed: +183 -0 lines changed

rust/bindings/bindings_helper.h

Lines changed: 1 addition & 0 deletions
@@ -21,3 +21,4 @@
 const size_t RUST_CONST_HELPER_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN;
 const gfp_t RUST_CONST_HELPER_GFP_KERNEL = GFP_KERNEL;
 const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO;
+const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM;

rust/helpers.c

Lines changed: 20 additions & 0 deletions
@@ -25,6 +25,8 @@
 #include <linux/build_bug.h>
 #include <linux/err.h>
 #include <linux/errname.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
 #include <linux/mutex.h>
 #include <linux/refcount.h>
 #include <linux/sched/signal.h>
@@ -79,6 +81,24 @@ int rust_helper_signal_pending(struct task_struct *t)
 }
 EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
 
+struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+	return alloc_pages(gfp_mask, order);
+}
+EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);
+
+void *rust_helper_kmap_local_page(struct page *page)
+{
+	return kmap_local_page(page);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kmap_local_page);
+
+void rust_helper_kunmap_local(const void *addr)
+{
+	kunmap_local(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kunmap_local);
+
 refcount_t rust_helper_REFCOUNT_INIT(int n)
 {
 	return (refcount_t)REFCOUNT_INIT(n);

rust/kernel/lib.rs

Lines changed: 1 addition & 0 deletions
@@ -41,6 +41,7 @@ pub mod ioctl;
 pub mod kunit;
 #[cfg(CONFIG_NET)]
 pub mod net;
+pub mod page;
 pub mod prelude;
 pub mod print;
 mod static_assert;

rust/kernel/page.rs

Lines changed: 161 additions & 0 deletions
@@ -0,0 +1,161 @@
// SPDX-License-Identifier: GPL-2.0

//! Kernel page allocation and management.

use crate::{bindings, error::code::*, error::Result};
use core::{
    alloc::AllocError,
    ffi::c_void,
    ptr::{self, NonNull},
};

/// A bitwise shift for the page size.
pub const PAGE_SHIFT: usize = bindings::PAGE_SHIFT as usize;
/// The number of bytes in a page.
pub const PAGE_SIZE: usize = 1 << PAGE_SHIFT;
/// A bitwise mask for the page size.
pub const PAGE_MASK: usize = PAGE_SIZE - 1;

/// A pointer to a page that owns the page allocation.
///
/// # Invariants
///
/// The pointer points at a page, and has ownership over the page.
pub struct Page {
    page: NonNull<bindings::page>,
}

// SAFETY: It is safe to transfer page allocations between threads.
unsafe impl Send for Page {}

// SAFETY: Calling `&self` methods on this type in parallel is safe. It might
// allow you to perform a data race on bytes stored in the page, but we treat
// this like data races on user pointers.
unsafe impl Sync for Page {}

impl Page {
    /// Allocates a new set of contiguous pages.
    pub fn new() -> Result<Self, AllocError> {
        // SAFETY: These are the correct arguments to allocate a single page.
        let page = unsafe {
            bindings::alloc_pages(
                bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::__GFP_HIGHMEM,
                0,
            )
        };

        match NonNull::new(page) {
            // INVARIANT: We checked that the allocation above succeeded.
            Some(page) => Ok(Self { page }),
            None => Err(AllocError),
        }
    }

    /// Returns a raw pointer to the page.
    pub fn as_ptr(&self) -> *mut bindings::page {
        self.page.as_ptr()
    }

    /// Runs a piece of code with this page mapped to an address.
    ///
    /// It is up to the caller to use the provided raw pointer correctly.
    pub fn with_page_mapped<T>(&self, f: impl FnOnce(*mut c_void) -> T) -> T {
        // SAFETY: `page` is valid due to the type invariants on `Page`.
        let mapped_addr = unsafe { bindings::kmap_local_page(self.as_ptr()) };

        let res = f(mapped_addr);

        // SAFETY: This unmaps the page mapped above.
        //
        // Since this API takes the user code as a closure, it can only be used
        // in a manner where the pages are unmapped in reverse order. This is as
        // required by `kunmap_local`.
        //
        // In other words, if this call to `kunmap_local` happens when a
        // different page should be unmapped first, then there must necessarily
        // be a call to `kmap_local_page` other than the call just above in
        // `with_page_mapped` that made that possible. In this case, it is the
        // unsafe block that wraps that other call that is incorrect.
        unsafe { bindings::kunmap_local(mapped_addr) };

        res
    }

    /// Runs a piece of code with a raw pointer to a slice of this page, with
    /// bounds checking.
    ///
    /// If `f` is called, then it will be called with a pointer that points at
    /// `off` bytes into the page, and the pointer will be valid for at least
    /// `len` bytes. The pointer is only valid on this task, as this method uses
    /// a local mapping.
    ///
    /// If `off` and `len` refers to a region outside of this page, then this
    /// method returns `EINVAL` and does not call `f`.
    pub fn with_pointer_into_page<T>(
        &self,
        off: usize,
        len: usize,
        f: impl FnOnce(*mut u8) -> Result<T>,
    ) -> Result<T> {
        let bounds_ok = off <= PAGE_SIZE && len <= PAGE_SIZE && (off + len) <= PAGE_SIZE;

        if bounds_ok {
            self.with_page_mapped(move |page_addr| {
                // SAFETY: The `off` integer is at most `PAGE_SIZE`, so this pointer offset will
                // result in a pointer that is in bounds or one off the end of the page.
                f(unsafe { page_addr.cast::<u8>().add(off) })
            })
        } else {
            Err(EINVAL)
        }
    }

    /// Maps the page and reads from it into the given buffer.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `dest` is valid for writing `len` bytes.
    pub unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) -> Result {
        self.with_pointer_into_page(offset, len, move |from_ptr| {
            // SAFETY: If `with_pointer_into_page` calls into this closure, then
            // it has performed a bounds check and guarantees that `from_ptr` is
            // valid for `len` bytes.
            unsafe { ptr::copy(from_ptr, dest, len) };
            Ok(())
        })
    }

    /// Maps the page and writes into it from the given buffer.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `src` is valid for reading `len` bytes.
    pub unsafe fn write(&self, src: *const u8, offset: usize, len: usize) -> Result {
        self.with_pointer_into_page(offset, len, move |to_ptr| {
            // SAFETY: If `with_pointer_into_page` calls into this closure, then
            // it has performed a bounds check and guarantees that `to_ptr` is
            // valid for `len` bytes.
            unsafe { ptr::copy(src, to_ptr, len) };
            Ok(())
        })
    }

    /// Maps the page and zeroes the given slice.
    pub fn fill_zero(&self, offset: usize, len: usize) -> Result {
        self.with_pointer_into_page(offset, len, move |to_ptr| {
            // SAFETY: If `with_pointer_into_page` calls into this closure, then
            // it has performed a bounds check and guarantees that `to_ptr` is
            // valid for `len` bytes.
            unsafe { ptr::write_bytes(to_ptr, 0u8, len) };
            Ok(())
        })
    }
}

impl Drop for Page {
    fn drop(&mut self) {
        // SAFETY: By the type invariants, we have ownership of the page and can
        // free it.
        unsafe { bindings::__free_pages(self.page.as_ptr(), 0) };
    }
}
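The commit message's plan for higher-order pages could look roughly like the sketch below. This is hypothetical and not part of the patch: a const generic order on the struct, with the current name kept as an alias for order zero, and the multi-page mapping logic confined to `with_pointer_into_page`.

// Hypothetical sketch of the future generalization described in the commit
// message; not part of this patch.
pub struct Pages<const ORDER: u32> {
    page: NonNull<bindings::page>,
}

/// An order-zero allocation, i.e. what this patch calls `Page`.
pub type Page = Pages<0>;

impl<const ORDER: u32> Pages<ORDER> {
    /// The number of bytes covered by this allocation.
    pub const fn size() -> usize {
        PAGE_SIZE << ORDER
    }

    // `with_pointer_into_page` would be the only method that needs to learn
    // how to map ranges spanning multiple pages; `read`, `write` and
    // `fill_zero` build on it unchanged.
}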

0 commit comments