// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2024 Google LLC.

//! Kernel page allocation and management.

use crate::{bindings, error::code::*, error::Result, uaccess::UserSliceReader};
use core::{
    alloc::AllocError,
    ptr::{self, NonNull},
};

/// A bitwise shift for the page size.
pub const PAGE_SHIFT: usize = bindings::PAGE_SHIFT as usize;

/// The number of bytes in a page.
pub const PAGE_SIZE: usize = 1 << PAGE_SHIFT;

/// A pointer to a page that owns the page allocation.
///
/// # Invariants
///
/// The pointer points at a page, and has ownership over the page.
pub struct Page {
    page: NonNull<bindings::page>,
}

// SAFETY: It is safe to transfer page allocations between threads.
unsafe impl Send for Page {}

// SAFETY: As long as the safety requirements for `&self` methods on this type
// are followed, there is no problem with calling them in parallel.
unsafe impl Sync for Page {}

impl Page {
    /// Allocates a new page.
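    ///
    /// # Examples
    ///
    /// A minimal sketch, not taken from an in-tree user; the helper function
    /// below is hypothetical:
    ///
    /// ```
    /// use kernel::page::Page;
    /// use kernel::prelude::*;
    ///
    /// // Hypothetical helper: allocates a page, which is already filled with
    /// // zeroes because the allocation uses `__GFP_ZERO`.
    /// fn alloc_zeroed_page() -> Result<Page> {
    ///     let page = Page::new()?;
    ///     Ok(page)
    /// }
    /// ```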
    pub fn new() -> Result<Self, AllocError> {
        // SAFETY: These are the correct arguments to allocate a single page.
        let page = unsafe {
            bindings::alloc_pages(
                bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::__GFP_HIGHMEM,
                0,
            )
        };
        let page = NonNull::new(page).ok_or(AllocError)?;

        // INVARIANT: We just successfully allocated a page, so we have
        // ownership of it, which we transfer to the new `Page` object.
        Ok(Self { page })
    }

    /// Returns a raw pointer to the page.
    pub fn as_ptr(&self) -> *mut bindings::page {
        self.page.as_ptr()
    }

    /// Runs a piece of code with this page mapped to an address.
    ///
    /// The page is unmapped when this call returns.
    ///
    /// It is up to the caller to use the provided raw pointer correctly.
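    ///
    /// # Examples
    ///
    /// A hedged sketch of reading through a temporary local mapping; the
    /// helper function is hypothetical:
    ///
    /// ```
    /// use kernel::page::Page;
    ///
    /// // Hypothetical helper: returns the first byte of the page.
    /// fn first_byte(page: &Page) -> u8 {
    ///     page.with_page_mapped(|addr| {
    ///         // SAFETY: `addr` is valid for at least one byte while mapped;
    ///         // this sketch assumes no other thread is concurrently writing
    ///         // to the page.
    ///         unsafe { addr.read() }
    ///     })
    /// }
    /// ```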
    pub fn with_page_mapped<T>(&self, f: impl FnOnce(*mut u8) -> T) -> T {
        // SAFETY: `page` is valid due to the type invariants on `Page`.
        let mapped_addr = unsafe { bindings::kmap_local_page(self.as_ptr()) };

        let res = f(mapped_addr.cast());

        // SAFETY: This unmaps the page mapped above.
        //
        // Since this API takes the user code as a closure, it can only be used
        // in a manner where the pages are unmapped in reverse order. This is as
        // required by `kunmap_local`.
        //
        // In other words, if this call to `kunmap_local` happens when a
        // different page should be unmapped first, then there must necessarily
        // be a call to `kmap_local_page` other than the call just above in
        // `with_page_mapped` that made that possible. In this case, it is the
        // unsafe block that wraps that other call that is incorrect.
        unsafe { bindings::kunmap_local(mapped_addr) };

        res
    }

    /// Runs a piece of code with a raw pointer to a slice of this page, with
    /// bounds checking.
    ///
    /// If `f` is called, then it will be called with a pointer that points at
    /// `off` bytes into the page, and the pointer will be valid for at least
    /// `len` bytes. The pointer is only valid on this task, as this method uses
    /// a local mapping.
    ///
    /// If `off` and `len` refer to a region outside of this page, then this
    /// method returns `EINVAL` and does not call `f`.
    ///
    /// It is up to the caller to use the provided raw pointer correctly.
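    ///
    /// # Examples
    ///
    /// A hedged sketch that overwrites a bounded region of the page with a
    /// marker byte; the helper function is hypothetical:
    ///
    /// ```
    /// use kernel::page::Page;
    /// use kernel::prelude::*;
    ///
    /// // Hypothetical helper: fills `len` bytes starting at `off` with 0xAA.
    /// fn poison_region(page: &Page, off: usize, len: usize) -> Result {
    ///     page.with_pointer_into_page(off, len, |ptr| {
    ///         // SAFETY: `ptr` is valid for `len` bytes; this sketch assumes
    ///         // no other thread accesses the page concurrently.
    ///         unsafe { core::ptr::write_bytes(ptr, 0xAA, len) };
    ///         Ok(())
    ///     })
    /// }
    /// ```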
    pub fn with_pointer_into_page<T>(
        &self,
        off: usize,
        len: usize,
        f: impl FnOnce(*mut u8) -> Result<T>,
    ) -> Result<T> {
        let bounds_ok = off <= PAGE_SIZE && len <= PAGE_SIZE && (off + len) <= PAGE_SIZE;

        if bounds_ok {
            self.with_page_mapped(move |page_addr| {
                // SAFETY: The `off` integer is at most `PAGE_SIZE`, so this pointer offset will
                // result in a pointer that is in bounds or one off the end of the page.
                f(unsafe { page_addr.add(off) })
            })
        } else {
            Err(EINVAL)
        }
    }

    /// Maps the page and reads from it into the given buffer.
    ///
    /// This method will perform bounds checks on the page offset. If `offset ..
    /// offset+len` goes outside of the page, then this call returns `EINVAL`.
    ///
    /// # Safety
    ///
    /// * Callers must ensure that `dst` is valid for writing `len` bytes.
    /// * Callers must ensure that this call does not race with a write to the
    ///   same page that overlaps with this read.
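    ///
    /// # Examples
    ///
    /// A hedged sketch that copies the whole page into a caller-provided
    /// buffer; the helper function is hypothetical:
    ///
    /// ```
    /// use kernel::page::{Page, PAGE_SIZE};
    /// use kernel::prelude::*;
    ///
    /// // Hypothetical helper: snapshots the page contents into `buf`.
    /// fn snapshot(page: &Page, buf: &mut [u8; PAGE_SIZE]) -> Result {
    ///     // SAFETY: `buf` is valid for writing `PAGE_SIZE` bytes, and this
    ///     // sketch assumes no concurrent writer to the page.
    ///     unsafe { page.read_raw(buf.as_mut_ptr(), 0, PAGE_SIZE) }
    /// }
    /// ```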
    pub unsafe fn read_raw(&self, dst: *mut u8, offset: usize, len: usize) -> Result {
        self.with_pointer_into_page(offset, len, move |src| {
            // SAFETY: If `with_pointer_into_page` calls into this closure, then
            // it has performed a bounds check and guarantees that `src` is
            // valid for `len` bytes.
            //
            // The caller guarantees that there is no data race.
            unsafe { ptr::copy(src, dst, len) };
            Ok(())
        })
    }

    /// Maps the page and writes into it from the given buffer.
    ///
    /// This method will perform bounds checks on the page offset. If `offset ..
    /// offset+len` goes outside of the page, then this call returns `EINVAL`.
    ///
    /// # Safety
    ///
    /// * Callers must ensure that `src` is valid for reading `len` bytes.
    /// * Callers must ensure that this call does not race with a read or write
    ///   to the same page that overlaps with this write.
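    ///
    /// # Examples
    ///
    /// A hedged sketch that copies a byte slice to the start of the page; the
    /// helper function is hypothetical:
    ///
    /// ```
    /// use kernel::page::Page;
    /// use kernel::prelude::*;
    ///
    /// // Hypothetical helper: stores `data` at offset zero. Fails with
    /// // `EINVAL` if `data` does not fit in the page.
    /// fn store_at_start(page: &Page, data: &[u8]) -> Result {
    ///     // SAFETY: `data` is valid for reading `data.len()` bytes, and this
    ///     // sketch assumes no concurrent access to the overlapping region of
    ///     // the page.
    ///     unsafe { page.write_raw(data.as_ptr(), 0, data.len()) }
    /// }
    /// ```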
    pub unsafe fn write_raw(&self, src: *const u8, offset: usize, len: usize) -> Result {
        self.with_pointer_into_page(offset, len, move |dst| {
            // SAFETY: If `with_pointer_into_page` calls into this closure, then
            // it has performed a bounds check and guarantees that `dst` is
            // valid for `len` bytes.
            //
            // The caller guarantees that there is no data race.
            unsafe { ptr::copy(src, dst, len) };
            Ok(())
        })
    }

    /// Maps the page and zeroes the given slice.
    ///
    /// This method will perform bounds checks on the page offset. If `offset ..
    /// offset+len` goes outside of the page, then this call returns `EINVAL`.
    ///
    /// # Safety
    ///
    /// Callers must ensure that this call does not race with a read or write to
    /// the same page that overlaps with this write.
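    ///
    /// # Examples
    ///
    /// A hedged sketch that zeroes the entire page; the helper function is
    /// hypothetical:
    ///
    /// ```
    /// use kernel::page::{Page, PAGE_SIZE};
    /// use kernel::prelude::*;
    ///
    /// // Hypothetical helper: clears the whole page.
    /// fn clear(page: &Page) -> Result {
    ///     // SAFETY: This sketch assumes no concurrent access to the page.
    ///     unsafe { page.fill_zero(0, PAGE_SIZE) }
    /// }
    /// ```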
    pub unsafe fn fill_zero(&self, offset: usize, len: usize) -> Result {
        self.with_pointer_into_page(offset, len, move |dst| {
            // SAFETY: If `with_pointer_into_page` calls into this closure, then
            // it has performed a bounds check and guarantees that `dst` is
            // valid for `len` bytes.
            //
            // The caller guarantees that there is no data race.
            unsafe { ptr::write_bytes(dst, 0u8, len) };
            Ok(())
        })
    }

    /// Copies data from userspace into this page.
    ///
    /// This method will perform bounds checks on the page offset. If `offset ..
    /// offset+len` goes outside of the page, then this call returns `EINVAL`.
    ///
    /// # Safety
    ///
    /// Callers must ensure that this call does not race with a read or write to
    /// the same page that overlaps with this write.
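    ///
    /// # Examples
    ///
    /// A hedged sketch that fills a freshly allocated page from userspace; the
    /// helper function is hypothetical:
    ///
    /// ```
    /// use kernel::page::{Page, PAGE_SIZE};
    /// use kernel::prelude::*;
    /// use kernel::uaccess::UserSliceReader;
    ///
    /// // Hypothetical helper: allocates a page and reads one page worth of
    /// // data from `reader` into it.
    /// fn import_page(reader: &mut UserSliceReader) -> Result<Page> {
    ///     let page = Page::new()?;
    ///     // SAFETY: The page was just allocated and is not shared with any
    ///     // other thread, so this write cannot race with anything.
    ///     unsafe { page.copy_from_user_slice(reader, 0, PAGE_SIZE)? };
    ///     Ok(page)
    /// }
    /// ```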
    pub unsafe fn copy_from_user_slice(
        &self,
        reader: &mut UserSliceReader,
        offset: usize,
        len: usize,
    ) -> Result {
        self.with_pointer_into_page(offset, len, move |dst| {
            // SAFETY: If `with_pointer_into_page` calls into this closure, then
            // it has performed a bounds check and guarantees that `dst` is
            // valid for `len` bytes.
            //
            // The caller guarantees that there is no data race when writing
            // to `dst`.
            unsafe { reader.read_raw(dst, len) }
        })
    }
}

impl Drop for Page {
    fn drop(&mut self) {
        // SAFETY: By the type invariants, we have ownership of the page and can
        // free it.
        unsafe { bindings::__free_pages(self.page.as_ptr(), 0) };
    }
}