Bring in syscall layer from old mikros std

This commit is contained in:
pjht 2024-06-05 14:15:50 -05:00
parent 58ab34fafe
commit ce55f176f4
Signed by: pjht
GPG Key ID: 7B5F6AFBEC7EE78E
77 changed files with 15051 additions and 12 deletions

View File

@ -344,6 +344,14 @@ dependencies = [
"serde",
]
[[package]]
name = "bit_field"
version = "0.10.2"
dependencies = [
"compiler_builtins",
"rustc-std-workspace-core",
]
[[package]]
name = "bitflags"
version = "1.3.2"
@ -355,6 +363,10 @@ name = "bitflags"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
dependencies = [
"compiler_builtins",
"rustc-std-workspace-core",
]
[[package]]
name = "block-buffer"
@ -5361,6 +5373,7 @@ dependencies = [
"std_detect",
"unwind",
"wasi",
"x86_64",
]
[[package]]
@ -6234,6 +6247,14 @@ version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "volatile"
version = "0.4.6"
dependencies = [
"compiler_builtins",
"rustc-std-workspace-core",
]
[[package]]
name = "wait-timeout"
version = "0.2.0"
@ -6570,6 +6591,17 @@ version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"
[[package]]
name = "x86_64"
version = "0.15.1"
dependencies = [
"bit_field",
"bitflags 2.5.0",
"compiler_builtins",
"rustc-std-workspace-core",
"volatile",
]
[[package]]
name = "xattr"
version = "1.3.1"

View File

@ -59,6 +59,21 @@ wasi = { version = "0.11.0", features = ['rustc-dep-of-std'], default-features =
r-efi = { version = "4.2.0", features = ['rustc-dep-of-std'] }
r-efi-alloc = { version = "1.0.0", features = ['rustc-dep-of-std'] }
[target.'cfg(target_os = "mikros")'.dependencies]
# elfloader = "0.16.0"
# linked_list_allocator = "0.10.4"
# spin = "0.9.4"
x86_64 = { path = "../../mikros_std_deps/x86_64-0.15.1", default-features = false, features = ["rustc-dep-of-std"] }
# hashbrown = "0.13.1"
# crossbeam-queue = { version = "0.3.8", default-features = false, features = ["alloc"] }
# core2 = { version = "0.4.0", default-features = false, features = ["alloc", "serde"], path = "../core2" }
# derive-try-from-primitive = "1.0.0"
# serde = { version = "1.0.151", default-features = false, features = ["alloc", "derive"] }
# postcard = { version = "1.0.2", default-features = false, features = ["alloc"] }
# elf = { version = "0.7.4", default-features = false }
[features]
backtrace = [
'addr2line/rustc-dep-of-std',

View File

@ -0,0 +1,74 @@
use super::{
buffers::KernelBufferAllocator,
syscalls::{copy_to, drop_space, map_assert_unused, map_only_unused, map_free, new_space},
};
use alloc::vec::Vec;
use crate::mem;
use crate::sync::Mutex;
use x86_64::{
structures::paging::{Page, PageTableFlags},
VirtAddr,
};
#[allow(unused)]
pub static ACTIVE_SPACE: Mutex<AddressSpace> = Mutex::new(AddressSpace(0));
#[derive(Debug)]
pub struct AddressSpace(u64);
#[derive(Debug)]
pub struct PagingError;
#[allow(clippy::new_without_default)] // The idea of a "default" address space makes no sense
impl AddressSpace {
#[must_use]
pub fn new() -> Self {
Self(new_space())
}
#[must_use]
pub fn into_raw(self) -> u64 {
let id = self.0;
mem::forget(self);
id
}
/// Maps the given range of virtual pages to newly allocated physical memory.
/// The newly allocated physical memory contains garbage data, so the mapping is always writable;
/// it does not make sense to map garbage data read-only.
/// The requested virtual page range is asserted to be unmapped, which makes this function safe.
#[allow(unused)]
pub fn map_assert_unused(&mut self, page: Page, num_pages: usize) -> Result<(), PagingError> {
map_assert_unused(self.0, page, num_pages, PageTableFlags::USER_ACCESSIBLE)
.map_err(|_| PagingError)
}
#[allow(unused)]
pub fn map_only_unused(&mut self, page: Page, num_pages: usize) -> Result<(), PagingError> {
map_only_unused(self.0, page, num_pages, PageTableFlags::USER_ACCESSIBLE)
.map_err(|_| PagingError)
}
/// Maps free virtual pages to newly allocated physical memory and returns the starting page.
/// The newly allocated physical memory contains garbage data, so the mapping is always writable;
/// it does not make sense to map garbage data read-only.
#[allow(unused)]
pub fn map_free(&mut self, num_pages: usize) -> Result<Page, PagingError> {
Page::from_start_address(VirtAddr::from_ptr(
map_free(self.0, num_pages, PageTableFlags::USER_ACCESSIBLE)
.map_err(|_| PagingError)?,
))
.map_err(|_| PagingError)
}
pub fn copy_to(&self, dst: *mut u8, src: &[u8]) -> Result<(), PagingError> {
let mut buf = Vec::with_capacity_in(src.len(), KernelBufferAllocator::new());
buf.extend_from_slice(src);
copy_to(self.0, buf, dst, src.len()).map_err(|_| PagingError)
}
}
impl Drop for AddressSpace {
fn drop(&mut self) {
drop_space(self.0);
}
}
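A minimal usage sketch of the new wrapper, assuming a caller elsewhere in the mikros port; `spawn_from_image`, `image`, and `entry` are hypothetical and not taken from the commit:

```rust
use super::{address_space::AddressSpace, syscalls};

// Hypothetical helper: stage a program image in a fresh address space and
// hand the space to the kernel to start a new process.
fn spawn_from_image(image: &[u8], entry: u64) -> Result<u64, ()> {
    let mut space = AddressSpace::new();
    // Reserve enough fresh (always writable) pages in the new space.
    let page = space
        .map_free(image.len().div_ceil(4096))
        .map_err(|_| ())?;
    // Copy the image into the new space through a kernel buffer.
    space
        .copy_to(page.start_address().as_mut_ptr(), image)
        .map_err(|_| ())?;
    // `new_process` consumes the space; its id is transferred via `into_raw`.
    syscalls::new_process(entry, space)
}
```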

View File

@ -3,10 +3,10 @@ mod linked_list_allocator;
use crate::{
alloc::{GlobalAlloc, Layout, System},
sync::Mutex,
arch::asm,
ptr::{self, NonNull}
ptr::NonNull
};
use linked_list_allocator::hole::HoleList;
use super::address_space;
struct Wrap(Mutex<HoleList>);
@ -26,10 +26,15 @@ unsafe impl GlobalAlloc for System {
.unwrap_or_else(|_| {
drop(locked_self);
let num_pages = layout.size().div_ceil(4096) * 2;
let addr = syscall3(2, 0, num_pages as u64, 0x4);
unsafe {
self.dealloc(
ptr::with_exposed_provenance_mut(addr as usize),
address_space::ACTIVE_SPACE
.lock()
.unwrap()
.map_free(num_pages)
.unwrap()
.start_address()
.as_mut_ptr(),
Layout::from_size_align(num_pages * 4096, 4096).unwrap(),
);
}
@ -52,11 +57,3 @@ unsafe impl GlobalAlloc for System {
}
}
}
pub fn syscall3(num: u64, arg1: u64, arg2: u64, arg3: u64) -> u64 {
let retval;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, in("rdx") arg2, in("rsi") arg3, lateout("rax") retval);
};
retval
}

View File

@ -0,0 +1,174 @@
use crate::{
alloc::Allocator,
ptr::{self, NonNull},
};
use crate::sync::Mutex;
use alloc::vec::Vec;
use super::syscalls;
pub type KernelBuffer = Vec<u8, KernelBufferAllocator>;
pub struct KernelBufferAllocator(Mutex<Option<(u64, usize)>>);
#[allow(clippy::new_without_default)]
impl KernelBufferAllocator {
pub fn new() -> Self {
Self(Mutex::new(None))
}
pub unsafe fn from_id_len(id: u64, len: usize) -> Self {
Self(Mutex::new(Some((id, len))))
}
pub fn into_id(self) -> Option<u64> {
Some((*self.0.lock().unwrap())?.0)
}
}
#[allow(clippy::unnecessary_wraps)]
fn allocate_buf_from_layout(
layout: core::alloc::Layout,
) -> Result<(NonNull<[u8]>, u64, usize), core::alloc::AllocError> {
assert!(layout.align() <= 4096);
let (id, buf, true_size) = syscalls::new_buffer(layout.size());
let ptr = NonNull::new(ptr::slice_from_raw_parts_mut(buf, layout.size())).unwrap();
Ok((ptr, id, true_size))
}
unsafe impl Allocator for KernelBufferAllocator {
fn allocate(
&self,
layout: core::alloc::Layout,
) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
assert!(self.0.lock().unwrap().is_none());
let (ptr, id, true_size) = allocate_buf_from_layout(layout)?;
*self.0.lock().unwrap() = Some((id, true_size));
Ok(ptr)
}
unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: core::alloc::Layout) {
syscalls::drop_buffer(self.0.lock().unwrap().unwrap().0);
*self.0.lock().unwrap() = None;
}
fn allocate_zeroed(
&self,
layout: core::alloc::Layout,
) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
let ptr = self.allocate(layout)?;
// SAFETY: `alloc` returns a valid memory block
unsafe { ptr.as_non_null_ptr().as_ptr().write_bytes(0, ptr.len()) }
Ok(ptr)
}
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: core::alloc::Layout,
new_layout: core::alloc::Layout,
) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
assert!(new_layout.align() <= 4096);
if new_layout.size() <= self.0.lock().unwrap().unwrap().1 {
Ok(NonNull::new(ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_layout.size(),
))
.unwrap())
} else {
let (new_ptr, new_id, new_true_size) = allocate_buf_from_layout(new_layout)?;
// SAFETY: because `new_layout.size()` must be greater than or equal to
// `old_layout.size()`, both the old and new memory allocation are valid for reads and
// writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
// safe. The safety contract for `dealloc` must be upheld by the caller.
unsafe {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size());
self.deallocate(ptr, old_layout);
}
*self.0.lock().unwrap() = Some((new_id, new_true_size));
Ok(new_ptr)
}
}
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: core::alloc::Layout,
new_layout: core::alloc::Layout,
) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
assert!(new_layout.align() <= 4096);
if new_layout.size() <= self.0.lock().unwrap().unwrap().1 {
Ok(NonNull::new(ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_layout.size(),
))
.unwrap())
} else {
let (new_ptr, new_id, new_true_size) = allocate_buf_from_layout(new_layout)?;
// SAFETY: `allocate_buf_from_layout` returns a valid memory block
unsafe {
new_ptr
.as_non_null_ptr()
.as_ptr()
.write_bytes(0, new_ptr.len());
}
// SAFETY: because `new_layout.size()` must be greater than or equal to
// `old_layout.size()`, both the old and new memory allocation are valid for reads and
// writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
// safe. The safety contract for `dealloc` must be upheld by the caller.
unsafe {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size());
self.deallocate(ptr, old_layout);
}
*self.0.lock().unwrap() = Some((new_id, new_true_size));
Ok(new_ptr)
}
}
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: core::alloc::Layout,
new_layout: core::alloc::Layout,
) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
assert!(new_layout.align() <= 4096);
Ok(NonNull::new(ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_layout.size(),
))
.unwrap())
}
}
pub trait IntoId {
fn into_id(self) -> Option<u64>;
}
impl IntoId for Vec<u8, KernelBufferAllocator> {
fn into_id(self) -> Option<u64> {
self.into_raw_parts_with_alloc().3.into_id()
}
}
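A short sketch of how a kernel buffer is typically built and handed off by id, mirroring `AddressSpace::copy_to` above and `ipc_send` in syscalls.rs; `send_bytes` and its arguments are hypothetical:

```rust
use alloc::vec::Vec;
use super::{buffers::KernelBufferAllocator, syscalls};

// Hypothetical helper: each KernelBufferAllocator instance tracks exactly one
// kernel buffer id, so a fresh allocator is used per message.
fn send_bytes(pid: u64, payload: &[u8]) -> Result<(), ()> {
    let mut buf = Vec::with_capacity_in(payload.len(), KernelBufferAllocator::new());
    buf.extend_from_slice(payload);
    // `ipc_send` extracts the buffer id (via `IntoId`) and passes it to the kernel.
    syscalls::ipc_send(pid, buf)
}
```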

View File

@ -1,4 +1,10 @@
#![deny(unsafe_op_in_unsafe_fn)]
#![allow(dead_code)]
#![allow(exported_private_dependencies)]
mod address_space;
mod syscalls;
mod buffers;
pub mod alloc;
pub mod args;

View File

@ -0,0 +1,164 @@
#![allow(clippy::result_unit_err)]
mod raw;
use alloc::vec::Vec;
use core::slice;
#[allow(clippy::wildcard_imports)]
use raw::*;
use x86_64::structures::paging::{Page, PageTableFlags};
use crate::ptr;
use super::{
address_space::AddressSpace,
buffers::{IntoId, KernelBuffer, KernelBufferAllocator},
};
pub(crate) fn print_char(chr: char) {
syscall1(0, chr as u64);
}
pub fn exit() -> ! {
syscall0(1);
unreachable!();
}
pub(crate) fn map_free(space: u64, num_pages: usize, flags: PageTableFlags) -> Result<*mut u8, ()> {
if let start @ 1.. = syscall3(2, space, num_pages as u64, flags.bits()) {
Ok(ptr::with_exposed_provenance_mut(start as usize))
} else {
Err(())
}
}
#[must_use]
pub fn get_initrd() -> &'static [u8] {
let (start, size) = syscall0r2(3);
unsafe { slice::from_raw_parts(ptr::with_exposed_provenance(start as usize), size as usize) }
}
pub(crate) fn new_space() -> u64 {
syscall0(4)
}
pub(crate) fn drop_space(space: u64) {
syscall1(5, space);
}
pub(crate) fn map_assert_unused(
space: u64,
start: Page,
num_pages: usize,
flags: PageTableFlags,
) -> Result<(), ()> {
if syscall4(
6,
space,
start.start_address().as_u64(),
num_pages as u64,
flags.bits(),
) == 1
{
Err(())
} else {
Ok(())
}
}
pub(crate) fn copy_to(
space: u64,
buffer: KernelBuffer,
dst: *mut u8,
len: usize,
) -> Result<(), ()> {
if syscall4(7, space, buffer.into_id().unwrap(), dst as u64, len as u64) == 1 {
Err(())
} else {
Ok(())
}
}
pub fn new_process(entry_point: u64, space: AddressSpace) -> Result<u64, ()> {
let (err, pid) = syscall2r2(8, entry_point, space.into_raw());
if err == 1 {
Err(())
} else {
Ok(pid)
}
}
pub fn register(typ: u64) {
syscall1(9, typ);
}
#[must_use]
pub fn try_get_registered(typ: u64) -> Option<u64> {
if let (0, channel) = syscall1r2(10, typ) {
Some(channel)
} else {
None
}
}
pub(crate) fn ipc_send(pid: u64, buffer: KernelBuffer) -> Result<(), ()> {
let len = buffer.len();
if syscall3(
11,
pid,
buffer.into_raw_parts_with_alloc().3.into_id().unwrap(),
len as u64,
) == 0
{
Ok(())
} else {
Err(())
}
}
pub(crate) fn ipc_recv() -> Option<KernelBuffer> {
if let (data @ 1.., len, id) = syscall0r3(12) {
Some(unsafe {
Vec::from_raw_parts_in(
ptr::with_exposed_provenance_mut(data as usize),
len as usize,
len as usize,
KernelBufferAllocator::from_id_len(id, len as usize),
)
})
} else {
None
}
}
#[must_use]
pub fn get_pid() -> u64 {
syscall0(13)
}
pub(crate) fn drop_buffer(buf: u64) {
syscall1(15, buf);
}
pub(crate) fn new_buffer(len: usize) -> (u64, *mut u8, usize) {
let (id, start, allocated_len) = syscall1r3(16, len as u64);
(id, ptr::with_exposed_provenance_mut(start as usize), allocated_len as usize)
}
pub(crate) fn map_only_unused(
space: u64,
start: Page,
num_pages: usize,
flags: PageTableFlags,
) -> Result<(), ()> {
if syscall4(
14,
space,
start.start_address().as_u64(),
num_pages as u64,
flags.bits(),
) == 1
{
Err(())
} else {
Ok(())
}
}

View File

@ -0,0 +1,137 @@
#![allow(unused)]
use core::arch::asm;
pub fn syscall0(num: u64) -> u64 {
let retval;
unsafe {
asm!("int 0x80", in("rax") num, lateout("rax") retval);
};
retval
}
pub fn syscall1(num: u64, arg1: u64) -> u64 {
let retval;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, lateout("rax") retval);
};
retval
}
pub fn syscall2(num: u64, arg1: u64, arg2: u64) -> u64 {
let retval;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, in("rdx") arg2, lateout("rax") retval);
};
retval
}
pub fn syscall3(num: u64, arg1: u64, arg2: u64, arg3: u64) -> u64 {
let retval;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, in("rdx") arg2, in("rsi") arg3, lateout("rax") retval);
};
retval
}
pub fn syscall4(num: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64) -> u64 {
let retval;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, in("rdx") arg2, in("rsi") arg3, in("rdi") arg4, lateout("rax") retval);
};
retval
}
pub fn syscall0r2(num: u64) -> (u64, u64) {
let retval;
let retval2;
unsafe {
asm!("int 0x80", in("rax") num, lateout("rax") retval, lateout("rcx") retval2);
};
(retval, retval2)
}
pub fn syscall1r2(num: u64, arg1: u64) -> (u64, u64) {
let retval;
let retval2;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, lateout("rax") retval, lateout("rcx") retval2);
};
(retval, retval2)
}
pub fn syscall2r2(num: u64, arg1: u64, arg2: u64) -> (u64, u64) {
let retval;
let retval2;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, in("rdx") arg2, lateout("rax") retval, lateout("rcx") retval2);
};
(retval, retval2)
}
pub fn syscall3r2(num: u64, arg1: u64, arg2: u64, arg3: u64) -> (u64, u64) {
let retval;
let retval2;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, in("rdx") arg2, in("rsi") arg3, lateout("rax") retval, lateout("rcx") retval2);
};
(retval, retval2)
}
pub fn syscall4r2(num: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64) -> (u64, u64) {
let retval;
let retval2;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, in("rdx") arg2, in("rsi") arg3, in("rdi") arg4, lateout("rax") retval, lateout("rcx") retval2);
};
(retval, retval2)
}
pub fn syscall0r3(num: u64) -> (u64, u64, u64) {
let retval;
let retval2;
let retval3;
unsafe {
asm!("int 0x80", in("rax") num, lateout("rax") retval, lateout("rcx") retval2, lateout("rdx") retval3);
};
(retval, retval2, retval3)
}
pub fn syscall1r3(num: u64, arg1: u64) -> (u64, u64, u64) {
let retval;
let retval2;
let retval3;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, lateout("rax") retval, lateout("rcx") retval2, lateout("rdx") retval3);
};
(retval, retval2, retval3)
}
pub fn syscall2r3(num: u64, arg1: u64, arg2: u64) -> (u64, u64, u64) {
let retval;
let retval2;
let retval3;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, in("rdx") arg2, lateout("rax") retval, lateout("rcx") retval2, lateout("rdx") retval3);
};
(retval, retval2, retval3)
}
pub fn syscall3r3(num: u64, arg1: u64, arg2: u64, arg3: u64) -> (u64, u64, u64) {
let retval;
let retval2;
let retval3;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, in("rdx") arg2, in("rsi") arg3, lateout("rax") retval, lateout("rcx") retval2, lateout("rdx") retval3);
};
(retval, retval2, retval3)
}
pub fn syscall4r3(num: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64) -> (u64, u64, u64) {
let retval;
let retval2;
let retval3;
unsafe {
asm!("int 0x80", in("rax") num, in ("rcx") arg1, in("rdx") arg2, in("rsi") arg3, in("rdi") arg4, lateout("rax") retval, lateout("rcx") retval2, lateout("rdx") retval3);
};
(retval, retval2, retval3)
}
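These wrappers encode the calling convention used throughout the new layer: syscall number in `rax`, arguments in `rcx`, `rdx`, `rsi`, `rdi`, and return values in `rax` (plus `rcx`/`rdx` for the `r2`/`r3` forms). A hedged one-line example using syscall 0, which `print_char` in syscalls.rs wraps; `print_byte` is hypothetical and would sit alongside these wrappers:

```rust
// Hypothetical illustration: rax = 0 (the print-character syscall),
// rcx = the character; the return value in rax is ignored.
fn print_byte(b: u8) {
    syscall1(0, u64::from(b));
}
```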

View File

@ -0,0 +1,6 @@
{
"git": {
"sha1": "9a5f9ed6216537947ca82ec2d3ecf3dcbf157cec"
},
"path_in_vcs": ""
}

View File

@ -0,0 +1,2 @@
target
Cargo.lock

View File

@ -0,0 +1,16 @@
language: rust
rust:
- stable
- beta
- nightly
cache: cargo
sudo: false
script:
- cargo build --verbose --all
- cargo test --verbose --all
- if [ "${TRAVIS_RUST_VERSION}" = "nightly" ]; then
cargo bench --verbose --all;
fi

View File

@ -0,0 +1,49 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
name = "bit_field"
version = "0.10.2"
authors = ["Philipp Oppermann <dev@phil-opp.com>"]
description = "Simple bit field trait providing get_bit, get_bits, set_bit, and set_bits methods for Rust's integral types."
documentation = "https://docs.rs/bit_field"
readme = "README.md"
keywords = ["no_std"]
license = "Apache-2.0/MIT"
repository = "https://github.com/phil-opp/rust-bit-field"
[package.metadata.release]
dev-version = false
pre-release-commit-message = "Release version {{version}}"
[[package.metadata.release.pre-release-replacements]]
file = "Changelog.md"
search = "## Unreleased"
replace = """
## Unreleased
# {{version}} {{date}}"""
exactly = 1
[dependencies.core]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
[dependencies.compiler_builtins]
version = "0.1"
optional = true
[features]
rustc-dep-of-std = [
"core",
"compiler_builtins",
]

View File

@ -0,0 +1,19 @@
[package]
name = "bit_field"
version = "0.10.2"
authors = ["Philipp Oppermann <dev@phil-opp.com>"]
license = "Apache-2.0/MIT"
description = "Simple bit field trait providing get_bit, get_bits, set_bit, and set_bits methods for Rust's integral types."
keywords = ["no_std"]
repository = "https://github.com/phil-opp/rust-bit-field"
documentation = "https://docs.rs/bit_field"
[dependencies]
[package.metadata.release]
dev-version = false
pre-release-replacements = [
{ file = "Changelog.md", search = "## Unreleased", replace = "## Unreleased\n\n# {{version}} {{date}}", exactly = 1 },
]
pre-release-commit-message = "Release version {{version}}"

View File

@ -0,0 +1,37 @@
## Unreleased
# 0.10.2 2023-02-25
- Add `#[track_caller]` to methods ([#27](https://github.com/phil-opp/rust-bit-field/pull/27))
# 0.10.1 2020-08-23
- Added bit manipulation functions for 128-bit integers ([#24](https://github.com/phil-opp/rust-bit-field/pull/24))
## [0.10.0] - 2019-05-03
### Added
- Support all range types (`Range`, `RangeInclusive`, `RangeFrom`, …) for `get_bits` and `set_bits` methods ([#22](https://github.com/phil-opp/rust-bit-field/pull/22))
### Changed
- **Breaking**: `BitField` trait now has a `BIT_LENGTH` associated const instead of a `bit_length` associated function.
- `BitField` and `BitArray` methods are now inlined which causes much higher performance.
## [0.9.0] - 2017-10-15
### Changed
- Bit indexes in `BitField` is now `usize` instead of `u8`.
## [0.8.0] - 2017-07-16
### Added
- `BitArray` trait to make bit indexing possible with slices.
### Changed
- `bit_length` in `BitField` is now an associated function instead of a method (`bit_length()` instead of `bit_length(&self)`)
## [0.7.0] - 2017-01-16
### Added
- `BitField` was also implemented for: `i8`, `i16`, `i32`, `i64` and `isize`
### Changed
- `length()` method in `BitField` is now called `bit_length()`
- `get_range()` method in `BitField` is now called `get_bits()`
- `set_range()` method in `BitField` is now called `set_bits()`
### Removed
- `zero()` and `one()` constructor was removed from `BitField` trait.

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Philipp Oppermann
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,31 @@
# bit_field
A simple crate providing the `BitField` trait, which has methods for operating on individual bits and ranges
of bits on Rust's integral types.
## Documentation
Documentation is available on [docs.rs](https://docs.rs/bit_field)
## Usage
```TOML
[dependencies]
bit_field = "0.10.1"
```
## Example
```rust
extern crate bit_field;
use bit_field::BitField;
let mut x: u8 = 0;
x.set_bit(7, true);
assert_eq!(x, 0b1000_0000);
x.set_bits(0..4, 0b1001);
assert_eq!(x, 0b1000_1001);
```
## License
This crate is dual-licensed under MIT or the Apache License (Version 2.0). See LICENSE-APACHE and LICENSE-MIT for details.

View File

@ -0,0 +1,233 @@
#![feature(test)]
extern crate bit_field;
use bit_field::*;
pub trait BitOper {
const BIT_LEN: usize;
fn get_b(&self, idx: usize) -> bool;
fn set_b(&mut self, idx: usize, val: bool);
fn toggle(&mut self, idx: usize);
}
pub trait BitArrayOper<T: BitOper> {
fn get_blen(&self) -> usize;
fn get_b(&self, idx: usize) -> bool;
fn set_b(&mut self, idx: usize, val: bool);
fn toggle(&mut self, idx: usize);
}
impl BitOper for u8 {
const BIT_LEN: usize = std::mem::size_of::<Self>() as usize * 8;
fn set_b(&mut self, idx: usize, val: bool) {
assert!(idx < Self::BIT_LEN);
if val {
*self |= 1 << idx;
} else {
*self &= !(1 << idx);
}
}
fn get_b(&self, idx: usize) -> bool {
assert!(idx < Self::BIT_LEN);
(self & 1 << idx) != 0
}
fn toggle(&mut self, idx: usize) {
assert!(idx < Self::BIT_LEN);
*self ^= 1 << idx;
}
}
impl BitOper for u32 {
const BIT_LEN: usize = std::mem::size_of::<Self>() as usize * 8;
fn set_b(&mut self, idx: usize, val: bool) {
assert!(idx < Self::BIT_LEN);
if val {
*self |= 1 << idx;
} else {
*self &= !(1 << idx);
}
}
fn get_b(&self, idx: usize) -> bool {
assert!(idx < Self::BIT_LEN);
(self & 1 << idx) != 0
}
fn toggle(&mut self, idx: usize) {
assert!(idx < Self::BIT_LEN);
*self ^= 1 << idx;
}
}
impl BitOper for u64 {
const BIT_LEN: usize = std::mem::size_of::<Self>() as usize * 8;
fn set_b(&mut self, idx: usize, val: bool) {
assert!(idx < Self::BIT_LEN);
if val {
*self |= 1 << idx;
} else {
*self &= !(1 << idx);
}
}
fn get_b(&self, idx: usize) -> bool {
assert!(idx < Self::BIT_LEN);
(self & 1 << idx) != 0
}
fn toggle(&mut self, idx: usize) {
assert!(idx < Self::BIT_LEN);
*self ^= 1 << idx;
}
}
impl<T: BitOper> BitArrayOper<T> for [T] {
fn get_blen(&self) -> usize {
self.len() * T::BIT_LEN
}
fn get_b(&self, idx: usize) -> bool {
self[idx / T::BIT_LEN].get_b(idx % T::BIT_LEN)
}
fn set_b(&mut self, idx: usize, val: bool) {
self[idx / T::BIT_LEN].set_b(idx % T::BIT_LEN, val);
}
fn toggle(&mut self, idx: usize) {
self[idx / T::BIT_LEN].toggle(idx % T::BIT_LEN);
}
}
extern crate test;
use test::Bencher;
const LEN: usize = 256;
fn set_bitfield<T: BitField>(v: &mut Vec<T>) {
for i in 0..v.len() * T::BIT_LENGTH {
v.as_mut_slice().set_bit(i, true);
}
}
fn get_bitfield<T: BitField>(v: &Vec<T>) {
for i in 0..v.len() * T::BIT_LENGTH {
let _b = v.as_slice().get_bit(i);
}
}
fn set_trivial<T: BitOper>(v: &mut Vec<T>) {
for i in 0..v.len() * T::BIT_LEN {
v.set_b(i, true);
}
}
fn get_trivial<T: BitOper>(v: &Vec<T>) {
for i in 0..v.len() * T::BIT_LEN {
let _b = v.get_b(i);
}
}
#[bench]
fn u8_set_bitfield(b: &mut Bencher) {
let mut v = vec![0u8; LEN];
b.iter(|| {
set_bitfield(&mut v);
});
}
#[bench]
fn u8_set_trivial(b: &mut Bencher) {
let mut v = vec![0u8; LEN];
b.iter(|| {
set_trivial(&mut v);
});
}
#[bench]
fn u8_get_bitfield(b: &mut Bencher) {
let v = vec![1u8; LEN];
b.iter(|| {
get_bitfield(&v);
});
}
#[bench]
fn u8_get_trivial(b: &mut Bencher) {
let v = vec![1u8; LEN];
b.iter(|| {
get_trivial(&v);
});
}
#[bench]
fn u32_set_bitfield(b: &mut Bencher) {
let mut v = vec![0u32; LEN];
b.iter(|| {
set_bitfield(&mut v);
});
}
#[bench]
fn u32_set_trivial(b: &mut Bencher) {
let mut v = vec![0u32; LEN];
b.iter(|| {
set_trivial(&mut v);
});
}
#[bench]
fn u32_get_bitfield(b: &mut Bencher) {
let v = vec![1u32; LEN];
b.iter(|| {
get_bitfield(&v);
});
}
#[bench]
fn u32_get_trivial(b: &mut Bencher) {
let v = vec![1u32; LEN];
b.iter(|| {
get_trivial(&v);
});
}
#[bench]
fn u64_set_bitfield(b: &mut Bencher) {
let mut v = vec![0u64; LEN];
b.iter(|| {
set_bitfield(&mut v);
});
}
#[bench]
fn u64_set_trivial(b: &mut Bencher) {
let mut v = vec![0u64; LEN];
b.iter(|| {
set_trivial(&mut v);
});
}
#[bench]
fn u64_get_bitfield(b: &mut Bencher) {
let v = vec![1u64; LEN];
b.iter(|| {
get_bitfield(&v);
});
}
#[bench]
fn u64_get_trivial(b: &mut Bencher) {
let v = vec![1u64; LEN];
b.iter(|| {
get_trivial(&v);
});
}

View File

@ -0,0 +1,374 @@
//! Provides the abstraction of a bit field, which allows for bit-level update and retrieval
//! operations.
#![no_std]
#[cfg(test)]
mod tests;
use core::ops::{Bound, Range, RangeBounds};
/// A generic trait which provides methods for extracting and setting specific bits or ranges of
/// bits.
pub trait BitField {
/// The number of bits in this bit field.
///
/// ```rust
/// use bit_field::BitField;
///
/// assert_eq!(u32::BIT_LENGTH, 32);
/// assert_eq!(u64::BIT_LENGTH, 64);
/// ```
const BIT_LENGTH: usize;
/// Obtains the bit at the index `bit`; note that index 0 is the least significant bit, while
/// index `length() - 1` is the most significant bit.
///
/// ```rust
/// use bit_field::BitField;
///
/// let value: u32 = 0b110101;
///
/// assert_eq!(value.get_bit(1), false);
/// assert_eq!(value.get_bit(2), true);
/// ```
///
/// ## Panics
///
/// This method will panic if the bit index is out of bounds of the bit field.
fn get_bit(&self, bit: usize) -> bool;
/// Obtains the range of bits specified by `range`; note that index 0 is the least significant
/// bit, while index `length() - 1` is the most significant bit.
///
/// ```rust
/// use bit_field::BitField;
///
/// let value: u32 = 0b110101;
///
/// assert_eq!(value.get_bits(0..3), 0b101);
/// assert_eq!(value.get_bits(2..6), 0b1101);
/// assert_eq!(value.get_bits(..), 0b110101);
/// assert_eq!(value.get_bits(3..=3), value.get_bit(3) as u32);
/// ```
///
/// ## Panics
///
/// This method will panic if the start or end indexes of the range are out of bounds of the
/// bit field.
fn get_bits<T: RangeBounds<usize>>(&self, range: T) -> Self;
/// Sets the bit at the index `bit` to the value `value` (where true means a value of '1' and
/// false means a value of '0'); note that index 0 is the least significant bit, while index
/// `length() - 1` is the most significant bit.
///
/// ```rust
/// use bit_field::BitField;
///
/// let mut value = 0u32;
///
/// value.set_bit(1, true);
/// assert_eq!(value, 2u32);
///
/// value.set_bit(3, true);
/// assert_eq!(value, 10u32);
///
/// value.set_bit(1, false);
/// assert_eq!(value, 8u32);
/// ```
///
/// ## Panics
///
/// This method will panic if the bit index is out of the bounds of the bit field.
fn set_bit(&mut self, bit: usize, value: bool) -> &mut Self;
/// Sets the range of bits defined by the range `range` to the lower bits of `value`; to be
/// specific, if the range is N bits long, the N lower bits of `value` will be used; if any of
/// the other bits in `value` are set to 1, this function will panic.
///
/// ```rust
/// use bit_field::BitField;
///
/// let mut value = 0u32;
///
/// value.set_bits(0..2, 0b11);
/// assert_eq!(value, 0b11);
///
/// value.set_bits(2..=3, 0b11);
/// assert_eq!(value, 0b1111);
///
/// value.set_bits(..4, 0b1010);
/// assert_eq!(value, 0b1010);
/// ```
///
/// ## Panics
///
/// This method will panic if the range is out of bounds of the bit field, or if there are `1`s
/// not in the lower N bits of `value`.
fn set_bits<T: RangeBounds<usize>>(&mut self, range: T, value: Self) -> &mut Self;
}
pub trait BitArray<T: BitField> {
/// Returns the length, i.e. the number of bits, in this bit array.
///
/// ```rust
/// use bit_field::BitArray;
///
/// assert_eq!([0u8, 4u8, 8u8].bit_length(), 24);
/// assert_eq!([0u32, 5u32].bit_length(), 64);
/// ```
fn bit_length(&self) -> usize;
/// Obtains the bit at the index `bit`; note that index 0 is the least significant bit, while
/// index `length() - 1` is the most significant bit.
///
/// ```rust
/// use bit_field::BitArray;
///
/// let value: [u32; 1] = [0b110101];
///
/// assert_eq!(value.get_bit(1), false);
/// assert_eq!(value.get_bit(2), true);
/// ```
///
/// ## Panics
///
/// This method will panic if the bit index is out of bounds of the bit array.
fn get_bit(&self, bit: usize) -> bool;
/// Obtains the range of bits specified by `range`; note that index 0 is the least significant
/// bit, while index `length() - 1` is the most significant bit.
///
/// ```rust
/// use bit_field::BitArray;
///
/// let value: [u32; 2] = [0b110101, 0b11];
///
/// assert_eq!(value.get_bits(0..3), 0b101);
/// assert_eq!(value.get_bits(..6), 0b110101);
/// assert_eq!(value.get_bits(31..33), 0b10);
/// assert_eq!(value.get_bits(5..=32), 0b1_0000_0000_0000_0000_0000_0000_001);
/// assert_eq!(value.get_bits(34..), 0);
/// ```
///
/// ## Panics
///
/// This method will panic if the start or end indexes of the range are out of bounds of the
/// bit array, or if the range can't be contained by the bit field T.
fn get_bits<U: RangeBounds<usize>>(&self, range: U) -> T;
/// Sets the bit at the index `bit` to the value `value` (where true means a value of '1' and
/// false means a value of '0'); note that index 0 is the least significant bit, while index
/// `length() - 1` is the most significant bit.
///
/// ```rust
/// use bit_field::BitArray;
///
/// let mut value = [0u32];
///
/// value.set_bit(1, true);
/// assert_eq!(value, [2u32]);
///
/// value.set_bit(3, true);
/// assert_eq!(value, [10u32]);
///
/// value.set_bit(1, false);
/// assert_eq!(value, [8u32]);
/// ```
///
/// ## Panics
///
/// This method will panic if the bit index is out of the bounds of the bit array.
fn set_bit(&mut self, bit: usize, value: bool);
/// Sets the range of bits defined by the range `range` to the lower bits of `value`; to be
/// specific, if the range is N bits long, the N lower bits of `value` will be used; if any of
/// the other bits in `value` are set to 1, this function will panic.
///
/// ```rust
/// use bit_field::BitArray;
///
/// let mut value = [0u32, 0u32];
///
/// value.set_bits(0..2, 0b11);
/// assert_eq!(value, [0b11, 0u32]);
///
/// value.set_bits(31..35, 0b1010);
/// assert_eq!(value, [0x0003, 0b101]);
/// ```
///
/// ## Panics
///
/// This method will panic if the range is out of bounds of the bit array,
/// if the range can't be contained by the bit field T, or if there are `1`s
/// not in the lower N bits of `value`.
fn set_bits<U: RangeBounds<usize>>(&mut self, range: U, value: T);
}
/// An internal macro used for implementing BitField on the standard integral types.
macro_rules! bitfield_numeric_impl {
($($t:ty)*) => ($(
impl BitField for $t {
const BIT_LENGTH: usize = ::core::mem::size_of::<Self>() as usize * 8;
#[track_caller]
#[inline]
fn get_bit(&self, bit: usize) -> bool {
assert!(bit < Self::BIT_LENGTH);
(*self & (1 << bit)) != 0
}
#[track_caller]
#[inline]
fn get_bits<T: RangeBounds<usize>>(&self, range: T) -> Self {
let range = to_regular_range(&range, Self::BIT_LENGTH);
assert!(range.start < Self::BIT_LENGTH);
assert!(range.end <= Self::BIT_LENGTH);
assert!(range.start < range.end);
// shift away high bits
let bits = *self << (Self::BIT_LENGTH - range.end) >> (Self::BIT_LENGTH - range.end);
// shift away low bits
bits >> range.start
}
#[track_caller]
#[inline]
fn set_bit(&mut self, bit: usize, value: bool) -> &mut Self {
assert!(bit < Self::BIT_LENGTH);
if value {
*self |= 1 << bit;
} else {
*self &= !(1 << bit);
}
self
}
#[track_caller]
#[inline]
fn set_bits<T: RangeBounds<usize>>(&mut self, range: T, value: Self) -> &mut Self {
let range = to_regular_range(&range, Self::BIT_LENGTH);
assert!(range.start < Self::BIT_LENGTH);
assert!(range.end <= Self::BIT_LENGTH);
assert!(range.start < range.end);
assert!(value << (Self::BIT_LENGTH - (range.end - range.start)) >>
(Self::BIT_LENGTH - (range.end - range.start)) == value,
"value does not fit into bit range");
let bitmask: Self = !(!0 << (Self::BIT_LENGTH - range.end) >>
(Self::BIT_LENGTH - range.end) >>
range.start << range.start);
// set bits
*self = (*self & bitmask) | (value << range.start);
self
}
}
)*)
}
bitfield_numeric_impl! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
impl<T: BitField> BitArray<T> for [T] {
#[inline]
fn bit_length(&self) -> usize {
self.len() * T::BIT_LENGTH
}
#[track_caller]
#[inline]
fn get_bit(&self, bit: usize) -> bool {
let slice_index = bit / T::BIT_LENGTH;
let bit_index = bit % T::BIT_LENGTH;
self[slice_index].get_bit(bit_index)
}
#[track_caller]
#[inline]
fn get_bits<U: RangeBounds<usize>>(&self, range: U) -> T {
let range = to_regular_range(&range, self.bit_length());
assert!(range.len() <= T::BIT_LENGTH);
let slice_start = range.start / T::BIT_LENGTH;
let slice_end = range.end / T::BIT_LENGTH;
let bit_start = range.start % T::BIT_LENGTH;
let bit_end = range.end % T::BIT_LENGTH;
let len = range.len();
assert!(slice_end - slice_start <= 1);
if slice_start == slice_end {
self[slice_start].get_bits(bit_start..bit_end)
} else if bit_end == 0 {
self[slice_start].get_bits(bit_start..T::BIT_LENGTH)
} else {
let mut ret = self[slice_start].get_bits(bit_start..T::BIT_LENGTH);
ret.set_bits(
(T::BIT_LENGTH - bit_start)..len,
self[slice_end].get_bits(0..bit_end),
);
ret
}
}
#[track_caller]
#[inline]
fn set_bit(&mut self, bit: usize, value: bool) {
let slice_index = bit / T::BIT_LENGTH;
let bit_index = bit % T::BIT_LENGTH;
self[slice_index].set_bit(bit_index, value);
}
#[track_caller]
#[inline]
fn set_bits<U: RangeBounds<usize>>(&mut self, range: U, value: T) {
let range = to_regular_range(&range, self.bit_length());
assert!(range.len() <= T::BIT_LENGTH);
let slice_start = range.start / T::BIT_LENGTH;
let slice_end = range.end / T::BIT_LENGTH;
let bit_start = range.start % T::BIT_LENGTH;
let bit_end = range.end % T::BIT_LENGTH;
assert!(slice_end - slice_start <= 1);
if slice_start == slice_end {
self[slice_start].set_bits(bit_start..bit_end, value);
} else if bit_end == 0 {
self[slice_start].set_bits(bit_start..T::BIT_LENGTH, value);
} else {
self[slice_start].set_bits(
bit_start..T::BIT_LENGTH,
value.get_bits(0..T::BIT_LENGTH - bit_start),
);
self[slice_end].set_bits(
0..bit_end,
value.get_bits(T::BIT_LENGTH - bit_start..T::BIT_LENGTH),
);
}
}
}
fn to_regular_range<T: RangeBounds<usize>>(generic_range: &T, bit_length: usize) -> Range<usize> {
let start = match generic_range.start_bound() {
Bound::Excluded(&value) => value + 1,
Bound::Included(&value) => value,
Bound::Unbounded => 0,
};
let end = match generic_range.end_bound() {
Bound::Excluded(&value) => value,
Bound::Included(&value) => value + 1,
Bound::Unbounded => bit_length,
};
start..end
}

View File

@ -0,0 +1,450 @@
use BitArray;
use BitField;
#[test]
fn test_integer_bit_lengths() {
assert_eq!(u8::BIT_LENGTH, 8);
assert_eq!(u16::BIT_LENGTH, 16);
assert_eq!(u32::BIT_LENGTH, 32);
assert_eq!(u64::BIT_LENGTH, 64);
assert_eq!(u128::BIT_LENGTH, 128);
assert_eq!(i8::BIT_LENGTH, 8);
assert_eq!(i16::BIT_LENGTH, 16);
assert_eq!(i32::BIT_LENGTH, 32);
assert_eq!(i64::BIT_LENGTH, 64);
assert_eq!(i128::BIT_LENGTH, 128);
}
#[test]
fn test_set_reset_u8() {
let mut field = 0b11110010u8;
let mut bit_i = |i| {
field.set_bit(i, true);
assert_eq!(field.get_bit(i), true);
field.set_bit(i, false);
assert_eq!(field.get_bit(i), false);
field.set_bit(i, true);
assert_eq!(field.get_bit(i), true);
};
for i in 0..8 {
bit_i(i);
}
}
#[test]
fn test_set_reset_u16() {
let mut field = 0b1111001010010110u16;
let mut bit_i = |i| {
field.set_bit(i, true);
assert_eq!(field.get_bit(i), true);
field.set_bit(i, false);
assert_eq!(field.get_bit(i), false);
field.set_bit(i, true);
assert_eq!(field.get_bit(i), true);
};
for i in 0..16 {
bit_i(i);
}
}
#[test]
fn test_read_u32() {
let field = 0b1111111111010110u32;
assert_eq!(field.get_bit(0), false);
assert_eq!(field.get_bit(1), true);
assert_eq!(field.get_bit(2), true);
assert_eq!(field.get_bit(3), false);
assert_eq!(field.get_bit(4), true);
assert_eq!(field.get_bit(5), false);
for i in 6..16 {
assert_eq!(field.get_bit(i), true);
}
for i in 16..32 {
assert_eq!(field.get_bit(i), false);
}
assert_eq!(field.get_bits(16..), 0);
assert_eq!(field.get_bits(16..32), 0);
assert_eq!(field.get_bits(16..=31), 0);
assert_eq!(field.get_bits(6..16), 0b1111111111);
assert_eq!(field.get_bits(6..=15), 0b1111111111);
assert_eq!(field.get_bits(..6), 0b010110);
assert_eq!(field.get_bits(0..6), 0b010110);
assert_eq!(field.get_bits(0..=5), 0b010110);
assert_eq!(field.get_bits(..10), 0b1111010110);
assert_eq!(field.get_bits(0..10), 0b1111010110);
assert_eq!(field.get_bits(0..=9), 0b1111010110);
assert_eq!(field.get_bits(5..12), 0b1111110);
assert_eq!(field.get_bits(5..=11), 0b1111110);
}
#[test]
fn test_set_reset_u32() {
let mut field = 0b1111111111010110u32;
let mut bit_i = |i| {
field.set_bit(i, true);
assert_eq!(field.get_bit(i), true);
field.set_bit(i, false);
assert_eq!(field.get_bit(i), false);
field.set_bit(i, true);
assert_eq!(field.get_bit(i), true);
};
for i in 0..32 {
bit_i(i);
}
}
#[test]
fn test_set_range_u32() {
let mut field = 0b1111111111010110u32;
field.set_bits(10..15, 0b00000);
assert_eq!(field.get_bits(10..15), 0b00000);
assert_eq!(field.get_bits(10..=14), 0b00000);
field.set_bits(10..15, 0b10101);
assert_eq!(field.get_bits(10..15), 0b10101);
assert_eq!(field.get_bits(10..=14), 0b10101);
field.set_bits(10..15, 0b01010);
assert_eq!(field.get_bits(10..15), 0b01010);
assert_eq!(field.get_bits(10..=14), 0b01010);
field.set_bits(10..15, 0b11111);
assert_eq!(field.get_bits(10..15), 0b11111);
assert_eq!(field.get_bits(10..=14), 0b11111);
field.set_bits(10..=14, 0b00000);
assert_eq!(field.get_bits(10..15), 0b00000);
assert_eq!(field.get_bits(10..=14), 0b00000);
field.set_bits(10..=14, 0b10101);
assert_eq!(field.get_bits(10..15), 0b10101);
assert_eq!(field.get_bits(10..=14), 0b10101);
field.set_bits(10..=14, 0b01010);
assert_eq!(field.get_bits(10..15), 0b01010);
assert_eq!(field.get_bits(10..=14), 0b01010);
field.set_bits(10..=14, 0b11111);
assert_eq!(field.get_bits(10..15), 0b11111);
assert_eq!(field.get_bits(10..=14), 0b11111);
field.set_bits(0..16, 0xdead);
field.set_bits(14..32, 0xbeaf);
assert_eq!(field.get_bits(0..16), 0xdead);
assert_eq!(field.get_bits(14..32), 0xbeaf);
field.set_bits(..16, 0xdead);
field.set_bits(14.., 0xbeaf);
assert_eq!(field.get_bits(..16), 0xdead);
assert_eq!(field.get_bits(14..), 0xbeaf);
}
#[test]
fn test_read_u64() {
let field = 0b1111111111010110u64 << 32;
for i in 0..32 {
assert_eq!(field.get_bit(i), false);
}
assert_eq!(field.get_bit(32), false);
assert_eq!(field.get_bit(33), true);
assert_eq!(field.get_bit(34), true);
assert_eq!(field.get_bit(35), false);
assert_eq!(field.get_bit(36), true);
assert_eq!(field.get_bit(37), false);
for i in 38..48 {
assert_eq!(field.get_bit(i), true);
}
for i in 48..64 {
assert_eq!(field.get_bit(i), false);
}
assert_eq!(field.get_bits(..32), 0);
assert_eq!(field.get_bits(0..32), 0);
assert_eq!(field.get_bits(0..=31), 0);
assert_eq!(field.get_bits(48..), 0);
assert_eq!(field.get_bits(48..64), 0);
assert_eq!(field.get_bits(48..=63), 0);
assert_eq!(field.get_bits(38..48), 0b1111111111);
assert_eq!(field.get_bits(38..=47), 0b1111111111);
assert_eq!(field.get_bits(32..38), 0b010110);
assert_eq!(field.get_bits(32..=37), 0b010110);
assert_eq!(field.get_bits(32..42), 0b1111010110);
assert_eq!(field.get_bits(32..=41), 0b1111010110);
assert_eq!(field.get_bits(37..44), 0b1111110);
assert_eq!(field.get_bits(37..=43), 0b1111110);
}
#[test]
fn test_set_reset_u64() {
let mut field = 0b1111111111010110u64 << 32;
let mut bit_i = |i| {
field.set_bit(i, true);
assert_eq!(field.get_bit(i), true);
field.set_bit(i, false);
assert_eq!(field.get_bit(i), false);
field.set_bit(i, true);
assert_eq!(field.get_bit(i), true);
};
for i in 0..64 {
bit_i(i);
}
}
#[test]
fn test_set_range_u64() {
let mut field = 0b1111111111010110u64 << 32;
field.set_bits(42..47, 0b00000);
assert_eq!(field.get_bits(42..47), 0b00000);
assert_eq!(field.get_bits(42..=46), 0b00000);
field.set_bits(10..15, 0b10101);
assert_eq!(field.get_bits(10..15), 0b10101);
assert_eq!(field.get_bits(10..=14), 0b10101);
field.set_bits(40..45, 0b01010);
assert_eq!(field.get_bits(40..45), 0b01010);
assert_eq!(field.get_bits(40..=44), 0b01010);
field.set_bits(40..45, 0b11111);
assert_eq!(field.get_bits(40..45), 0b11111);
assert_eq!(field.get_bits(40..=44), 0b11111);
field.set_bits(42..=46, 0b00000);
assert_eq!(field.get_bits(42..47), 0b00000);
assert_eq!(field.get_bits(42..=46), 0b00000);
field.set_bits(10..=14, 0b10101);
assert_eq!(field.get_bits(10..15), 0b10101);
assert_eq!(field.get_bits(10..=14), 0b10101);
field.set_bits(40..=44, 0b01010);
assert_eq!(field.get_bits(40..45), 0b01010);
assert_eq!(field.get_bits(40..=44), 0b01010);
field.set_bits(40..=44, 0b11111);
assert_eq!(field.get_bits(40..45), 0b11111);
assert_eq!(field.get_bits(40..=44), 0b11111);
field.set_bits(0..16, 0xdead);
field.set_bits(14..32, 0xbeaf);
field.set_bits(32..64, 0xcafebabe);
assert_eq!(field.get_bits(0..16), 0xdead);
assert_eq!(field.get_bits(14..32), 0xbeaf);
assert_eq!(field.get_bits(32..64), 0xcafebabe);
field.set_bits(..16, 0xdead);
field.set_bits(14..=31, 0xbeaf);
field.set_bits(32.., 0xcafebabe);
assert_eq!(field.get_bits(..16), 0xdead);
assert_eq!(field.get_bits(14..=31), 0xbeaf);
assert_eq!(field.get_bits(32..), 0xcafebabe);
}
#[test]
fn test_read_u128() {
let field = 0b1111111111010110u128 << 32;
for i in 0..32 {
assert_eq!(field.get_bit(i), false);
}
assert_eq!(field.get_bit(32), false);
assert_eq!(field.get_bit(33), true);
assert_eq!(field.get_bit(34), true);
assert_eq!(field.get_bit(35), false);
assert_eq!(field.get_bit(36), true);
assert_eq!(field.get_bit(37), false);
for i in 38..48 {
assert_eq!(field.get_bit(i), true);
}
for i in 48..64 {
assert_eq!(field.get_bit(i), false);
}
assert_eq!(field.get_bits(..32), 0);
assert_eq!(field.get_bits(0..32), 0);
assert_eq!(field.get_bits(0..=31), 0);
assert_eq!(field.get_bits(48..), 0);
assert_eq!(field.get_bits(48..64), 0);
assert_eq!(field.get_bits(48..=63), 0);
assert_eq!(field.get_bits(38..48), 0b1111111111);
assert_eq!(field.get_bits(38..=47), 0b1111111111);
assert_eq!(field.get_bits(32..38), 0b010110);
assert_eq!(field.get_bits(32..=37), 0b010110);
assert_eq!(field.get_bits(32..42), 0b1111010110);
assert_eq!(field.get_bits(32..=41), 0b1111010110);
assert_eq!(field.get_bits(37..44), 0b1111110);
assert_eq!(field.get_bits(37..=43), 0b1111110);
}
#[test]
fn test_set_reset_u128() {
let mut field = 0b1111111111010110u128 << 32;
let mut bit_i = |i| {
field.set_bit(i, true);
assert_eq!(field.get_bit(i), true);
field.set_bit(i, false);
assert_eq!(field.get_bit(i), false);
field.set_bit(i, true);
assert_eq!(field.get_bit(i), true);
};
for i in 0..64 {
bit_i(i);
}
}
#[test]
fn test_set_range_u128() {
let mut field = 0b1111111111010110u128 << 32;
field.set_bits(42..47, 0b00000);
assert_eq!(field.get_bits(42..47), 0b00000);
assert_eq!(field.get_bits(42..=46), 0b00000);
field.set_bits(10..15, 0b10101);
assert_eq!(field.get_bits(10..15), 0b10101);
assert_eq!(field.get_bits(10..=14), 0b10101);
field.set_bits(40..45, 0b01010);
assert_eq!(field.get_bits(40..45), 0b01010);
assert_eq!(field.get_bits(40..=44), 0b01010);
field.set_bits(40..45, 0b11111);
assert_eq!(field.get_bits(40..45), 0b11111);
assert_eq!(field.get_bits(40..=44), 0b11111);
field.set_bits(42..=46, 0b00000);
assert_eq!(field.get_bits(42..47), 0b00000);
assert_eq!(field.get_bits(42..=46), 0b00000);
field.set_bits(10..=14, 0b10101);
assert_eq!(field.get_bits(10..15), 0b10101);
assert_eq!(field.get_bits(10..=14), 0b10101);
field.set_bits(40..=44, 0b01010);
assert_eq!(field.get_bits(40..45), 0b01010);
assert_eq!(field.get_bits(40..=44), 0b01010);
field.set_bits(40..=44, 0b11111);
assert_eq!(field.get_bits(40..45), 0b11111);
assert_eq!(field.get_bits(40..=44), 0b11111);
field.set_bits(0..16, 0xdead);
field.set_bits(14..32, 0xbeaf);
field.set_bits(32..64, 0xcafebabe);
assert_eq!(field.get_bits(0..16), 0xdead);
assert_eq!(field.get_bits(14..32), 0xbeaf);
assert_eq!(field.get_bits(32..64), 0xcafebabe);
field.set_bits(..16, 0xdead);
field.set_bits(14..=31, 0xbeaf);
field.set_bits(32.., 0xcafebabe);
assert_eq!(field.get_bits(..16), 0xdead);
assert_eq!(field.get_bits(14..=31), 0xbeaf);
assert_eq!(field.get_bits(32..), 0xcafebabe);
}
#[test]
fn test_array_length() {
assert_eq!((&[2u8, 3u8, 4u8]).bit_length(), 24);
assert_eq!((&[2i8, 3i8, 4i8, 5i8]).bit_length(), 32);
assert_eq!((&[2u16, 3u16, 4u16]).bit_length(), 48);
assert_eq!((&[2i16, 3i16, 4i16, 5i16]).bit_length(), 64);
assert_eq!((&[2u32, 3u32, 4u32]).bit_length(), 96);
assert_eq!((&[2i32, 3i32, 4i32, 5i32]).bit_length(), 128);
assert_eq!((&[2u64, 3u64, 4u64]).bit_length(), 192);
assert_eq!((&[2i64, 3i64, 4i64, 5i64]).bit_length(), 256);
}
#[test]
fn test_set_bit_array() {
let mut test_val = [0xffu8];
test_val.set_bit(0, false);
assert_eq!(test_val, [0xfeu8]);
test_val.set_bit(4, false);
assert_eq!(test_val, [0xeeu8]);
let mut test_array = [0xffu8, 0x00u8, 0xffu8];
test_array.set_bit(7, false);
test_array.set_bit(8, true);
test_array.set_bit(16, false);
assert_eq!(test_array, [0x7fu8, 0x01u8, 0xfeu8]);
}
#[test]
fn test_get_bit_array() {
let test_val = [0xefu8];
assert_eq!(test_val.get_bit(1), true);
assert_eq!(test_val.get_bit(4), false);
let test_array = [0xffu8, 0x00u8, 0xffu8];
assert_eq!(test_array.get_bit(7), true);
assert_eq!(test_array.get_bit(8), false);
assert_eq!(test_array.get_bit(16), true);
}
#[test]
fn test_set_bits_array() {
let mut test_val = [0xffu8];
test_val.set_bits(0..4, 0x0u8);
assert_eq!(test_val, [0xf0u8]);
test_val.set_bits(0..4, 0xau8);
assert_eq!(test_val, [0xfau8]);
test_val.set_bits(4..8, 0xau8);
assert_eq!(test_val, [0xaau8]);
test_val.set_bits(.., 0xffu8);
assert_eq!(test_val, [0xffu8]);
test_val.set_bits(2..=5, 0x0u8);
assert_eq!(test_val, [0xc3u8]);
let mut test_array = [0xffu8, 0x00u8, 0xffu8];
test_array.set_bits(7..9, 0b10);
assert_eq!(test_array, [0x7f, 0x01, 0xff]);
test_array.set_bits(12..20, 0xaa);
assert_eq!(test_array, [0x7f, 0xa1, 0xfa]);
test_array.set_bits(16..24, 0xaa);
assert_eq!(test_array, [0x7f, 0xa1, 0xaa]);
test_array.set_bits(6..14, 0x00);
assert_eq!(test_array, [0x3f, 0x80, 0xaa]);
test_array.set_bits(..4, 0x00);
assert_eq!(test_array, [0x30, 0x80, 0xaa]);
test_array.set_bits(20.., 0x00);
assert_eq!(test_array, [0x30, 0x80, 0x0a]);
test_array.set_bits(7..=11, 0x1f);
assert_eq!(test_array, [0xb0, 0x8f, 0x0a]);
}
#[test]
fn test_get_bits_array() {
let mut test_val = [0xf0u8];
assert_eq!(test_val.get_bits(0..4), 0x0u8);
test_val = [0xfau8];
assert_eq!(test_val.get_bits(0..4), 0xau8);
test_val = [0xaau8];
assert_eq!(test_val.get_bits(4..8), 0xau8);
let mut test_array: [u8; 3] = [0xff, 0x01, 0xff];
assert_eq!(test_array.get_bits(7..9), 0b11u8);
test_array = [0x7f, 0xa1, 0xfa];
assert_eq!(test_array.get_bits(12..20), 0xaa);
test_array = [0x7f, 0xa1, 0xaa];
assert_eq!(test_array.get_bits(16..24), 0xaa);
test_array = [0x3f, 0x80, 0xaa];
assert_eq!(test_array.get_bits(6..14), 0x00);
}

View File

@ -0,0 +1,6 @@
{
"git": {
"sha1": "f286b39e1734f193240ad44b993d9f516cabac4c"
},
"path_in_vcs": ""
}

View File

@ -0,0 +1,95 @@
# Based on https://github.com/actions-rs/meta/blob/master/recipes/quickstart.md
#
# While our "example" application has the platform-specific code,
# for simplicity we are compiling and testing everything on the Ubuntu environment only.
# For multi-OS testing see the `cross.yml` workflow.
on: [push, pull_request]
name: Build
jobs:
check:
name: Check
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v2
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Run cargo check
uses: actions-rs/cargo@v1
with:
command: check
test:
name: Test Suite
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v2
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
unstable:
name: Test Suite (unstable)
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v2
- name: Install nightly toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
- name: Run cargo test --features unstable
uses: actions-rs/cargo@v1
with:
command: test
args: --features unstable
lints:
name: Lints
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v2
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
components: rustfmt, clippy
- name: Run cargo fmt
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- name: Run cargo clippy
uses: actions-rs/cargo@v1
with:
command: clippy

View File

@ -0,0 +1,2 @@
target
Cargo.lock

View File

@ -0,0 +1,53 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
name = "volatile"
version = "0.4.6"
authors = ["Philipp Oppermann <dev@phil-opp.com>"]
description = "A simple volatile wrapper type"
documentation = "https://docs.rs/volatile"
readme = "README.md"
keywords = ["volatile"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-osdev/volatile"
[package.metadata.release]
dev-version = false
pre-release-commit-message = "Release version {{version}}"
[[package.metadata.release.pre-release-replacements]]
file = "Changelog.md"
search = "# Unreleased"
replace = """
# Unreleased
# {{version}} {{date}}"""
exactly = 1
[package.metadata.docs.rs]
features = ["unstable"]
[dependencies.core]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
[dependencies.compiler_builtins]
version = "0.1"
optional = true
[features]
unstable = []
rustc-dep-of-std = [
"core",
"compiler_builtins",
]

View File

@ -0,0 +1,25 @@
[package]
name = "volatile"
version = "0.4.6"
authors = ["Philipp Oppermann <dev@phil-opp.com>"]
license = "MIT OR Apache-2.0"
keywords = ["volatile"]
description = "A simple volatile wrapper type"
documentation = "https://docs.rs/volatile"
repository = "https://github.com/rust-osdev/volatile"
[dependencies]
[features]
# Enable unstable features; requires Rust nightly; might break on compiler updates
unstable = []
[package.metadata.release]
dev-version = false
pre-release-replacements = [
{ file="Changelog.md", search="# Unreleased", replace="# Unreleased\n\n# {{version}} {{date}}", exactly=1 },
]
pre-release-commit-message = "Release version {{version}}"
[package.metadata.docs.rs]
features = ["unstable"]

View File

@ -0,0 +1,39 @@
# Unreleased
# 0.4.6 2023-01-17
- Fix UB in slice methods when Deref returns different references ([#27](https://github.com/rust-osdev/volatile/pull/27))
# 0.4.5 2022-04-24
- Remove the `const_generics` feature flag ([#25](https://github.com/rust-osdev/volatile/pull/25))
# 0.4.4 2021-03-09
- Replace feature "range_bounds_assert_len" with "slice_range" ([#21](https://github.com/rust-osdev/volatile/pull/21))
- Fixes the `unstable` feature on the latest nightly.
# 0.4.3 2020-12-23
- Add methods to restrict access ([#19](https://github.com/rust-osdev/volatile/pull/19))
# 0.4.2 2020-10-31
- Change `slice::check_range` to `RangeBounds::assert_len` ([#16](https://github.com/rust-osdev/volatile/pull/16))
- Fixes build on latest nightly.
# 0.4.1 2020-09-21
- Small documentation and metadata improvements
# 0.4.0 2020-09-21
- **Breaking:** Rewrite crate to operate on reference values ([#13](https://github.com/rust-osdev/volatile/pull/13))
# 0.3.0 2020-07-29
- **Breaking:** Remove `Debug` and `Clone` derives for `WriteOnly` ([#12](https://github.com/rust-osdev/volatile/pull/12))
# 0.2.7 2020-07-29
- Derive `Default` for `Volatile`, `WriteOnly` and `ReadOnly` ([#10](https://github.com/embed-rs/volatile/pull/10))

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 Philipp Oppermann
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,19 @@
# `volatile`
[![Build Status](https://github.com/rust-osdev/volatile/workflows/Build/badge.svg)](https://github.com/rust-osdev/volatile/actions?query=workflow%3ABuild) [![Docs.rs Badge](https://docs.rs/volatile/badge.svg)](https://docs.rs/volatile/)
Provides the wrapper type `Volatile`, which wraps a reference to any copy-able type and allows for volatile memory access to the wrapped value. Volatile memory accesses are never optimized away by the compiler, and are useful in many low-level systems programming and concurrent contexts.
The wrapper types *do not* enforce any atomicity guarantees; to also get atomicity, consider looking at the `Atomic` wrapper types found in `libcore` or `libstd`.
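For a quick feel of the API, here is a minimal usage sketch; it relies only on the `new`, `read`, `write`, and `update` methods documented in this crate:
```rust
use volatile::Volatile;

let mut value = 0u32;
// Wrap a mutable reference; the accesses below go through
// `ptr::read_volatile` / `ptr::write_volatile`.
let mut volatile = Volatile::new(&mut value);

volatile.write(1);
assert_eq!(volatile.read(), 1);

// Read-modify-write through a closure.
volatile.update(|val| *val += 1);
assert_eq!(volatile.read(), 2);
```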
## License
Licensed under either of
- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.

View File

@ -0,0 +1,22 @@
/// Helper trait that is implemented by [`ReadWrite`] and [`ReadOnly`].
pub trait Readable {}
/// Helper trait that is implemented by [`ReadWrite`] and [`WriteOnly`].
pub trait Writable {}
/// Zero-sized marker type for allowing both read and write access.
#[derive(Debug, Copy, Clone)]
pub struct ReadWrite;
impl Readable for ReadWrite {}
impl Writable for ReadWrite {}
/// Zero-sized marker type for allowing only read access.
#[derive(Debug, Copy, Clone)]
pub struct ReadOnly;
impl Readable for ReadOnly {}
/// Zero-sized marker type for allowing only write access.
#[derive(Debug, Copy, Clone)]
pub struct WriteOnly;
impl Writable for WriteOnly {}
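// Rough sketch of how these markers are meant to be combined with the `Volatile`
// wrapper in lib.rs, which takes the marker as its second type parameter
// (illustrative only):
//
//     let value = 0u32;
//     let read_only = Volatile::new_read_only(&value); // Volatile<&u32, ReadOnly>
//     let _ = read_only.read();  // allowed, because `ReadOnly: Readable`
//     // read_only.write(1);     // rejected at compile time: `ReadOnly` is not `Writable`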

View File

@ -0,0 +1,874 @@
//! Provides the wrapper type `Volatile`, which wraps a reference to any copy-able type and allows
//! for volatile memory access to the wrapped value. Volatile memory accesses are never optimized away
//! by the compiler, and are useful in many low-level systems programming and concurrent contexts.
//!
//! The wrapper types *do not* enforce any atomicity guarantees; to also get atomicity, consider
//! looking at the `Atomic` wrapper types found in `libcore` or `libstd`.
#![no_std]
#![cfg_attr(feature = "unstable", feature(core_intrinsics))]
#![cfg_attr(feature = "unstable", feature(slice_range))]
#![cfg_attr(feature = "unstable", allow(incomplete_features))]
#![cfg_attr(all(feature = "unstable", test), feature(slice_as_chunks))]
#![warn(missing_docs)]
use access::{ReadOnly, ReadWrite, Readable, Writable, WriteOnly};
use core::{
fmt,
marker::PhantomData,
ops::{Deref, DerefMut, Index, IndexMut},
ptr,
slice::SliceIndex,
};
#[cfg(feature = "unstable")]
use core::{
intrinsics,
ops::{Range, RangeBounds},
slice::range,
};
/// Allows creating read-only and write-only `Volatile` values.
pub mod access;
/// Wraps a reference to make accesses to the referenced value volatile.
///
/// Allows volatile reads and writes on the referenced value. The referenced value needs to
/// be `Copy` for reading and writing, as volatile reads and writes take and return copies
/// of the value.
///
/// Since not all volatile resources (e.g. memory mapped device registers) are both readable
/// and writable, this type supports limiting the allowed access types through an optional second
/// generic parameter `A` that can be one of `ReadWrite`, `ReadOnly`, or `WriteOnly`. It defaults
/// to `ReadWrite`, which allows all operations.
///
/// The size of this struct is the same as the size of the contained reference.
#[derive(Clone)]
#[repr(transparent)]
pub struct Volatile<R, A = ReadWrite> {
reference: R,
access: PhantomData<A>,
}
/// Constructor functions for creating new values
///
/// These functions allow constructing a new `Volatile` instance from a reference type. While
/// the `new` function creates a `Volatile` instance with unrestricted access, there are also
/// functions for creating read-only or write-only instances.
impl<R> Volatile<R> {
/// Constructs a new volatile instance wrapping the given reference.
///
/// While it is possible to construct `Volatile` instances from arbitrary values (including
/// non-reference values), most of the methods are only available when the wrapped type is
/// a reference. The only reason that we don't forbid non-reference types in the constructor
/// functions is that the Rust compiler does not support trait bounds on generic `const`
/// functions yet. When this becomes possible, we will release a new version of this library
/// with removed support for non-references. For these reasons it is recommended to use
/// the `Volatile` type only with references.
///
/// ## Example
///
/// ```rust
/// use volatile::Volatile;
///
/// let mut value = 0u32;
///
/// let mut volatile = Volatile::new(&mut value);
/// volatile.write(1);
/// assert_eq!(volatile.read(), 1);
/// ```
pub const fn new(reference: R) -> Volatile<R> {
Volatile {
reference,
access: PhantomData,
}
}
/// Constructs a new read-only volatile instance wrapping the given reference.
///
/// This is equivalent to the `new` function with the difference that the returned
/// `Volatile` instance does not permit write operations. This is for example useful
/// with memory-mapped hardware registers that are defined as read-only by the hardware.
///
/// ## Example
///
/// Reading is allowed:
///
/// ```rust
/// use volatile::Volatile;
///
/// let value = 0u32;
///
/// let volatile = Volatile::new_read_only(&value);
/// assert_eq!(volatile.read(), 0);
/// ```
///
/// But writing is not:
///
/// ```compile_fail
/// use volatile::Volatile;
///
/// let mut value = 0u32;
///
/// let mut volatile = Volatile::new_read_only(&mut value);
/// volatile.write(1);
/// //ERROR: ^^^^^ the trait `volatile::access::Writable` is not implemented
/// // for `volatile::access::ReadOnly`
/// ```
pub const fn new_read_only(reference: R) -> Volatile<R, ReadOnly> {
Volatile {
reference,
access: PhantomData,
}
}
/// Constructs a new write-only volatile instance wrapping the given reference.
///
/// This is equivalent to the `new` function with the difference that the returned
/// `Volatile` instance does not permit read operations. This is for example useful
/// with memory-mapped hardware registers that are defined as write-only by the hardware.
///
/// ## Example
///
/// Writing is allowed:
///
/// ```rust
/// use volatile::Volatile;
///
/// let mut value = 0u32;
///
/// let mut volatile = Volatile::new_write_only(&mut value);
/// volatile.write(1);
/// ```
///
/// But reading is not:
///
/// ```compile_fail
/// use volatile::Volatile;
///
/// let value = 0u32;
///
/// let volatile = Volatile::new_write_only(&value);
/// volatile.read();
/// //ERROR: ^^^^ the trait `volatile::access::Readable` is not implemented
/// // for `volatile::access::WriteOnly`
/// ```
pub const fn new_write_only(reference: R) -> Volatile<R, WriteOnly> {
Volatile {
reference,
access: PhantomData,
}
}
}
/// Methods for references to `Copy` types
impl<R, T, A> Volatile<R, A>
where
R: Deref<Target = T>,
T: Copy,
{
/// Performs a volatile read of the contained value.
///
/// Returns a copy of the read value. Volatile reads are guaranteed not to be optimized
/// away by the compiler, but by themselves do not have atomic ordering
/// guarantees. To also get atomicity, consider looking at the `Atomic` wrapper types of
/// the standard/`core` library.
///
/// ## Examples
///
/// ```rust
/// use volatile::Volatile;
///
/// let value = 42;
/// let shared_reference = Volatile::new(&value);
/// assert_eq!(shared_reference.read(), 42);
///
/// let mut value = 50;
/// let mut_reference = Volatile::new(&mut value);
/// assert_eq!(mut_reference.read(), 50);
/// ```
pub fn read(&self) -> T
where
A: Readable,
{
// UNSAFE: Safe, as we know that our internal value exists.
unsafe { ptr::read_volatile(&*self.reference) }
}
/// Performs a volatile write, setting the contained value to the given `value`.
///
/// Volatile writes are guaranteed to not be optimized away by the compiler, but by
/// themselves do not have atomic ordering guarantees. To also get atomicity, consider
/// looking at the `Atomic` wrapper types of the standard/`core` library.
///
/// ## Example
///
/// ```rust
/// use volatile::Volatile;
///
/// let mut value = 42;
/// let mut volatile = Volatile::new(&mut value);
/// volatile.write(50);
///
/// assert_eq!(volatile.read(), 50);
/// ```
pub fn write(&mut self, value: T)
where
A: Writable,
R: DerefMut,
{
// UNSAFE: Safe, as we know that our internal value exists.
unsafe { ptr::write_volatile(&mut *self.reference, value) };
}
/// Updates the contained value using the given closure and volatile instructions.
///
/// Performs a volatile read of the contained value, passes a mutable reference to it to the
/// function `f`, and then performs a volatile write of the (potentially updated) value back to
/// the contained value.
///
/// ```rust
/// use volatile::Volatile;
///
/// let mut value = 42;
/// let mut volatile = Volatile::new(&mut value);
/// volatile.update(|val| *val += 1);
///
/// assert_eq!(volatile.read(), 43);
/// ```
pub fn update<F>(&mut self, f: F)
where
A: Readable + Writable,
R: DerefMut,
F: FnOnce(&mut T),
{
let mut value = self.read();
f(&mut value);
self.write(value);
}
}
/// Method for extracting the wrapped value.
impl<R, A> Volatile<R, A> {
/// Extracts the inner value stored in the wrapper type.
///
/// This method gives direct access to the wrapped reference and thus allows
/// non-volatile access again. This is seldom what you want since there is usually
/// a reason that a reference is wrapped in `Volatile`. However, in some cases it might
/// be required or useful to use the `read_volatile`/`write_volatile` pointer methods of
/// the standard library directly, which this method makes possible.
///
/// Since no memory safety violation can occur when accessing the referenced value using
/// non-volatile operations, this method is safe. However, it _can_ lead to bugs at the
/// application level, so this method should be used with care.
///
/// ## Example
///
/// ```
/// use volatile::Volatile;
///
/// let mut value = 42;
/// let mut volatile = Volatile::new(&mut value);
/// volatile.write(50);
/// let unwrapped: &mut i32 = volatile.extract_inner();
///
/// assert_eq!(*unwrapped, 50); // non volatile access, be careful!
/// ```
pub fn extract_inner(self) -> R {
self.reference
}
}
/// Transformation methods for accessing struct fields
impl<R, T, A> Volatile<R, A>
where
R: Deref<Target = T>,
T: ?Sized,
{
/// Constructs a new `Volatile` reference by mapping the wrapped value.
///
/// This method is useful for accessing individual fields of volatile structs.
///
/// Note that this method gives temporary access to the wrapped reference, which allows
/// accessing the value in a non-volatile way. This is normally not what you want, so
/// **this method should only be used for reference-to-reference transformations**.
///
/// ## Examples
///
/// Accessing a struct field:
///
/// ```
/// use volatile::Volatile;
///
/// struct Example { field_1: u32, field_2: u8, }
/// let mut value = Example { field_1: 15, field_2: 255 };
/// let mut volatile = Volatile::new(&mut value);
///
/// // construct a volatile reference to a field
/// let field_2 = volatile.map(|example| &example.field_2);
/// assert_eq!(field_2.read(), 255);
/// ```
///
/// Don't misuse this method to do a non-volatile read of the referenced value:
///
/// ```
/// use volatile::Volatile;
///
/// let mut value = 5;
/// let mut volatile = Volatile::new(&mut value);
///
/// // DON'T DO THIS:
/// let mut readout = 0;
/// volatile.map(|value| {
/// readout = *value; // non-volatile read, might lead to bugs
/// value
/// });
/// ```
pub fn map<'a, F, U>(&'a self, f: F) -> Volatile<&'a U, A>
where
F: FnOnce(&'a T) -> &'a U,
U: ?Sized,
T: 'a,
{
Volatile {
reference: f(self.reference.deref()),
access: self.access,
}
}
/// Constructs a new mutable `Volatile` reference by mapping the wrapped value.
///
/// This method is useful for accessing individual fields of volatile structs.
///
/// Note that this method gives temporary access to the wrapped reference, which allows
/// accessing the value in a non-volatile way. This is normally not what you want, so
/// **this method should only be used for reference-to-reference transformations**.
///
/// ## Examples
///
/// Accessing a struct field:
///
/// ```
/// use volatile::Volatile;
///
/// struct Example { field_1: u32, field_2: u8, }
/// let mut value = Example { field_1: 15, field_2: 255 };
/// let mut volatile = Volatile::new(&mut value);
///
/// // construct a volatile reference to a field
/// let mut field_2 = volatile.map_mut(|example| &mut example.field_2);
/// field_2.write(128);
/// assert_eq!(field_2.read(), 128);
/// ```
///
/// Don't misuse this method to do a non-volatile read or write of the referenced value:
///
/// ```
/// use volatile::Volatile;
///
/// let mut value = 5;
/// let mut volatile = Volatile::new(&mut value);
///
/// // DON'T DO THIS:
/// volatile.map_mut(|value| {
/// *value = 10; // non-volatile write, might lead to bugs
/// value
/// });
/// ```
pub fn map_mut<'a, F, U>(&'a mut self, f: F) -> Volatile<&'a mut U, A>
where
F: FnOnce(&mut T) -> &mut U,
R: DerefMut,
U: ?Sized,
T: 'a,
{
Volatile {
reference: f(&mut self.reference),
access: self.access,
}
}
}
/// Methods for volatile slices
impl<T, R, A> Volatile<R, A>
where
R: Deref<Target = [T]>,
{
/// Applies the index operation on the wrapped slice.
///
/// Returns a shared `Volatile` reference to the resulting subslice.
///
/// This is a convenience method for the `map(|slice| slice.index(index))` operation, so it
/// has the same behavior as the indexing operation on slices (e.g. panics if the index is
/// out-of-bounds).
///
/// ## Examples
///
/// Accessing a single slice element:
///
/// ```
/// use volatile::Volatile;
///
/// let array = [1, 2, 3];
/// let slice = &array[..];
/// let volatile = Volatile::new(slice);
/// assert_eq!(volatile.index(1).read(), 2);
/// ```
///
/// Accessing a subslice:
///
/// ```
/// use volatile::Volatile;
///
/// let array = [1, 2, 3];
/// let slice = &array[..];
/// let volatile = Volatile::new(slice);
/// let subslice = volatile.index(1..);
/// assert_eq!(subslice.index(0).read(), 2);
/// ```
pub fn index<'a, I>(&'a self, index: I) -> Volatile<&'a I::Output, A>
where
I: SliceIndex<[T]>,
T: 'a,
{
self.map(|slice| slice.index(index))
}
/// Applies the mutable index operation on the wrapped slice.
///
/// Returns a mutable `Volatile` reference to the resulting subslice.
///
/// This is a convenience method for the `map_mut(|slice| slice.index_mut(index))`
/// operation, so it has the same behavior as the indexing operation on slices
/// (e.g. panics if the index is out-of-bounds).
///
/// ## Examples
///
/// Accessing a single slice element:
///
/// ```
/// use volatile::Volatile;
///
/// let mut array = [1, 2, 3];
/// let slice = &mut array[..];
/// let mut volatile = Volatile::new(slice);
/// volatile.index_mut(1).write(6);
/// assert_eq!(volatile.index(1).read(), 6);
/// ```
///
/// Accessing a subslice:
///
/// ```
/// use volatile::Volatile;
///
/// let mut array = [1, 2, 3];
/// let slice = &mut array[..];
/// let mut volatile = Volatile::new(slice);
/// let mut subslice = volatile.index_mut(1..);
/// subslice.index_mut(0).write(6);
/// assert_eq!(subslice.index(0).read(), 6);
/// ```
pub fn index_mut<'a, I>(&'a mut self, index: I) -> Volatile<&mut I::Output, A>
where
I: SliceIndex<[T]>,
R: DerefMut,
T: 'a,
{
self.map_mut(|slice| slice.index_mut(index))
}
/// Copies all elements from `self` into `dst`, using a volatile memcpy.
///
/// The length of `dst` must be the same as `self`.
///
/// The method is only available with the `unstable` feature enabled (requires a nightly
/// Rust compiler).
///
/// ## Panics
///
/// This function will panic if the two slices have different lengths.
///
/// ## Examples
///
/// Copying two elements from a volatile slice:
///
/// ```
/// use volatile::Volatile;
///
/// let src = [1, 2];
/// // the `Volatile` type does not work with arrays, so convert `src` to a slice
/// let slice = &src[..];
/// let volatile = Volatile::new(slice);
/// let mut dst = [5, 0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the destination slice from three elements
/// // to two. It will panic if we don't do this.
/// volatile.copy_into_slice(&mut dst[1..]);
///
/// assert_eq!(src, [1, 2]);
/// assert_eq!(dst, [5, 1, 2]);
/// ```
#[cfg(feature = "unstable")]
pub fn copy_into_slice(&self, dst: &mut [T])
where
T: Copy,
{
let src = self.reference.deref();
assert_eq!(
src.len(),
dst.len(),
"destination and source slices have different lengths"
);
unsafe {
intrinsics::volatile_copy_nonoverlapping_memory(
dst.as_mut_ptr(),
src.as_ptr(),
src.len(),
);
}
}
/// Copies all elements from `src` into `self`, using a volatile memcpy.
///
/// The length of `src` must be the same as `self`.
///
/// This method is similar to the `slice::copy_from_slice` method of the standard library. The
/// difference is that this method performs a volatile copy.
///
/// The method is only available with the `unstable` feature enabled (requires a nightly
/// Rust compiler).
///
/// ## Panics
///
/// This function will panic if the two slices have different lengths.
///
/// ## Examples
///
/// Copying two elements from a slice into a volatile slice:
///
/// ```
/// use volatile::Volatile;
///
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
/// // the `Volatile` type does not work with arrays, so convert `dst` to a slice
/// let slice = &mut dst[..];
/// let mut volatile = Volatile::new(slice);
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// volatile.copy_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
#[cfg(feature = "unstable")]
pub fn copy_from_slice(&mut self, src: &[T])
where
T: Copy,
R: DerefMut,
{
let dest = self.reference.deref_mut();
assert_eq!(
dest.len(),
src.len(),
"destination and source slices have different lengths"
);
unsafe {
intrinsics::volatile_copy_nonoverlapping_memory(
dest.as_mut_ptr(),
src.as_ptr(),
dest.len(),
);
}
}
/// Copies elements from one part of the slice to another part of itself, using a
/// volatile `memmove`.
///
/// `src` is the range within `self` to copy from. `dest` is the starting index of the
/// range within `self` to copy to, which will have the same length as `src`. The two ranges
/// may overlap. The ends of the two ranges must be less than or equal to `self.len()`.
///
/// This method is similar to the `slice::copy_within` method of the standard library. The
/// difference is that this method performs a volatile copy.
///
/// This method is only available with the `unstable` feature enabled (requires a nightly
/// Rust compiler).
///
/// ## Panics
///
/// This function will panic if either range exceeds the end of the slice, or if the end
/// of `src` is before the start.
///
/// ## Examples
///
/// Copying four bytes within a slice:
///
/// ```
/// use volatile::Volatile;
///
/// let mut byte_array = *b"Hello, World!";
/// let mut slice: &mut [u8] = &mut byte_array[..];
/// let mut volatile = Volatile::new(slice);
///
/// volatile.copy_within(1..5, 8);
///
/// assert_eq!(&byte_array, b"Hello, Wello!");
/// ```
#[cfg(feature = "unstable")]
pub fn copy_within(&mut self, src: impl RangeBounds<usize>, dest: usize)
where
T: Copy,
R: DerefMut,
{
let slice = self.reference.deref_mut();
// implementation taken from https://github.com/rust-lang/rust/blob/683d1bcd405727fcc9209f64845bd3b9104878b8/library/core/src/slice/mod.rs#L2726-L2738
let Range {
start: src_start,
end: src_end,
} = range(src, ..slice.len());
let count = src_end - src_start;
assert!(dest <= slice.len() - count, "dest is out of bounds");
// SAFETY: the conditions for `volatile_copy_memory` have all been checked above,
// as have those for `ptr::add`.
unsafe {
intrinsics::volatile_copy_memory(
slice.as_mut_ptr().add(dest),
slice.as_ptr().add(src_start),
count,
);
}
}
}
/// Methods for volatile byte slices
impl<R, A> Volatile<R, A>
where
R: Deref<Target = [u8]>,
{
/// Sets all elements of the byte slice to the given `value` using a volatile `memset`.
///
/// This method is similar to the `slice::fill` method of the standard library, with the
/// difference that this method performs a volatile write operation. Another difference
/// is that this method is only available for byte slices (not general `&mut [T]` slices)
/// because there currently isn't an intrinsic function that allows non-`u8` values.
///
/// This method is only available with the `unstable` feature enabled (requires a nightly
/// Rust compiler).
///
/// ## Example
///
/// ```rust
/// use volatile::Volatile;
///
/// let mut buf = Volatile::new(vec![0; 10]);
/// buf.fill(1);
/// assert_eq!(buf.extract_inner(), vec![1; 10]);
/// ```
#[cfg(feature = "unstable")]
pub fn fill(&mut self, value: u8)
where
R: DerefMut,
{
let dest = self.reference.deref_mut();
unsafe {
intrinsics::volatile_set_memory(dest.as_mut_ptr(), value, dest.len());
}
}
}
/// Methods for converting arrays to slices
impl<R, A, T, const N: usize> Volatile<R, A>
where
R: Deref<Target = [T; N]>,
{
/// Converts an array reference to a shared slice.
///
/// This makes it possible to use the methods defined on slices.
///
/// ## Example
///
/// Reading a subslice from a volatile array reference using `index`:
///
/// ```
/// use volatile::Volatile;
///
/// let src = [1, 2, 3, 4];
/// let volatile = Volatile::new(&src);
///
/// // convert the `Volatile<&[i32; 4]>` array reference to a `Volatile<&[i32]>` slice
/// let volatile_slice = volatile.as_slice();
/// // we can now use the slice methods
/// let subslice = volatile_slice.index(2..);
///
/// assert_eq!(subslice.index(0).read(), 3);
/// assert_eq!(subslice.index(1).read(), 4);
/// ```
pub fn as_slice(&self) -> Volatile<&[T], A> {
self.map(|array| &array[..])
}
/// Converts a mutable array reference to a mutable slice.
///
/// This makes it possible to use the methods defined on slices.
///
/// ## Example
///
/// Writing to an index of a mutable array reference:
///
/// ```
/// use volatile::Volatile;
///
/// let mut dst = [0, 0];
/// let mut volatile = Volatile::new(&mut dst);
///
/// // convert the `Volatile<&mut [i32; 2]>` array reference to a `Volatile<&mut [i32]>` slice
/// let mut volatile_slice = volatile.as_mut_slice();
/// // we can now use the slice methods
/// volatile_slice.index_mut(1).write(1);
///
/// assert_eq!(dst, [0, 1]);
/// ```
pub fn as_mut_slice(&mut self) -> Volatile<&mut [T], A>
where
R: DerefMut,
{
self.map_mut(|array| &mut array[..])
}
}
/// Methods for restricting access.
impl<R> Volatile<R> {
/// Restricts access permissions to read-only.
///
/// ## Example
///
/// ```
/// use volatile::Volatile;
///
/// let mut value: i16 = -4;
/// let mut volatile = Volatile::new(&mut value);
///
/// let read_only = volatile.read_only();
/// assert_eq!(read_only.read(), -4);
/// // read_only.write(10); // compile-time error
/// ```
pub fn read_only(self) -> Volatile<R, ReadOnly> {
Volatile {
reference: self.reference,
access: PhantomData,
}
}
/// Restricts access permissions to write-only.
///
/// ## Example
///
/// Creating a write-only reference to a struct field:
///
/// ```
/// use volatile::Volatile;
///
/// struct Example { field_1: u32, field_2: u8, }
/// let mut value = Example { field_1: 15, field_2: 255 };
/// let mut volatile = Volatile::new(&mut value);
///
/// // construct a volatile write-only reference to `field_2`
/// let mut field_2 = volatile.map_mut(|example| &mut example.field_2).write_only();
/// field_2.write(14);
/// // field_2.read(); // compile-time error
/// ```
pub fn write_only(self) -> Volatile<R, WriteOnly> {
Volatile {
reference: self.reference,
access: PhantomData,
}
}
}
impl<R, T, A> fmt::Debug for Volatile<R, A>
where
R: Deref<Target = T>,
T: Copy + fmt::Debug,
A: Readable,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Volatile").field(&self.read()).finish()
}
}
impl<R> fmt::Debug for Volatile<R, WriteOnly>
where
R: Deref,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Volatile").field(&"[write-only]").finish()
}
}
#[cfg(test)]
mod tests {
use super::Volatile;
#[test]
fn test_read() {
let val = 42;
assert_eq!(Volatile::new(&val).read(), 42);
}
#[test]
fn test_write() {
let mut val = 50;
let mut volatile = Volatile::new(&mut val);
volatile.write(50);
assert_eq!(val, 50);
}
#[test]
fn test_update() {
let mut val = 42;
let mut volatile = Volatile::new(&mut val);
volatile.update(|v| *v += 1);
assert_eq!(val, 43);
}
#[test]
fn test_slice() {
let mut val = [1, 2, 3];
let mut volatile = Volatile::new(&mut val[..]);
volatile.index_mut(0).update(|v| *v += 1);
assert_eq!(val, [2, 2, 3]);
}
#[test]
fn test_struct() {
struct S {
field_1: u32,
field_2: bool,
}
let mut val = S {
field_1: 60,
field_2: true,
};
let mut volatile = Volatile::new(&mut val);
volatile.map_mut(|s| &mut s.field_1).update(|v| *v += 1);
let mut field_2 = volatile.map_mut(|s| &mut s.field_2);
assert!(field_2.read());
field_2.write(false);
assert_eq!(volatile.map(|s| &s.field_1).read(), 61);
assert_eq!(volatile.map(|s| &s.field_2).read(), false);
}
#[cfg(feature = "unstable")]
#[test]
fn test_chunks() {
let mut val = [1, 2, 3, 4, 5, 6];
let mut volatile = Volatile::new(&mut val[..]);
let mut chunks = volatile.map_mut(|s| s.as_chunks_mut().0);
chunks.index_mut(1).write([10, 11, 12]);
assert_eq!(chunks.index(0).read(), [1, 2, 3]);
assert_eq!(chunks.index(1).read(), [10, 11, 12]);
}
}

View File

@ -0,0 +1,6 @@
{
"git": {
"sha1": "ad4d90d88403ff70b536b69629c2b4d4eb10f3bf"
},
"path_in_vcs": ""
}

View File

@ -0,0 +1,10 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: weekly
ignore:
- dependency-name: "*"
update-types:
["version-update:semver-minor", "version-update:semver-patch"]

View File

@ -0,0 +1,245 @@
name: Build
on:
push:
branches:
- "master"
tags:
- "*"
schedule:
- cron: "40 4 * * *" # every day at 4:40
pull_request:
permissions:
contents: read
jobs:
stable:
name: "Test MSRV and Stable Features"
strategy:
matrix:
rust:
- nightly
- 1.59
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ matrix.rust }}
override: true
- name: Run cargo build for stable
uses: actions-rs/cargo@v1
with:
command: build
args: --no-default-features --features instructions
- name: Run cargo build for stable without instructions
uses: actions-rs/cargo@v1
with:
command: build
args: --no-default-features
- name: Run cargo doc for stable
uses: actions-rs/cargo@v1
with:
command: doc
args: --no-default-features --features instructions
- name: Run cargo doc for stable without instructions
uses: actions-rs/cargo@v1
with:
command: doc
args: --no-default-features
- name: Run cargo test for stable
uses: actions-rs/cargo@v1
with:
command: test
args: --no-default-features --features instructions
- name: Run cargo test for stable without instructions
uses: actions-rs/cargo@v1
with:
command: test
args: --no-default-features
test:
name: "Test"
strategy:
fail-fast: false
matrix:
platform: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.platform }}
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
target: x86_64-unknown-linux-musl
- name: "Print Rust Version"
run: |
rustc -Vv
cargo -Vv
- name: "Run cargo build"
uses: actions-rs/cargo@v1
with:
command: build
- name: "Run cargo doc"
uses: actions-rs/cargo@v1
with:
command: doc
- name: "Run cargo build on musl"
uses: actions-rs/cargo@v1
with:
command: build
args: --target x86_64-unknown-linux-musl
if: runner.os == 'Linux'
- name: "Run cargo test"
uses: actions-rs/cargo@v1
with:
command: test
- name: "Run cargo test on musl"
uses: actions-rs/cargo@v1
with:
command: test
args: --target x86_64-unknown-linux-musl
if: runner.os == 'Linux'
- name: "Install Rustup Targets"
run: |
rustup target add i686-unknown-linux-gnu
rustup target add thumbv7em-none-eabihf
- name: "Build on non x86_64 platforms"
run: |
cargo build --target i686-unknown-linux-gnu --no-default-features --features nightly
cargo build --target thumbv7em-none-eabihf --no-default-features --features nightly
bootloader-test:
name: "Bootloader Integration Test"
strategy:
fail-fast: false
matrix:
platform: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.platform }}
timeout-minutes: 15
steps:
- name: "Checkout Repository"
uses: actions/checkout@v4
- name: Cache binaries
id: cache-bin
uses: actions/cache@v4
with:
path: binaries
key: ${{ runner.OS }}-binaries
- name: Add binaries/bin to PATH
run: echo "$GITHUB_WORKSPACE/binaries/bin" >> $GITHUB_PATH
shell: bash
- name: "Install Rustup Components"
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
profile: minimal
components: rust-src, llvm-tools-preview
- name: "Install cargo-xbuild"
run: cargo install cargo-xbuild --debug --root binaries
- name: "Install bootimage"
run: cargo install bootimage --debug --root binaries
# install QEMU
- name: Install QEMU (Linux)
run: |
sudo apt update
sudo apt install qemu-system-x86
if: runner.os == 'Linux'
- name: Install QEMU (macOS)
run: brew install qemu
if: runner.os == 'macOS'
env:
HOMEBREW_NO_AUTO_UPDATE: 1
HOMEBREW_NO_BOTTLE_SOURCE_FALLBACK: 1
HOMEBREW_NO_INSTALL_CLEANUP: 1
- name: Install QEMU (Windows)
run: |
choco install qemu --version 2021.5.5
echo "$Env:Programfiles\qemu" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
if: runner.os == 'Windows'
shell: pwsh
- name: "Print QEMU Version"
run: qemu-system-x86_64 --version
- name: "Run Test Framework"
run: cargo xtest
shell: bash
working-directory: "testing"
check_formatting:
name: "Check Formatting"
runs-on: ubuntu-latest
timeout-minutes: 2
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
profile: minimal
components: rustfmt
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
clippy:
name: "Clippy"
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
profile: minimal
components: clippy
- uses: actions-rs/cargo@v1
with:
command: clippy
semver-checks:
name: Semver Checks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: Swatinem/rust-cache@v2
with:
shared-key: "semver-checks"
cache-targets: false
- run: cargo install cargo-semver-checks --locked
- name: Check semver
run: cargo +stable semver-checks check-release
kani:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: Swatinem/rust-cache@v2
with:
shared-key: "kani"
cache-targets: false
- uses: model-checking/kani-github-action@v1.1

View File

@ -0,0 +1,32 @@
name: Release
on:
push:
branches:
- "master"
permissions:
contents: read
jobs:
release:
name: "Release"
runs-on: ubuntu-latest
permissions:
contents: write
timeout-minutes: 15
environment: crates_io_release
steps:
- name: "Checkout Repository"
uses: actions/checkout@v4
# TODO: Remove when Python 3.11 is the default on the GitHub Actions image
- name: "Install Python 3.11"
run: sudo apt-get -y install python3.11
- name: "Run release script"
run: "python3.11 scripts/ci-release.py"
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -0,0 +1,13 @@
# Compiled files
*.o
*.so
*.rlib
*.dll
# Executables
*.exe
# Generated by Cargo
/target/
Cargo.lock
/testing/target

View File

@ -0,0 +1,6 @@
Gerd Zellweger <mail@gerdzellweger.com>
Eric Kidd <git@randomhacks.net>
Philipp Oppermann <dev@phil-opp.com>
Dan Schatzberg <schatzberg.dan@gmail.com>
John Ericson <John_Ericson@Yahoo.com>
Rex Lunae <rexlunae@gmail.com>

View File

@ -0,0 +1,98 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.59"
name = "x86_64"
version = "0.15.1"
authors = [
"Gerd Zellweger <mail@gerdzellweger.com>",
"Eric Kidd <git@randomhacks.net>",
"Philipp Oppermann <dev@phil-opp.com>",
"Dan Schatzberg <schatzberg.dan@gmail.com>",
"John Ericson <John_Ericson@Yahoo.com>",
"Rex Lunae <rexlunae@gmail.com>",
]
description = "Support for x86_64 specific instructions, registers, and structures."
documentation = "https://docs.rs/x86_64"
readme = "README.md"
keywords = [
"amd64",
"x86",
"x86_64",
"no_std",
]
categories = ["no-std"]
license = "MIT/Apache-2.0"
repository = "https://github.com/rust-osdev/x86_64"
[package.metadata.docs.rs]
all-features = true
[package.metadata.release]
dev-version = false
pre-release-commit-message = "Bump version to {{version}}"
publish = false
push = false
tag = false
[[package.metadata.release.pre-release-replacements]]
exactly = 1
file = "Changelog.md"
replace = """
# Unreleased
# {{version}} {{date}}"""
search = "# Unreleased"
[dependencies.bit_field]
path = "../bit_field-0.10.2/"
[dependencies.bitflags]
version = "2.3.2"
[dependencies.volatile]
path = "../volatile-0.4.6/"
[dependencies.core]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
[dependencies.compiler_builtins]
version = "0.1"
optional = true
[features]
abi_x86_interrupt = []
asm_const = []
const_fn = []
default = [
"nightly",
"instructions",
]
doc_auto_cfg = []
instructions = []
nightly = [
"const_fn",
"step_trait",
"abi_x86_interrupt",
"asm_const",
]
step_trait = []
rustc-dep-of-std = [
"core",
"compiler_builtins",
"bit_field/rustc-dep-of-std",
"bitflags/rustc-dep-of-std",
"volatile/rustc-dep-of-std",
]

View File

@ -0,0 +1,49 @@
[package]
authors = [
"Gerd Zellweger <mail@gerdzellweger.com>",
"Eric Kidd <git@randomhacks.net>",
"Philipp Oppermann <dev@phil-opp.com>",
"Dan Schatzberg <schatzberg.dan@gmail.com>",
"John Ericson <John_Ericson@Yahoo.com>",
"Rex Lunae <rexlunae@gmail.com>",
]
description = "Support for x86_64 specific instructions, registers, and structures."
documentation = "https://docs.rs/x86_64"
keywords = ["amd64", "x86", "x86_64", "no_std"]
categories = ["no-std"]
license = "MIT/Apache-2.0"
name = "x86_64"
readme = "README.md"
repository = "https://github.com/rust-osdev/x86_64"
version = "0.15.1"
edition = "2018"
rust-version = "1.59" # Needed to support inline asm and default const generics
[dependencies]
bit_field = "0.10.1"
bitflags = "2.3.2"
volatile = "0.4.4"
rustversion = "1.0.5"
[features]
default = ["nightly", "instructions"]
instructions = []
nightly = [ "const_fn", "step_trait", "abi_x86_interrupt", "asm_const" ]
abi_x86_interrupt = []
const_fn = []
asm_const = []
step_trait = []
doc_auto_cfg = []
[package.metadata.docs.rs]
all-features = true
[package.metadata.release]
dev-version = false
pre-release-replacements = [
{ file = "Changelog.md", search = "# Unreleased", replace = "# Unreleased\n\n# {{version}} {{date}}", exactly = 1 },
]
pre-release-commit-message = "Bump version to {{version}}"
push = false
publish = false
tag = false

View File

@ -0,0 +1,550 @@
# Unreleased
# 0.15.1 2024-03-19
## New Features
- [add constructor for `InterruptStackFrameValue`](https://github.com/rust-osdev/x86_64/pull/467)
- [add `Cr3::write_pcid_no_flush`](https://github.com/rust-osdev/x86_64/pull/472)
## Fixes
- [properly jump the address gap in CleanUp](https://github.com/rust-osdev/x86_64/pull/469)
## Other Improvements
- [added help on update_flags to get flags](https://github.com/rust-osdev/x86_64/pull/465)
- [expose DEBUG_STR more directly](https://github.com/rust-osdev/x86_64/pull/471)
# 0.15.0 2024-03-04
## Breaking changes
- [replace software_interrupt! macro with generic function](https://github.com/rust-osdev/x86_64/pull/259)
- [Use SegmentSelector in InterruptStackFrame](https://github.com/rust-osdev/x86_64/pull/263)
- [add `InvalidStarSegmentSelectors` error](https://github.com/rust-osdev/x86_64/pull/317)
- [add `PcidTooBig` error](https://github.com/rust-osdev/x86_64/pull/316)
- [implement `Index<u8>` for IDT instead of `Index<usize>`](https://github.com/rust-osdev/x86_64/pull/319)
- [change `cpu_flags`'s type to `RFlags`](https://github.com/rust-osdev/x86_64/pull/324)
- [fix `load_tss` and `GlobalDescriptorTable`](https://github.com/rust-osdev/x86_64/pull/323)
- [add an immutable getter for the level 4 page table](https://github.com/rust-osdev/x86_64/pull/327)
- [make `Cr2::read` return a result](https://github.com/rust-osdev/x86_64/pull/335)
- [remove `external_asm` and `inline_asm` features](https://github.com/rust-osdev/x86_64/pull/345)
- [Allow the GDT to be of any length](https://github.com/rust-osdev/x86_64/pull/360)
- [Remove software_interrupt! macro](https://github.com/rust-osdev/x86_64/pull/363)
- [Remove usize trait impls](https://github.com/rust-osdev/x86_64/pull/364)
- [Remove deprecated functions/flags](https://github.com/rust-osdev/x86_64/pull/368)
- [VirtAddr improvements](https://github.com/rust-osdev/x86_64/pull/370)
- [Add structures::gdt::Entry type](https://github.com/rust-osdev/x86_64/pull/380)
- [Allow GDT to be loaded with shared reference](https://github.com/rust-osdev/x86_64/pull/381)
- [seal off the `PageSize` trait](https://github.com/rust-osdev/x86_64/pull/404)
- [idt: Fixup Options structure and cleanup set_handler_fn](https://github.com/rust-osdev/x86_64/pull/226)
## New Features
- [implement `Step` for `PageTableIndex`](https://github.com/rust-osdev/x86_64/pull/464)
## Fixes
- [fix typo in docs](https://github.com/rust-osdev/x86_64/pull/265)
- [activate `feature(asm_const)`](https://github.com/rust-osdev/x86_64/pull/320)
- [gdt: Check that MAX is in range](https://github.com/rust-osdev/x86_64/pull/365)
## Other Improvements
- [allow inlining Step methods](https://github.com/rust-osdev/x86_64/pull/464)
- [constify more `Page` and `PageTableIndex` functions](https://github.com/rust-osdev/x86_64/pull/464)
# 0.14.12 2023-02-09
## New Features
- [Add `HandlerFuncType` trait](https://github.com/rust-osdev/x86_64/pull/439)
- [Support `VirtAddr::from_ptr` for `T: ?Sized`](https://github.com/rust-osdev/x86_64/pull/442)
- [Expose `Cr3::write_raw`](https://github.com/rust-osdev/x86_64/pull/445)
## Fixes
- [Use synchronizing assembly for `interrupts::enable`/`disable`](https://github.com/rust-osdev/x86_64/pull/440)
## Other Improvements
- [Optimize `Page::from_page_table_indices`](https://github.com/rust-osdev/x86_64/pull/456)
# 0.14.11 2022-09-15
## New Features
- [Add missing IDT entries #CP and #HV](https://github.com/rust-osdev/x86_64/pull/387)
- [Adding next_higher_level to PageTableLevel](https://github.com/rust-osdev/x86_64/pull/400)
- [Adding `is_empty` to PageTable](https://github.com/rust-osdev/x86_64/pull/399)
- [Add `Descriptor::tss_segment_unchecked`](https://github.com/rust-osdev/x86_64/pull/428)
- [Add the `iretq` function to the `InterruptStackFrameValue` struct.](https://github.com/rust-osdev/x86_64/pull/431)
- [add `flush_broadcast` and `tlbsync` functions](https://github.com/rust-osdev/x86_64/pull/403)
## Fixes
- [Change Star::write() to use checked subtractions](https://github.com/rust-osdev/x86_64/pull/422)
- [add workaround for recursive page tables with recursive index 511](https://github.com/rust-osdev/x86_64/pull/425)
- [Fix off-by-one in documentation](https://github.com/rust-osdev/x86_64/pull/427)
- [Fix misc doc typos](https://github.com/rust-osdev/x86_64/pull/432)
- [add compiler fences to enable and disable](https://github.com/rust-osdev/x86_64/pull/436)
## Other Improvements
- [set repr to transparent for various types](https://github.com/rust-osdev/x86_64/pull/402)
- [Remove unused `doc_cfg` feature](https://github.com/rust-osdev/x86_64/pull/408)
- [Enable `doc_auto_cfg` on `docs.rs` builds](https://github.com/rust-osdev/x86_64/pull/407)
- [Add Descriptor::dpl const method and use it in GDT construction](https://github.com/rust-osdev/x86_64/pull/410)
- [Bump bitflags to 2.3.2](https://github.com/rust-osdev/x86_64/pull/426)
- [Add `inline` attribute to segment functions](https://github.com/rust-osdev/x86_64/pull/430)
# 0.14.10 2022-07-10
## New Features
- [Add `registers::debug`](https://github.com/rust-osdev/x86_64/pull/286)
- [Provide null segment selector as associated constant on `SegmentSelector`](https://github.com/rust-osdev/x86_64/pull/373)
- [Add getters for the page table frame mapping](https://github.com/rust-osdev/x86_64/pull/385)
## Fixes
- [Fix align functions](https://github.com/rust-osdev/x86_64/pull/375)
- [Correct wrong comment](https://github.com/rust-osdev/x86_64/pull/374)
## Other Improvements
- [Cleanup Segment macros](https://github.com/rust-osdev/x86_64/pull/376)
- [Update comment and docs](https://github.com/rust-osdev/x86_64/pull/382)
# 0.14.9 - 2022-03-31
## New Features
- Address in `VirtAddrNotValid` and `PhysAddrNotValid` is now public ([#340](https://github.com/rust-osdev/x86_64/pull/340)).
- This field now contains the whole invalid address ([#347](https://github.com/rust-osdev/x86_64/pull/347)).
- Remove all uses of external assembly ([#343](https://github.com/rust-osdev/x86_64/pull/343))
- `external_asm` and `inline_asm` features are deprecated and now have no effect.
- `instructions` feature (on by default) now requires Rust 1.59
- Specific MSRV now noted in `README` ([#355](https://github.com/rust-osdev/x86_64/pull/355))
- Implement `core::iter::Step` for `VirtAddr` and `Page` ([#342](https://github.com/rust-osdev/x86_64/pull/342))
- This trait is only available on nightly.
- Gated behind `step_trait` feature flag
- Add `UCet` and `SCet` registers ([#349](https://github.com/rust-osdev/x86_64/pull/349))
- Use [`rustversion`](https://crates.io/crates/rustversion) to mark certain functions `const fn` on Rust 1.61 ([#353](https://github.com/rust-osdev/x86_64/pull/353))
- `Entry::handler_addr()` is now public ([#354](https://github.com/rust-osdev/x86_64/pull/354))
- Increase packed structure alignment ([#362](https://github.com/rust-osdev/x86_64/pull/362))
- Make more address methods `const fn` ([#369](https://github.com/rust-osdev/x86_64/pull/369))
- `VirtAddr::as_ptr()`
- `VirtAddr::as_mut_ptr()`
- `PhysAddr::new()`
- `PhysAddr::try_new()`
## Bug fixes and Documentation
- Fixed overflow bug in PageRangeInclusive ([#351](https://github.com/rust-osdev/x86_64/pull/351))
- Remove stabilized `const_fn_fn_ptr_basics` and `const_fn_trait_bound` features ([#352](https://github.com/rust-osdev/x86_64/pull/352))
- Don't set `nomem` in `load_tss` ([#358](https://github.com/rust-osdev/x86_64/pull/358))
- Correctly initialize TSS's IOPB to be empty ([#357](https://github.com/rust-osdev/x86_64/pull/357))
- Improve `GlobalDescriptorTable::add_entry` error handling ([#361](https://github.com/rust-osdev/x86_64/pull/361))
- Update `tss_segment` documentation ([#366](https://github.com/rust-osdev/x86_64/pull/366))
# 0.14.8 2022-02-03
- Add `Cr2::read_raw` ([#334](https://github.com/rust-osdev/x86_64/pull/334))
- Add support for `MXCSR` register ([#336](https://github.com/rust-osdev/x86_64/pull/336))
# 0.14.7 2021-12-18
- fix: build error on the latest nightly ([#329](https://github.com/rust-osdev/x86_64/pull/329))
- add `set_general_handler` macro ([#285](https://github.com/rust-osdev/x86_64/pull/285))
- Derive common traits for number, range and enum types ([#315](https://github.com/rust-osdev/x86_64/pull/315))
- Add the VMM Communication Exception (`#VC`) to the `InterruptDescriptorTable` ([#313](https://github.com/rust-osdev/x86_64/pull/313))
- fix: enable manipulation of `InterruptStackFrame` ([#312](https://github.com/rust-osdev/x86_64/pull/312))
- fix docs for `page_table_index` ([#318](https://github.com/rust-osdev/x86_64/pull/318))
- Remove redundant alignment check ([#314](https://github.com/rust-osdev/x86_64/pull/314))
- fix(idt): fix panic messages for `index` and `#VC` ([#321](https://github.com/rust-osdev/x86_64/pull/321))
- remove `const_assert!` in favor of std's `assert!` ([#326](https://github.com/rust-osdev/x86_64/pull/326))
- Move bootloader integration test to separate CI job ([#330](https://github.com/rust-osdev/x86_64/pull/330))
# 0.14.6 2021-09-20
- New `registers::segmentation` module ([#309](https://github.com/rust-osdev/x86_64/pull/309)), containing:
- `instructions::segmentation::{Segment, Segment64, CS, DS, ES, FS, GS, SS}`
- `structures::gdt::SegmentSelector`
- Old locations still re-export all the types, so this is not a breaking change.
- Fixes build so that `cargo doc --no-default-features` succeeds.
# 0.14.5 2021-09-04
- Add `ExceptionVector` enum and additional flags to `PageFaultErrorCode` ([#303](https://github.com/rust-osdev/x86_64/pull/303))
- Add `clean_up` and `clean_up_with_filter` methods to deallocate unused page tables ([#264](https://github.com/rust-osdev/x86_64/pull/264))
- Rename some XCr0 and CR4 flags ([#275](https://github.com/rust-osdev/x86_64/pull/275))
- Expose `MapperFlush::new` and `MapperFlushAll::new` constructor functions ([#296](https://github.com/rust-osdev/x86_64/pull/296))
- Use `#[cfg(doc)]` instead of docs.rs-specific cfg flag ([#287](https://github.com/rust-osdev/x86_64/pull/287))
- Some documentation updates:
- Update segment register references in `GDT::load*` method to non-deprecated methods ([#301](https://github.com/rust-osdev/x86_64/pull/301))
- Remove a panic note ([#300](https://github.com/rust-osdev/x86_64/pull/300))
- Update `bit_field` dependency ([#306](https://github.com/rust-osdev/x86_64/pull/306))
# 0.14.4 2021-07-19
- Add `instructions::tables::sgdt` ([#279](https://github.com/rust-osdev/x86_64/pull/279))
- Improve control register bits ([#273](https://github.com/rust-osdev/x86_64/pull/273))
- Add `Cr0` bits: `EXTENSION_TYPE` (ET)
- Add `Cr4` bits:
- `KEY_LOCKER` (KL)
- `CONTROL_FLOW_ENFORCEMENT` (CET)
- `PROTECTION_KEY_SUPERVISOR` (PKS)
- Add `XCr0` bits: `BNDREG`, `BNDCSR`, `OPMASK`, `ZMM_HI256`, `HI16_ZMM`
- Add consistency checks for `XCr0` bits
- Add `SelectorErrorCode` for parsing interrupt error codes from `#TS`, `#NP`, `#SS`, and `#GP` ([#274](https://github.com/rust-osdev/x86_64/pull/274))
- Make `addr::{align_up, align_down}` const ([#270](https://github.com/rust-osdev/x86_64/pull/270))
- Make `structures::idt` available on stable Rust ([#271](https://github.com/rust-osdev/x86_64/pull/271))
- Use dummy types for the `HandlerFunc`s if the `"abi_x86_interrupt"` feature is disabled
- Add unsafe `set_handler_addr` that just takes a `VirtAddr`
- Add common abstractions for x86 Segments ([#258](https://github.com/rust-osdev/x86_64/pull/258))
- Add `SS`, `CS`, `DS`, `ES`, `FS`, `GS` marker types
- Add `Segment` trait for reading/writing the segment register
- Add `Segment64` trait for reading/writing the segment base
- Add `GS::swap()`
- Deprecate the corresponding free functions:
- `cs`, `set_cs`
- `swap_gs`
- `load_{ss,ds,es,fs,gs}`
- `{wr,rd}{fs,gs}base`
- Bug fixes:
- Corrected documentation typo ([#278](https://github.com/rust-osdev/x86_64/pull/278))
- Avoided off-by-one error in `GlobalDescriptorTable::from_raw_slice` when `"const_fn"` is not enabled ([#269](https://github.com/rust-osdev/x86_64/pull/269))
- Specify `sysv64` as the calling convention for the `"external_asm"` functions ([#267](https://github.com/rust-osdev/x86_64/pull/267))
# 0.14.3 2021-05-14
- Make the following types aliases of the new `PortGeneric` type ([#248](https://github.com/rust-osdev/x86_64/pull/248)):
- `Port<T> = PortGeneric<T, ReadWriteAccess>`
- `PortReadOnly<T> = PortGeneric<T, ReadOnlyAccess>`
- `PortWriteOnly<T> = PortGeneric<T, WriteOnlyAccess>`
- The following methods no longer require the `nightly` feature to be `const fn`s ([#255](https://github.com/rust-osdev/x86_64/pull/255)):
- `PageTable::new`
- `GlobalDescriptorTable::from_raw_slice`
- `MappedFrame::{start_address, size}`
- `Page<Size4KiB>::p1_index`
- Add `Debug` implementation for `InterruptDescriptorTable` ([#253](https://github.com/rust-osdev/x86_64/pull/253))
- Improve `Debug` implementations for `Entry` and `EntryOptions`
# 0.14.2 2021-05-13
- Multiple improvements to assembly code ([#251](https://github.com/rust-osdev/x86_64/pull/251))
- Added `external_asm` implementations for `bochs_breakpoint` and `XCr0`
- Updated `options` for `asm!` blocks (to improve performance)
- Updated docs to use [`doc_cfg`](https://doc.rust-lang.org/unstable-book/language-features/doc-cfg.html)
# 0.14.1 2021-05-06
- Use new `const_fn_trait_bound` feature to fix build on latest nightly ([#250](https://github.com/rust-osdev/x86_64/pull/250))
- _Attention:_ The `const_fn` feature now requires at least Rust nightly `2021-05-06`.
- Add support for `sidt` instruction ([#246](https://github.com/rust-osdev/x86_64/pull/246))
- Fix Debug and PartialEq implementations for IDT entry type ([#249](https://github.com/rust-osdev/x86_64/pull/249))
- Looser trait bounds for Port types ([#247](https://github.com/rust-osdev/x86_64/pull/247))
# 0.14.0 2021-04-11
- **Breaking:** Take the interrupt stack frame by value (not by reference) [#242](https://github.com/rust-osdev/x86_64/pull/242)
- **Breaking:** Change `InterruptStackFrame::as_mut` to return a `Volatile<_>` wrapper [#242](https://github.com/rust-osdev/x86_64/pull/242)
# 0.13.5 2021-04-01
- Add support for `XCR0` register ([#239](https://github.com/rust-osdev/x86_64/pull/239))
# 0.13.4 2021-03-27
- Implement more fmt traits for addr types ([#237](https://github.com/rust-osdev/x86_64/pull/237))
# 0.13.3 2021-03-16
- Implement `Clone` for `PageTable` ([#236](https://github.com/rust-osdev/x86_64/pull/236))
# 0.13.2 2021-02-02
- Fix build on latest nightly: The feature `const_in_array_repeat_expressions` was removed ([#230](https://github.com/rust-osdev/x86_64/pull/230))
# 0.13.1 2020-12-29
- PCID support instructions ([#169](https://github.com/rust-osdev/x86_64/pull/169))
# 0.13.0 2020-12-28
- **Breaking:** Also return flags for `MapperAllSizes::translate()` ([#207](https://github.com/rust-osdev/x86_64/pull/207))
- **Breaking:** Restructure the `TranslateResult` type and create separate `Translate` trait ([#211](https://github.com/rust-osdev/x86_64/pull/211))
- **Breaking:** Rename `PhysToVirt` trait to `PageTableFrameMapping` ([#214](https://github.com/rust-osdev/x86_64/pull/214))
- **Breaking:** Use custom error types instead of `()` ([#199](https://github.com/rust-osdev/x86_64/pull/199))
- **Breaking:** Remove deprecated items
- `UnusedPhysFrame`
- `ExceptionStackFrame`
- `VirtAddr::new_unchecked`
- `interrupts::enable_interrupts_and_hlt`
- **Breaking:** Make `DescriptorTablePointer::base` a `VirtAddr` ([#215](https://github.com/rust-osdev/x86_64/pull/215))
- **Breaking:** Change return type of `read_rip` to `VirtAddr` ([#216](https://github.com/rust-osdev/x86_64/pull/216))
- **Breaking:** Make writing the RFLAGS register unsafe ([#219](https://github.com/rust-osdev/x86_64/pull/219))
- **Breaking:** Remove `PortReadWrite` trait, which is no longer needed ([#217](https://github.com/rust-osdev/x86_64/pull/217))
- Relax `Sized` requirement for `FrameAllocator` in `Mapper::map_to` ([#204](https://github.com/rust-osdev/x86_64/pull/204))
# 0.12.4 2020-12-28
- Fix bad conversion from llvm_asm! to asm! ([#218](https://github.com/rust-osdev/x86_64/pull/218))
- GDT: Add `load_unchecked`, `from_raw_slice`, and `as_raw_slice` ([#210](https://github.com/rust-osdev/x86_64/pull/210))
# 0.12.3 2020-10-31
- Use `asm!` instead of perma-unstable `llvm_asm!` macro ([#165](https://github.com/rust-osdev/x86_64/pull/165))
- Make `GlobalDescriptorTable::add_entry` a const fn ([#191](https://github.com/rust-osdev/x86_64/pull/191))
- Rename `enable_interrupts_and_hlt` to `enable_and_hlt` ([#206](https://github.com/rust-osdev/x86_64/pull/206))
- Provide functions for accessing the underlying L4 table for mapper types ([#184](https://github.com/rust-osdev/x86_64/pull/184))
- Remove Trait constraint for `Port::new()` ([#188](https://github.com/rust-osdev/x86_64/pull/188))
# 0.12.2 2020-09-29
- Add additional `DescriptorFlags` and aliases compatible with `syscall`/`sysenter` ([#181](https://github.com/rust-osdev/x86_64/pull/181))
- Fix (another) build error on latest nightly ([#186](https://github.com/rust-osdev/x86_64/pull/186))
# 0.12.1 2020-09-24
- Fix build error on latest nightly ([#182](https://github.com/rust-osdev/x86_64/pull/182))
# 0.12.0 2020-09-23
- **Breaking**: Decouple instructions into a separate feature flag ([#179](https://github.com/rust-osdev/x86_64/pull/179))
- Gates the `instructions` module by a new `instructions` feature (enabled by default).
- Rename the `stable` feature to `external_asm`
- `PageTable::new` is no longer a `const fn` on stable (i.e. without the `nightly` feature)
# 0.11.8 2020-09-23
- Add `VirtAddr::is_null` ([#180](https://github.com/rust-osdev/x86_64/pull/180))
# 0.11.7 2020-09-11
- Fix const_item_mutation warnings added in latest Rust nightly ([#178](https://github.com/rust-osdev/x86_64/pull/178))
# 0.11.6 2020-09-11 (yanked)
- (accidental empty release)
# 0.11.5 2020-09-03
- Don't rely on promotion of `PageTableEntry::new` inside a `const fn` ([#175](https://github.com/rust-osdev/x86_64/pull/175))
# 0.11.4 2020-09-01
- Add a function for the `nop` instruction ([#174](https://github.com/rust-osdev/x86_64/pull/174))
# ~~0.11.3 2020-09-01~~
- (accidental release, yanked)
# 0.11.2 2020-08-13
- Add rdfsbase, rdgsbase, wrfsbase, wrgsbase ([#172](https://github.com/rust-osdev/x86_64/pull/172))
# 0.11.1
- Export `PhysAddrNotValid` and `VirtAddrNotValid` error types ([#163](https://github.com/rust-osdev/x86_64/pull/163))
# 0.11.0
- **Breaking**: Handle parent table flags in Mapper methods ([#114](https://github.com/rust-osdev/x86_64/pull/114))
# 0.10.3
- Fix: Inclusive ranges is_empty() comparison ([#156](https://github.com/rust-osdev/x86_64/pull/156))
# 0.10.2
- **Nightly Breakage**: Use `llvm_asm!` instead of deprecated `asm!` macro ([#151](https://github.com/rust-osdev/x86_64/pull/151))
- Return the correct RPL from GDT::add_entry() ([#153](https://github.com/rust-osdev/x86_64/pull/153))
# 0.10.1
- Add InterruptDescriptorTable::load_unsafe ([#137](https://github.com/rust-osdev/x86_64/pull/137))
# 0.10.0
- **Breaking**: Make `map_to` and `update_flags` unsafe ([#135](https://github.com/rust-osdev/x86_64/pull/135))
- **Breaking**: Make `FrameDeallocator::deallocate_frame` unsafe ([#146](https://github.com/rust-osdev/x86_64/pull/146))
- **Breaking**: Don't pass small trivially copyable types by reference ([#147](https://github.com/rust-osdev/x86_64/pull/147))
- Various improvements to VirtAddr and PhysAddr ([#141](https://github.com/rust-osdev/x86_64/pull/141))
- Among other things, this renamed the `VirtAddr::new_unchecked` function to `new_truncate`.
- Add `const_fn!{}` macro to make functions const without duplication ([#144](https://github.com/rust-osdev/x86_64/pull/144))
- Also makes some more functions `const`.
- Add `{PhysFrame,Page}::from_start_address_unchecked` ([#142](https://github.com/rust-osdev/x86_64/pull/142))
- Use `#[inline]` everywhere ([#145](https://github.com/rust-osdev/x86_64/pull/145))
- In `VirtAddr::new_truncate`, use shift instead of mul and div ([#143](https://github.com/rust-osdev/x86_64/pull/143))
- Use `Self::new()` in `InterruptDescriptorTable::reset()` ([#148](https://github.com/rust-osdev/x86_64/pull/148))
# 0.9.6
- Add an enable_interrupts_and_hlt function that executes `sti; hlt` ([#138](https://github.com/rust-osdev/x86_64/pull/138))
- Fix some clippy warnings ([#130](https://github.com/rust-osdev/x86_64/pull/130))
- Resolve remaining clippy warnings and add clippy job to CI ([#132](https://github.com/rust-osdev/x86_64/pull/132))
# 0.9.5
- Add `#[inline]` attribute to small functions ([#129](https://github.com/rust-osdev/x86_64/pull/129))
# 0.9.4
- asm: add target_env = "musl" to pickup the underscore asm names ([#128](https://github.com/rust-osdev/x86_64/pull/128))
# 0.9.3
- Enable usage with non-nightly rust ([#127](https://github.com/rust-osdev/x86_64/pull/127))
# 0.9.2
- Remove the `cast` dependency ([#124](https://github.com/rust-osdev/x86_64/pull/124))
# 0.9.1
- Improve PageTableIndex and PageOffset ([#122](https://github.com/rust-osdev/x86_64/pull/122))
# 0.9.0
- **Breaking:** Return the UnusedPhysFrame on MapToError::PageAlreadyMapped ([#118](https://github.com/rust-osdev/x86_64/pull/118))
- Add User Mode registers ([#119](https://github.com/rust-osdev/x86_64/pull/119))
# 0.8.3
- Allow immediate port version of in/out instructions ([#115](https://github.com/rust-osdev/x86_64/pull/115))
- Make more functions const ([#116](https://github.com/rust-osdev/x86_64/pull/116))
# 0.8.2
- Add support for cr4 control register ([#111](https://github.com/rust-osdev/x86_64/pull/111))
# 0.8.1
- Fix: Add required reexport for new UnusedPhysFrame type ([#110](https://github.com/rust-osdev/x86_64/pull/110))
# 0.8.0
- **Breaking:** Replace `ux` dependency with custom wrapper structs ([#91](https://github.com/rust-osdev/x86_64/pull/91))
- **Breaking:** Add new UnsafePhysFrame type and use it in Mapper::map_to ([#89](https://github.com/rust-osdev/x86_64/pull/89))
- **Breaking:** Rename divide_by_zero field of interrupt descriptor table to divide_error ([#108](https://github.com/rust-osdev/x86_64/pull/108))
- **Breaking:** Introduce new diverging handler functions for double faults and machine check exceptions ([#109](https://github.com/rust-osdev/x86_64/pull/109))
- _Possibly Breaking:_ Make Mapper trait object safe by adding `Self: Sized` bounds on generic functions ([#84](https://github.com/rust-osdev/x86_64/pull/84))
# 0.7.7
- Add `slice` and `slice_mut` methods to IDT ([#95](https://github.com/rust-osdev/x86_64/pull/95))
# 0.7.6
- Use repr C to suppress not-ffi-safe when used with extern handler functions ([#94](https://github.com/rust-osdev/x86_64/pull/94))
# 0.7.5
- Add FsBase and GsBase register support ([#87](https://github.com/rust-osdev/x86_64/pull/87))
# 0.7.4
- Remove raw-cpuid dependency and use rdrand intrinsics ([#85](https://github.com/rust-osdev/x86_64/pull/85))
- Update integration tests to use new testing framework ([#86](https://github.com/rust-osdev/x86_64/pull/86))
# 0.7.3
- Add a new `OffsetPageTable` mapper type ([#83](https://github.com/rust-osdev/x86_64/pull/83))
# 0.7.2
- Add `instructions::bochs_breakpoint` and `registers::read_rip` functions ([#79](https://github.com/rust-osdev/x86_64/pull/79))
- Mark all single instruction functions as `#[inline]` ([#79](https://github.com/rust-osdev/x86_64/pull/79))
- Update GDT docs, add user_data_segment function and WRITABLE flag ([#78](https://github.com/rust-osdev/x86_64/pull/78))
- Reexport MappedPageTable on non-x86_64 platforms too ([#82](https://github.com/rust-osdev/x86_64/pull/82))
# 0.7.1
- Add ring-3 flag to GDT descriptor ([#77](https://github.com/rust-osdev/x86_64/pull/77))
# 0.7.0
- **Breaking**: `Port::read` and `PortReadOnly::read` now take `&mut self` instead of `&self` ([#76](https://github.com/rust-osdev/x86_64/pull/76)).
# 0.6.0
- **Breaking**: Make the `FrameAllocator` unsafe to implement. This way, we can force the implementer to guarantee that all frame allocators are valid. See [#69](https://github.com/rust-osdev/x86_64/issues/69) for more information.
# 0.5.5
- Use [`cast`](https://docs.rs/cast/0.2.2/cast/) crate instead of less general `usize_conversions` crate.
# 0.5.4
- Update dependencies to latest versions (fix [#67](https://github.com/rust-osdev/x86_64/issues/67))
# 0.5.3
- Add `PortReadOnly` and `PortWriteOnly` types in `instructions::port` module ([#66](https://github.com/rust-osdev/x86_64/pull/66)).
# 0.5.2
- Update documentation of `MappedPageTable`: Require that passed `level_4_table` is valid.
# 0.5.1
- Add `PageTable::{iter, iter_mut}` functions to iterate over page table entries.
# 0.5.0
## Breaking
- The `random` module is now a submodule of the `instructions` module.
- The `structures::paging` module was split into several submodules:
- The `NotGiantPageSize`, `PageRange`, and `PageRangeInclusive` types were moved to a new `page` submodule.
- The `PhysFrameRange` and `PhysFrameRangeInclusive` types were moved to a new `frame` submodule.
- The `FrameError` and `PageTableEntry` types were moved to a new `page_table` submodule.
- The `MapperFlush`, `MapToError`, `UnmapError`, and `FlagUpdateError` types were moved to a new `mapper` submodule.
- The `structures::paging` module received the following changes:
- The `Mapper::translate_page` function now returns a `Result` with a new `TranslateError` error type.
- The `NotRecursivelyMapped` error type was removed.
- The `instructions::int3` function was moved into the `instructions::interrupts` module.
- Removed some old deprecated functions.
- Made modifications of the interrupt stack frame unsafe by introducing a new wrapper type and an unsafe `as_mut` method.
## Other
- Added a new `structures::paging::MapperAllSizes` trait with generic translation methods and implement it for `MappedPageTable` and `RecursivePageTable`.
- Added a new `structures::paging::MappedPageTable` type that implements the `Mapper` and `MapperAllSizes` traits.
- Added a `software_interrupt` macro to invoke arbitrary `int x` instructions.
- Renamed the `ExceptionStackFrame` type to `InterruptStackFrame`.
# 0.4.2
- Add `RdRand::get_u{16, 32, 64}` methods
- Deprecate `RdRand::get` because it does not check for failure
- Make `RdRand` Copy
# 0.4.1
- Add support for the RdRand instruction (random number generation)
# 0.4.0
## Breaking
- Make `Mapper::map_to` and `Mapper::identity_map` unsafe because it is possible to break memory safety by passing invalid arguments.
- Rename `FrameAllocator::alloc` to `allocate_frame` and `FrameDeallocator::dealloc` to `deallocate_frame`.
- Remove `From<os_bootinfo::FrameRange>` implementation for `PhysFrameRange`
- The `os_bootinfo` crate is no longer used by the `bootloader` crate.
- It is not possible to provide an implementation for all `os_bootinfo` versions.
## Other
- Update to 2018 edition
# 0.3.6
- Add a `SIZE` constant to the `Page` type
- Add two interrupt tests to the `testing` sub-crate

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,23 @@
The MIT License (MIT)
Copyright (c) 2018 Philipp Oppermann
Copyright (c) 2015 Gerd Zellweger
Copyright (c) 2015 The libcpu Developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -0,0 +1,42 @@
# x86_64 library
[![Crates.io](https://img.shields.io/crates/v/x86_64)](https://crates.io/crates/x86_64)
[![Build Status](https://github.com/rust-osdev/x86_64/workflows/Build/badge.svg)](https://github.com/rust-osdev/x86_64/actions?query=workflow%3ABuild) [![docs.rs](https://img.shields.io/badge/docs.rs-documentation-green.svg)](https://docs.rs/x86_64)
Support for x86_64 specific instructions (e.g. TLB flush), registers (e.g. control registers), and structures (e.g. page tables).
## Crate Feature Flags
* `nightly`: Enables features only available on nightly Rust; enabled by default.
* `instructions`: Enabled by default, turns on x86\_64 specific instructions, and dependent features. Only available for x86\_64 targets.
## Minimum Supported Rust Version (MSRV)
If no nightly features are enabled, Rust 1.59.0 is required.
Nightly features can be disabled by compiling with either:
- `--no-default-features --features instructions`
- `--no-default-features`
If the `nightly` feature or any of its sub-features is enabled (which is the
default), a recent nightly is required.
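As a quick orientation, the sketch below shows roughly how the address and paging types fit together (a minimal example, assuming an x86_64 target; names are taken from the public API documented on docs.rs):
```rust
use x86_64::structures::paging::{Page, PhysFrame, Size4KiB};
use x86_64::{PhysAddr, VirtAddr};

fn addresses_and_pages() {
    // Constructors check that the address is canonical / in range.
    let virt = VirtAddr::new(0xffff_8000_0000_1234);
    let phys = PhysAddr::new(0x2000);

    // Pages and frames are the 4 KiB-aligned containers of an address.
    let page: Page<Size4KiB> = Page::containing_address(virt);
    let frame: PhysFrame<Size4KiB> = PhysFrame::containing_address(phys);

    assert_eq!(page.start_address(), VirtAddr::new(0xffff_8000_0000_1000));
    assert_eq!(frame.start_address(), phys);
}
```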
## Other OS development crates
This crate does not attempt to handle every facet of OS development. Other
useful crates in this space include:
- [`raw-cpuid`](https://crates.io/crates/raw-cpuid): safe wrappers around the
[`cpuid` instruction](https://en.wikipedia.org/wiki/CPUID)
- Provides parsed versions of the CPUID data, rather than just raw binary values.
- Support for AMD and Intel specific values.
- Works on x86 and x86_64 systems, in both user and kernel mode.
- [`uefi`](https://crates.io/crates/uefi): abstractions for
[UEFI](https://en.wikipedia.org/wiki/Unified_Extensible_Firmware_Interface)
(the successor to BIOS)
- Provides UEFI tables, functions, and types.
- Useful for writing UEFI applications, or calling UEFI functions from your OS.
- Works on a variety of modern platforms, not just x86_64.
- [`volatile`](https://crates.io/crates/volatile): interface to
[`read_volatile`](https://doc.rust-lang.org/std/ptr/fn.read_volatile.html) and
[`write_volatile`](https://doc.rust-lang.org/std/ptr/fn.write_volatile.html)
- Makes it easier to program [MMIO](https://en.wikipedia.org/wiki/Memory-mapped_I/O) interfaces and devices.
- Works on any Rust target.

View File

@ -0,0 +1,7 @@
# Security Policy
If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
Please disclose it at [security advisory](https://github.com/rust-osdev/x86_64/security/advisories/new).
This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure.

View File

@ -0,0 +1 @@
nightly

View File

@ -0,0 +1,36 @@
import json
import subprocess
import tomllib
import urllib.request
with open("Cargo.toml", "rb") as f:
cargo_toml = tomllib.load(f)
crate_version = cargo_toml["package"]["version"]
print("Detected crate version " + crate_version)
index_url = "https://index.crates.io/x8/6_/x86_64"
for line in urllib.request.urlopen(index_url):
version_info = json.loads(line)
assert (version_info["name"] == "x86_64")
if version_info["vers"] == crate_version:
print("Version " + crate_version + " already exists on crates.io")
break
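# Note: this `else` belongs to the `for` loop above; it runs only if the loop
# finishes without `break`, i.e. the current version is not yet on crates.io.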
else:
print("Could not find version " + crate_version +
" on crates.io; creating a new release")
print(" Running `cargo publish`")
subprocess.run(["cargo", "publish"], check=True)
tag_name = "v" + crate_version
print(" Tagging commit as " + tag_name)
sha = subprocess.run(["git", "rev-parse", "HEAD"], check=True,
stdout=subprocess.PIPE).stdout.decode("utf-8").strip()
subprocess.run([
"gh", "api", "/repos/rust-osdev/x86_64/git/refs",
"-X", "POST", "-H", "Accept: application/vnd.github.v3+json",
"-F", "ref=refs/tags/" + tag_name,
"-F", "sha="+sha
])
print(" Done")

View File

@ -0,0 +1,802 @@
//! Physical and virtual addresses manipulation
use core::convert::TryFrom;
use core::fmt;
#[cfg(feature = "step_trait")]
use core::iter::Step;
use core::ops::{Add, AddAssign, Sub, SubAssign};
use crate::structures::paging::page_table::PageTableLevel;
use crate::structures::paging::{PageOffset, PageTableIndex};
use bit_field::BitField;
const ADDRESS_SPACE_SIZE: u64 = 0x1_0000_0000_0000;
/// A canonical 64-bit virtual memory address.
///
/// This is a wrapper type around an `u64`, so it is always 8 bytes, even when compiled
/// on non 64-bit systems. The
/// [`TryFrom`](https://doc.rust-lang.org/std/convert/trait.TryFrom.html) trait can be used for performing conversions
/// between `u64` and `usize`.
///
/// On `x86_64`, only the 48 lower bits of a virtual address can be used. The top 16 bits need
/// to be copies of bit 47, i.e. the most significant bit. Addresses that fulfil this criterion
/// are called “canonical”. This type guarantees that it always represents a canonical address.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct VirtAddr(u64);
/// A 64-bit physical memory address.
///
/// This is a wrapper type around an `u64`, so it is always 8 bytes, even when compiled
/// on non 64-bit systems. The
/// [`TryFrom`](https://doc.rust-lang.org/std/convert/trait.TryFrom.html) trait can be used for performing conversions
/// between `u64` and `usize`.
///
/// On `x86_64`, only the 52 lower bits of a physical address can be used. The top 12 bits need
/// to be zero. This type guarantees that it always represents a valid physical address.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct PhysAddr(u64);
/// A passed `u64` was not a valid virtual address.
///
/// This means that bits 48 to 64 are not
/// a valid sign extension and are not null either. So automatic sign extension would have
/// overwritten possibly meaningful bits. This likely indicates a bug, for example an invalid
/// address calculation.
///
/// Contains the invalid address.
pub struct VirtAddrNotValid(pub u64);
impl core::fmt::Debug for VirtAddrNotValid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("VirtAddrNotValid")
.field(&format_args!("{:#x}", self.0))
.finish()
}
}
impl VirtAddr {
/// Creates a new canonical virtual address.
///
/// The provided address should already be canonical. If you want to check
/// whether an address is canonical, use [`try_new`](Self::try_new).
///
/// ## Panics
///
/// This function panics if the bits in the range 48 to 64 are invalid
/// (i.e. are not a proper sign extension of bit 47).
#[inline]
pub const fn new(addr: u64) -> VirtAddr {
// TODO: Replace with .ok().expect(msg) when that works on stable.
match Self::try_new(addr) {
Ok(v) => v,
Err(_) => panic!("virtual address must be sign extended in bits 48 to 64"),
}
}
/// Tries to create a new canonical virtual address.
///
/// This function checks whether the given address is canonical
/// and returns an error otherwise. An address is canonical
/// if bits 48 to 64 are a correct sign
/// extension (i.e. copies of bit 47).
#[inline]
pub const fn try_new(addr: u64) -> Result<VirtAddr, VirtAddrNotValid> {
let v = Self::new_truncate(addr);
if v.0 == addr {
Ok(v)
} else {
Err(VirtAddrNotValid(addr))
}
}
/// Creates a new canonical virtual address, throwing out bits 48..64.
///
/// This function performs sign extension of bit 47 to make the address
/// canonical, overwriting bits 48 to 64. If you want to check whether an
/// address is canonical, use [`new`](Self::new) or [`try_new`](Self::try_new).
#[inline]
pub const fn new_truncate(addr: u64) -> VirtAddr {
// By doing the right shift as a signed operation (on a i64), it will
// sign extend the value, repeating the leftmost bit.
VirtAddr(((addr << 16) as i64 >> 16) as u64)
}
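// Worked example of the sign-extension trick above (illustrative values):
//   addr                         = 0x0000_9000_0000_0000  (bit 47 set, bits 48..64 clear -> non-canonical)
//   addr << 16                   = 0x9000_0000_0000_0000
//   ((addr << 16) as i64) >> 16  = 0xffff_9000_0000_0000  (arithmetic shift copies bit 47 upward)
// so `new_truncate(0x0000_9000_0000_0000)` returns the canonical 0xffff_9000_0000_0000.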
/// Creates a new virtual address, without any checks.
///
/// ## Safety
///
/// You must make sure bits 48..64 are equal to bit 47. This is not checked.
#[inline]
pub const unsafe fn new_unsafe(addr: u64) -> VirtAddr {
VirtAddr(addr)
}
/// Creates a virtual address that points to `0`.
#[inline]
pub const fn zero() -> VirtAddr {
VirtAddr(0)
}
/// Converts the address to an `u64`.
#[inline]
pub const fn as_u64(self) -> u64 {
self.0
}
/// Creates a virtual address from the given pointer
#[cfg(target_pointer_width = "64")]
#[inline]
pub fn from_ptr<T: ?Sized>(ptr: *const T) -> Self {
Self::new(ptr as *const () as u64)
}
/// Converts the address to a raw pointer.
#[cfg(target_pointer_width = "64")]
#[inline]
pub const fn as_ptr<T>(self) -> *const T {
self.as_u64() as *const T
}
/// Converts the address to a mutable raw pointer.
#[cfg(target_pointer_width = "64")]
#[inline]
pub const fn as_mut_ptr<T>(self) -> *mut T {
self.as_ptr::<T>() as *mut T
}
/// Convenience method for checking if a virtual address is null.
#[inline]
pub const fn is_null(self) -> bool {
self.0 == 0
}
/// Aligns the virtual address upwards to the given alignment.
///
/// See the `align_up` function for more information.
///
/// # Panics
///
/// This function panics if the resulting address is higher than
/// `0xffff_ffff_ffff_ffff`.
#[inline]
pub fn align_up<U>(self, align: U) -> Self
where
U: Into<u64>,
{
VirtAddr::new_truncate(align_up(self.0, align.into()))
}
/// Aligns the virtual address downwards to the given alignment.
///
/// See the `align_down` function for more information.
#[inline]
pub fn align_down<U>(self, align: U) -> Self
where
U: Into<u64>,
{
self.align_down_u64(align.into())
}
/// Aligns the virtual address downwards to the given alignment.
///
/// See the `align_down` function for more information.
#[inline]
pub(crate) const fn align_down_u64(self, align: u64) -> Self {
VirtAddr::new_truncate(align_down(self.0, align))
}
/// Checks whether the virtual address has the demanded alignment.
#[inline]
pub fn is_aligned<U>(self, align: U) -> bool
where
U: Into<u64>,
{
self.is_aligned_u64(align.into())
}
/// Checks whether the virtual address has the demanded alignment.
#[inline]
pub(crate) const fn is_aligned_u64(self, align: u64) -> bool {
self.align_down_u64(align).as_u64() == self.as_u64()
}
/// Returns the 12-bit page offset of this virtual address.
#[inline]
pub const fn page_offset(self) -> PageOffset {
PageOffset::new_truncate(self.0 as u16)
}
/// Returns the 9-bit level 1 page table index.
#[inline]
pub const fn p1_index(self) -> PageTableIndex {
PageTableIndex::new_truncate((self.0 >> 12) as u16)
}
/// Returns the 9-bit level 2 page table index.
#[inline]
pub const fn p2_index(self) -> PageTableIndex {
PageTableIndex::new_truncate((self.0 >> 12 >> 9) as u16)
}
/// Returns the 9-bit level 3 page table index.
#[inline]
pub const fn p3_index(self) -> PageTableIndex {
PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9) as u16)
}
/// Returns the 9-bit level 4 page table index.
#[inline]
pub const fn p4_index(self) -> PageTableIndex {
PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9 >> 9) as u16)
}
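// Index-decomposition example (illustrative): for the canonical address 0x0000_0080_8060_4005,
// p4_index = 1, p3_index = 2, p2_index = 3, p1_index = 4 and page_offset = 5;
// each 9-bit index selects an entry at one level of the 4-level page-table walk,
// and the low 12 bits are the offset within the 4 KiB page.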
/// Returns the 9-bit level page table index.
#[inline]
pub const fn page_table_index(self, level: PageTableLevel) -> PageTableIndex {
PageTableIndex::new_truncate((self.0 >> 12 >> ((level as u8 - 1) * 9)) as u16)
}
// FIXME: Move this into the `Step` impl, once `Step` is stabilized.
pub(crate) fn steps_between_impl(start: &Self, end: &Self) -> Option<usize> {
let mut steps = end.0.checked_sub(start.0)?;
// Mask away extra bits that appear while jumping the gap.
steps &= 0xffff_ffff_ffff;
usize::try_from(steps).ok()
}
// FIXME: Move this into the `Step` impl, once `Step` is stabilized.
#[inline]
pub(crate) fn forward_checked_impl(start: Self, count: usize) -> Option<Self> {
let offset = u64::try_from(count).ok()?;
if offset > ADDRESS_SPACE_SIZE {
return None;
}
let mut addr = start.0.checked_add(offset)?;
match addr.get_bits(47..) {
0x1 => {
// Jump the gap by sign extending the 47th bit.
addr.set_bits(47.., 0x1ffff);
}
0x2 => {
// Address overflow
return None;
}
_ => {}
}
Some(unsafe { Self::new_unsafe(addr) })
}
}
impl fmt::Debug for VirtAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("VirtAddr")
.field(&format_args!("{:#x}", self.0))
.finish()
}
}
impl fmt::Binary for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Binary::fmt(&self.0, f)
}
}
impl fmt::LowerHex for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::LowerHex::fmt(&self.0, f)
}
}
impl fmt::Octal for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Octal::fmt(&self.0, f)
}
}
impl fmt::UpperHex for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::UpperHex::fmt(&self.0, f)
}
}
impl fmt::Pointer for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&(self.0 as *const ()), f)
}
}
impl Add<u64> for VirtAddr {
type Output = Self;
#[inline]
fn add(self, rhs: u64) -> Self::Output {
VirtAddr::new(self.0 + rhs)
}
}
impl AddAssign<u64> for VirtAddr {
#[inline]
fn add_assign(&mut self, rhs: u64) {
*self = *self + rhs;
}
}
impl Sub<u64> for VirtAddr {
type Output = Self;
#[inline]
fn sub(self, rhs: u64) -> Self::Output {
VirtAddr::new(self.0.checked_sub(rhs).unwrap())
}
}
impl SubAssign<u64> for VirtAddr {
#[inline]
fn sub_assign(&mut self, rhs: u64) {
*self = *self - rhs;
}
}
impl Sub<VirtAddr> for VirtAddr {
type Output = u64;
#[inline]
fn sub(self, rhs: VirtAddr) -> Self::Output {
self.as_u64().checked_sub(rhs.as_u64()).unwrap()
}
}
#[cfg(feature = "step_trait")]
impl Step for VirtAddr {
#[inline]
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
Self::steps_between_impl(start, end)
}
#[inline]
fn forward_checked(start: Self, count: usize) -> Option<Self> {
Self::forward_checked_impl(start, count)
}
#[inline]
fn backward_checked(start: Self, count: usize) -> Option<Self> {
let offset = u64::try_from(count).ok()?;
if offset > ADDRESS_SPACE_SIZE {
return None;
}
let mut addr = start.0.checked_sub(offset)?;
match addr.get_bits(47..) {
0x1fffe => {
// Jump the gap by sign extending the 47th bit.
addr.set_bits(47.., 0);
}
0x1fffd => {
// Address underflow
return None;
}
_ => {}
}
Some(unsafe { Self::new_unsafe(addr) })
}
}
/// A passed `u64` was not a valid physical address.
///
/// This means that bits 52 to 64 were not all null.
///
/// Contains the invalid address.
pub struct PhysAddrNotValid(pub u64);
impl core::fmt::Debug for PhysAddrNotValid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("PhysAddrNotValid")
.field(&format_args!("{:#x}", self.0))
.finish()
}
}
impl PhysAddr {
/// Creates a new physical address.
///
/// ## Panics
///
/// This function panics if a bit in the range 52 to 64 is set.
#[inline]
pub const fn new(addr: u64) -> Self {
// TODO: Replace with .ok().expect(msg) when that works on stable.
match Self::try_new(addr) {
Ok(p) => p,
Err(_) => panic!("physical addresses must not have any bits in the range 52 to 64 set"),
}
}
/// Creates a new physical address, throwing bits 52..64 away.
#[inline]
pub const fn new_truncate(addr: u64) -> PhysAddr {
PhysAddr(addr % (1 << 52))
}
/// Creates a new physical address, without any checks.
///
/// ## Safety
///
/// You must make sure bits 52..64 are zero. This is not checked.
#[inline]
pub const unsafe fn new_unsafe(addr: u64) -> PhysAddr {
PhysAddr(addr)
}
/// Tries to create a new physical address.
///
/// Fails if any bits in the range 52 to 64 are set.
#[inline]
pub const fn try_new(addr: u64) -> Result<Self, PhysAddrNotValid> {
let p = Self::new_truncate(addr);
if p.0 == addr {
Ok(p)
} else {
Err(PhysAddrNotValid(addr))
}
}
/// Creates a physical address that points to `0`.
#[inline]
pub const fn zero() -> PhysAddr {
PhysAddr(0)
}
/// Converts the address to an `u64`.
#[inline]
pub const fn as_u64(self) -> u64 {
self.0
}
/// Convenience method for checking if a physical address is null.
#[inline]
pub const fn is_null(self) -> bool {
self.0 == 0
}
/// Aligns the physical address upwards to the given alignment.
///
/// See the `align_up` function for more information.
///
/// # Panics
///
/// This function panics if the resulting address has a bit in the range 52
/// to 64 set.
#[inline]
pub fn align_up<U>(self, align: U) -> Self
where
U: Into<u64>,
{
PhysAddr::new(align_up(self.0, align.into()))
}
/// Aligns the physical address downwards to the given alignment.
///
/// See the `align_down` function for more information.
#[inline]
pub fn align_down<U>(self, align: U) -> Self
where
U: Into<u64>,
{
PhysAddr(align_down(self.0, align.into()))
}
/// Checks whether the physical address has the demanded alignment.
#[inline]
pub fn is_aligned<U>(self, align: U) -> bool
where
U: Into<u64>,
{
self.align_down(align) == self
}
}
impl fmt::Debug for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("PhysAddr")
.field(&format_args!("{:#x}", self.0))
.finish()
}
}
impl fmt::Binary for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Binary::fmt(&self.0, f)
}
}
impl fmt::LowerHex for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::LowerHex::fmt(&self.0, f)
}
}
impl fmt::Octal for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Octal::fmt(&self.0, f)
}
}
impl fmt::UpperHex for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::UpperHex::fmt(&self.0, f)
}
}
impl fmt::Pointer for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&(self.0 as *const ()), f)
}
}
impl Add<u64> for PhysAddr {
type Output = Self;
#[inline]
fn add(self, rhs: u64) -> Self::Output {
PhysAddr::new(self.0 + rhs)
}
}
impl AddAssign<u64> for PhysAddr {
#[inline]
fn add_assign(&mut self, rhs: u64) {
*self = *self + rhs;
}
}
impl Sub<u64> for PhysAddr {
type Output = Self;
#[inline]
fn sub(self, rhs: u64) -> Self::Output {
PhysAddr::new(self.0.checked_sub(rhs).unwrap())
}
}
impl SubAssign<u64> for PhysAddr {
#[inline]
fn sub_assign(&mut self, rhs: u64) {
*self = *self - rhs;
}
}
impl Sub<PhysAddr> for PhysAddr {
type Output = u64;
#[inline]
fn sub(self, rhs: PhysAddr) -> Self::Output {
self.as_u64().checked_sub(rhs.as_u64()).unwrap()
}
}
/// Align address downwards.
///
/// Returns the greatest `x` with alignment `align` so that `x <= addr`.
///
/// Panics if the alignment is not a power of two.
#[inline]
pub const fn align_down(addr: u64, align: u64) -> u64 {
assert!(align.is_power_of_two(), "`align` must be a power of two");
addr & !(align - 1)
}
/// Align address upwards.
///
/// Returns the smallest `x` with alignment `align` so that `x >= addr`.
///
/// Panics if the alignment is not a power of two or if an overflow occurs.
#[inline]
pub const fn align_up(addr: u64, align: u64) -> u64 {
assert!(align.is_power_of_two(), "`align` must be a power of two");
let align_mask = align - 1;
if addr & align_mask == 0 {
addr // already aligned
} else {
// FIXME: Replace with .expect, once `Option::expect` is const.
if let Some(aligned) = (addr | align_mask).checked_add(1) {
aligned
} else {
panic!("attempt to add with overflow")
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn virtaddr_new_truncate() {
assert_eq!(VirtAddr::new_truncate(0), VirtAddr(0));
assert_eq!(VirtAddr::new_truncate(1 << 47), VirtAddr(0xfffff << 47));
assert_eq!(VirtAddr::new_truncate(123), VirtAddr(123));
assert_eq!(VirtAddr::new_truncate(123 << 47), VirtAddr(0xfffff << 47));
}
#[test]
#[cfg(feature = "step_trait")]
fn virtaddr_step_forward() {
assert_eq!(Step::forward(VirtAddr(0), 0), VirtAddr(0));
assert_eq!(Step::forward(VirtAddr(0), 1), VirtAddr(1));
assert_eq!(
Step::forward(VirtAddr(0x7fff_ffff_ffff), 1),
VirtAddr(0xffff_8000_0000_0000)
);
assert_eq!(
Step::forward(VirtAddr(0xffff_8000_0000_0000), 1),
VirtAddr(0xffff_8000_0000_0001)
);
assert_eq!(
Step::forward_checked(VirtAddr(0xffff_ffff_ffff_ffff), 1),
None
);
assert_eq!(
Step::forward(VirtAddr(0x7fff_ffff_ffff), 0x1234_5678_9abd),
VirtAddr(0xffff_9234_5678_9abc)
);
assert_eq!(
Step::forward(VirtAddr(0x7fff_ffff_ffff), 0x8000_0000_0000),
VirtAddr(0xffff_ffff_ffff_ffff)
);
assert_eq!(
Step::forward(VirtAddr(0x7fff_ffff_ff00), 0x8000_0000_00ff),
VirtAddr(0xffff_ffff_ffff_ffff)
);
assert_eq!(
Step::forward_checked(VirtAddr(0x7fff_ffff_ff00), 0x8000_0000_0100),
None
);
assert_eq!(
Step::forward_checked(VirtAddr(0x7fff_ffff_ffff), 0x8000_0000_0001),
None
);
}
#[test]
#[cfg(feature = "step_trait")]
fn virtaddr_step_backward() {
assert_eq!(Step::backward(VirtAddr(0), 0), VirtAddr(0));
assert_eq!(Step::backward_checked(VirtAddr(0), 1), None);
assert_eq!(Step::backward(VirtAddr(1), 1), VirtAddr(0));
assert_eq!(
Step::backward(VirtAddr(0xffff_8000_0000_0000), 1),
VirtAddr(0x7fff_ffff_ffff)
);
assert_eq!(
Step::backward(VirtAddr(0xffff_8000_0000_0001), 1),
VirtAddr(0xffff_8000_0000_0000)
);
assert_eq!(
Step::backward(VirtAddr(0xffff_9234_5678_9abc), 0x1234_5678_9abd),
VirtAddr(0x7fff_ffff_ffff)
);
assert_eq!(
Step::backward(VirtAddr(0xffff_8000_0000_0000), 0x8000_0000_0000),
VirtAddr(0)
);
assert_eq!(
Step::backward(VirtAddr(0xffff_8000_0000_0000), 0x7fff_ffff_ff01),
VirtAddr(0xff)
);
assert_eq!(
Step::backward_checked(VirtAddr(0xffff_8000_0000_0000), 0x8000_0000_0001),
None
);
}
#[test]
#[cfg(feature = "step_trait")]
fn virtaddr_steps_between() {
assert_eq!(Step::steps_between(&VirtAddr(0), &VirtAddr(0)), Some(0));
assert_eq!(Step::steps_between(&VirtAddr(0), &VirtAddr(1)), Some(1));
assert_eq!(Step::steps_between(&VirtAddr(1), &VirtAddr(0)), None);
assert_eq!(
Step::steps_between(
&VirtAddr(0x7fff_ffff_ffff),
&VirtAddr(0xffff_8000_0000_0000)
),
Some(1)
);
assert_eq!(
Step::steps_between(
&VirtAddr(0xffff_8000_0000_0000),
&VirtAddr(0x7fff_ffff_ffff)
),
None
);
assert_eq!(
Step::steps_between(
&VirtAddr(0xffff_8000_0000_0000),
&VirtAddr(0xffff_8000_0000_0000)
),
Some(0)
);
assert_eq!(
Step::steps_between(
&VirtAddr(0xffff_8000_0000_0000),
&VirtAddr(0xffff_8000_0000_0001)
),
Some(1)
);
assert_eq!(
Step::steps_between(
&VirtAddr(0xffff_8000_0000_0001),
&VirtAddr(0xffff_8000_0000_0000)
),
None
);
}
#[test]
pub fn test_align_up() {
// align 1
assert_eq!(align_up(0, 1), 0);
assert_eq!(align_up(1234, 1), 1234);
assert_eq!(align_up(0xffff_ffff_ffff_ffff, 1), 0xffff_ffff_ffff_ffff);
// align 2
assert_eq!(align_up(0, 2), 0);
assert_eq!(align_up(1233, 2), 1234);
assert_eq!(align_up(0xffff_ffff_ffff_fffe, 2), 0xffff_ffff_ffff_fffe);
// address 0
assert_eq!(align_up(0, 128), 0);
assert_eq!(align_up(0, 1), 0);
assert_eq!(align_up(0, 2), 0);
assert_eq!(align_up(0, 0x8000_0000_0000_0000), 0);
}
#[test]
fn test_virt_addr_align_up() {
// Make sure the 47th bit is extended.
assert_eq!(
VirtAddr::new(0x7fff_ffff_ffff).align_up(2u64),
VirtAddr::new(0xffff_8000_0000_0000)
);
}
#[test]
fn test_virt_addr_align_down() {
// Make sure the 47th bit is extended.
assert_eq!(
VirtAddr::new(0xffff_8000_0000_0000).align_down(1u64 << 48),
VirtAddr::new(0)
);
}
#[test]
#[should_panic]
fn test_virt_addr_align_up_overflow() {
VirtAddr::new(0xffff_ffff_ffff_ffff).align_up(2u64);
}
#[test]
#[should_panic]
fn test_phys_addr_align_up_overflow() {
PhysAddr::new(0x000f_ffff_ffff_ffff).align_up(2u64);
}
#[test]
fn test_from_ptr_array() {
let slice = &[1, 2, 3, 4, 5];
// Make sure that from_ptr(slice) is the address of the first element
assert_eq!(VirtAddr::from_ptr(slice), VirtAddr::from_ptr(&slice[0]));
}
}

View File

@ -0,0 +1,157 @@
//! Enabling and disabling interrupts
use core::arch::asm;
/// Returns whether interrupts are enabled.
#[inline]
pub fn are_enabled() -> bool {
use crate::registers::rflags::{self, RFlags};
rflags::read().contains(RFlags::INTERRUPT_FLAG)
}
/// Enable interrupts.
///
/// This is a wrapper around the `sti` instruction.
#[inline]
pub fn enable() {
// Omit `nomem` to imitate a lock release. Otherwise, the compiler
// is free to move reads and writes through this asm block.
unsafe {
asm!("sti", options(preserves_flags, nostack));
}
}
/// Disable interrupts.
///
/// This is a wrapper around the `cli` instruction.
#[inline]
pub fn disable() {
// Omit `nomem` to imitate a lock acquire. Otherwise, the compiler
// is free to move reads and writes through this asm block.
unsafe {
asm!("cli", options(preserves_flags, nostack));
}
}
/// Run a closure with disabled interrupts.
///
/// Run the given closure, disabling interrupts before running it (if they aren't already disabled).
/// Afterwards, interrupts are enabled again if they were enabled before.
///
/// If you have other `enable` and `disable` calls _within_ the closure, things may not work as expected.
///
/// # Examples
///
/// ```ignore
/// // interrupts are enabled
/// without_interrupts(|| {
/// // interrupts are disabled
/// without_interrupts(|| {
/// // interrupts are disabled
/// });
/// // interrupts are still disabled
/// });
/// // interrupts are enabled again
/// ```
#[inline]
pub fn without_interrupts<F, R>(f: F) -> R
where
F: FnOnce() -> R,
{
// true if the interrupt flag is set (i.e. interrupts are enabled)
let saved_intpt_flag = are_enabled();
// if interrupts are enabled, disable them for now
if saved_intpt_flag {
disable();
}
// do `f` while interrupts are disabled
let ret = f();
// re-enable interrupts if they were previously enabled
if saved_intpt_flag {
enable();
}
// return the result of `f` to the caller
ret
}
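// Example (added for illustration, not part of the upstream crate): a minimal
// sketch of guarding a shared counter with `without_interrupts`; the
// `TICK_COUNT` static is an assumption made up purely for this sketch.
#[allow(dead_code)]
fn example_guarded_increment() {
    use core::sync::atomic::{AtomicU64, Ordering};
    static TICK_COUNT: AtomicU64 = AtomicU64::new(0);
    // The closure runs with interrupts masked; the previous interrupt state is
    // restored once it returns.
    without_interrupts(|| {
        TICK_COUNT.fetch_add(1, Ordering::Relaxed);
    });
}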
/// Atomically enable interrupts and put the CPU to sleep
///
/// Executes the `sti; hlt` instruction sequence. Since the `sti` instruction
/// keeps interrupts disabled until after the immediately following
/// instruction (called "interrupt shadow"), no interrupt can occur between the
/// two instructions. (One exception to this are non-maskable interrupts; this
/// is explained below.)
///
/// This function is useful to put the CPU to sleep without missing interrupts
/// that occur immediately before the `hlt` instruction:
///
/// ```ignore
/// // there is a race between the check and the `hlt` instruction here:
///
/// if nothing_to_do() {
/// // <- race when the interrupt occurs here
/// x86_64::instructions::hlt(); // wait for the next interrupt
/// }
///
/// // avoid this race by using `enable_and_hlt`:
///
/// x86_64::instructions::interrupts::disable();
/// if nothing_to_do() {
/// // <- no interrupts can occur here (interrupts are disabled)
/// x86_64::instructions::interrupts::enable_and_hlt();
/// }
///
/// ```
///
/// ## Non-maskable Interrupts
///
/// On some processors, the interrupt shadow of `sti` does not apply to
/// non-maskable interrupts (NMIs). This means that an NMI can occur between
/// the `sti` and `hlt` instruction, with the result that the CPU is put to
/// sleep even though a new interrupt occurred.
///
/// To work around this, it is recommended to check in the NMI handler if
/// the interrupt occurred between `sti` and `hlt` instructions. If this is the
/// case, the handler should increase the instruction pointer stored in the
/// interrupt stack frame so that the `hlt` instruction is skipped.
///
/// See <http://lkml.iu.edu/hypermail/linux/kernel/1009.2/01406.html> for more
/// information.
#[inline]
pub fn enable_and_hlt() {
unsafe {
asm!("sti; hlt", options(nomem, nostack));
}
}
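// Example (illustrative sketch only, not original source): an idle loop built
// on `enable_and_hlt`. `work_pending` is a stand-in for real scheduler state
// and is an assumption for this sketch.
#[allow(dead_code)]
fn example_idle_until_work(work_pending: impl Fn() -> bool) {
    loop {
        disable();
        if work_pending() {
            enable();
            return;
        }
        // `sti; hlt` executes atomically, so an interrupt that fires right
        // after the check cannot be lost before the halt.
        enable_and_hlt();
    }
}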
/// Cause a breakpoint exception by invoking the `int3` instruction.
#[inline]
pub fn int3() {
unsafe {
asm!("int3", options(nomem, nostack));
}
}
/// Generate a software interrupt by invoking the `int` instruction.
///
/// ## Safety
///
/// Invoking an arbitrary interrupt is unsafe. It can cause your system to
/// crash if you invoke a double-fault (#8) or machine-check (#18) exception.
/// It can also cause memory/register corruption depending on the interrupt
/// implementation (if it expects values/pointers to be passed in registers).
#[cfg(feature = "asm_const")]
#[cfg_attr(
feature = "doc_cfg",
doc(cfg(any(feature = "nightly", feature = "asm_const")))
)]
pub unsafe fn software_interrupt<const NUM: u8>() {
unsafe {
asm!("int {num}", num = const NUM, options(nomem, nostack));
}
}

View File

@ -0,0 +1,53 @@
#![cfg(feature = "instructions")]
//! Special x86_64 instructions.
pub mod interrupts;
pub mod port;
pub mod random;
pub mod segmentation;
pub mod tables;
pub mod tlb;
use core::arch::asm;
/// Halts the CPU until the next interrupt arrives.
#[inline]
pub fn hlt() {
unsafe {
asm!("hlt", options(nomem, nostack, preserves_flags));
}
}
/// Executes the `nop` instruction, which performs no operation (i.e. does nothing).
///
/// This operation is useful to work around the LLVM bug that endless loops are illegally
/// optimized away (see [the issue](https://github.com/rust-lang/rust/issues/28728)). By invoking this
/// instruction (which is marked as volatile), the compiler should no longer optimize the
/// endless loop away.
#[inline]
pub fn nop() {
unsafe {
asm!("nop", options(nomem, nostack, preserves_flags));
}
}
/// Emits a '[magic breakpoint](https://wiki.osdev.org/Bochs#Magic_Breakpoint)' instruction for the [Bochs](http://bochs.sourceforge.net/) CPU
/// emulator. Make sure to set `magic_break: enabled=1` in your `.bochsrc` file.
#[inline]
pub fn bochs_breakpoint() {
unsafe {
asm!("xchg bx, bx", options(nomem, nostack, preserves_flags));
}
}
/// Gets the current instruction pointer. Note that this is only approximate as it requires a few
/// instructions to execute.
#[inline(always)]
pub fn read_rip() -> crate::VirtAddr {
let rip: u64;
unsafe {
asm!("lea {}, [rip]", out(reg) rip, options(nostack, nomem, preserves_flags));
}
crate::VirtAddr::new(rip)
}

View File

@ -0,0 +1,197 @@
//! Access to I/O ports
use core::arch::asm;
use core::fmt;
use core::marker::PhantomData;
use crate::sealed::Sealed;
pub use crate::structures::port::{PortRead, PortWrite};
impl PortRead for u8 {
#[inline]
unsafe fn read_from_port(port: u16) -> u8 {
let value: u8;
unsafe {
asm!("in al, dx", out("al") value, in("dx") port, options(nomem, nostack, preserves_flags));
}
value
}
}
impl PortRead for u16 {
#[inline]
unsafe fn read_from_port(port: u16) -> u16 {
let value: u16;
unsafe {
asm!("in ax, dx", out("ax") value, in("dx") port, options(nomem, nostack, preserves_flags));
}
value
}
}
impl PortRead for u32 {
#[inline]
unsafe fn read_from_port(port: u16) -> u32 {
let value: u32;
unsafe {
asm!("in eax, dx", out("eax") value, in("dx") port, options(nomem, nostack, preserves_flags));
}
value
}
}
impl PortWrite for u8 {
#[inline]
unsafe fn write_to_port(port: u16, value: u8) {
unsafe {
asm!("out dx, al", in("dx") port, in("al") value, options(nomem, nostack, preserves_flags));
}
}
}
impl PortWrite for u16 {
#[inline]
unsafe fn write_to_port(port: u16, value: u16) {
unsafe {
asm!("out dx, ax", in("dx") port, in("ax") value, options(nomem, nostack, preserves_flags));
}
}
}
impl PortWrite for u32 {
#[inline]
unsafe fn write_to_port(port: u16, value: u32) {
unsafe {
asm!("out dx, eax", in("dx") port, in("eax") value, options(nomem, nostack, preserves_flags));
}
}
}
/// A marker trait for access types which allow accessing port values.
pub trait PortAccess: Sealed {
/// A string representation for debug output.
const DEBUG_STR: &'static str;
}
/// A marker trait for access types which allow reading port values.
pub trait PortReadAccess: PortAccess {}
/// A marker trait for access types which allow writing port values.
pub trait PortWriteAccess: PortAccess {}
/// An access marker type indicating that a port is only allowed to read values.
#[derive(Debug)]
pub struct ReadOnlyAccess(());
impl Sealed for ReadOnlyAccess {}
impl PortAccess for ReadOnlyAccess {
const DEBUG_STR: &'static str = "ReadOnly";
}
impl PortReadAccess for ReadOnlyAccess {}
/// An access marker type indicating that a port is only allowed to write values.
#[derive(Debug)]
pub struct WriteOnlyAccess(());
impl Sealed for WriteOnlyAccess {}
impl PortAccess for WriteOnlyAccess {
const DEBUG_STR: &'static str = "WriteOnly";
}
impl PortWriteAccess for WriteOnlyAccess {}
/// An access marker type indicating that a port is allowed to read or write values.
#[derive(Debug)]
pub struct ReadWriteAccess(());
impl Sealed for ReadWriteAccess {}
impl PortAccess for ReadWriteAccess {
const DEBUG_STR: &'static str = "ReadWrite";
}
impl PortReadAccess for ReadWriteAccess {}
impl PortWriteAccess for ReadWriteAccess {}
/// An I/O port.
///
/// The port reads or writes values of type `T` and has read/write access specified by `A`.
///
/// Use the provided marker types or aliases to get a port type with the access you need:
/// * `PortGeneric<T, ReadWriteAccess>` -> `Port<T>`
/// * `PortGeneric<T, ReadOnlyAccess>` -> `PortReadOnly<T>`
/// * `PortGeneric<T, WriteOnlyAccess>` -> `PortWriteOnly<T>`
pub struct PortGeneric<T, A> {
port: u16,
phantom: PhantomData<(T, A)>,
}
/// A read-write I/O port.
pub type Port<T> = PortGeneric<T, ReadWriteAccess>;
/// A read-only I/O port.
pub type PortReadOnly<T> = PortGeneric<T, ReadOnlyAccess>;
/// A write-only I/O port.
pub type PortWriteOnly<T> = PortGeneric<T, WriteOnlyAccess>;
impl<T, A> PortGeneric<T, A> {
/// Creates an I/O port with the given port number.
#[inline]
pub const fn new(port: u16) -> PortGeneric<T, A> {
PortGeneric {
port,
phantom: PhantomData,
}
}
}
impl<T: PortRead, A: PortReadAccess> PortGeneric<T, A> {
/// Reads from the port.
///
/// ## Safety
///
/// This function is unsafe because the I/O port could have side effects that violate memory
/// safety.
#[inline]
pub unsafe fn read(&mut self) -> T {
unsafe { T::read_from_port(self.port) }
}
}
impl<T: PortWrite, A: PortWriteAccess> PortGeneric<T, A> {
/// Writes to the port.
///
/// ## Safety
///
/// This function is unsafe because the I/O port could have side effects that violate memory
/// safety.
#[inline]
pub unsafe fn write(&mut self, value: T) {
unsafe { T::write_to_port(self.port, value) }
}
}
impl<T, A: PortAccess> fmt::Debug for PortGeneric<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PortGeneric")
.field("port", &self.port)
.field("size", &core::mem::size_of::<T>())
.field("access", &format_args!("{}", A::DEBUG_STR))
.finish()
}
}
impl<T, A> Clone for PortGeneric<T, A> {
fn clone(&self) -> Self {
Self {
port: self.port,
phantom: PhantomData,
}
}
}
impl<T, A> PartialEq for PortGeneric<T, A> {
fn eq(&self, other: &Self) -> bool {
self.port == other.port
}
}
impl<T, A> Eq for PortGeneric<T, A> {}
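// Example (not part of the upstream file, purely illustrative): a round trip
// on a legacy I/O port. Port 0x3f8 (the COM1 data port) is an assumption
// chosen for illustration; whether touching it is safe depends on the platform.
#[allow(dead_code)]
unsafe fn example_com1_roundtrip() -> u8 {
    let mut com1: Port<u8> = Port::new(0x3f8);
    unsafe {
        // Writing and reading may have device side effects, hence `unsafe`.
        com1.write(b'x');
        com1.read()
    }
}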

View File

@ -0,0 +1,84 @@
//! Support for built-in RNGs
#[derive(Copy, Clone, Debug)]
/// Used to obtain random numbers using x86_64's RDRAND opcode
pub struct RdRand(());
impl RdRand {
/// Creates Some(RdRand) if RDRAND is supported, None otherwise
#[inline]
pub fn new() -> Option<Self> {
// RDRAND support indicated by CPUID leaf 01h, ECX bit 30
// https://en.wikipedia.org/wiki/RdRand#Overview
let cpuid = unsafe { core::arch::x86_64::__cpuid(0x1) };
if cpuid.ecx & (1 << 30) != 0 {
Some(RdRand(()))
} else {
None
}
}
/// Uniformly sampled u64.
/// May fail in rare circumstances or under heavy load.
#[inline]
pub fn get_u64(self) -> Option<u64> {
let mut res: u64 = 0;
unsafe {
match core::arch::x86_64::_rdrand64_step(&mut res) {
1 => Some(res),
x => {
debug_assert_eq!(x, 0, "rdrand64 returned non-binary value");
None
}
}
}
}
/// Uniformly sampled u32.
/// May fail in rare circumstances or under heavy load.
#[inline]
pub fn get_u32(self) -> Option<u32> {
let mut res: u32 = 0;
unsafe {
match core::arch::x86_64::_rdrand32_step(&mut res) {
1 => Some(res),
x => {
debug_assert_eq!(x, 0, "rdrand32 returned non-binary value");
None
}
}
}
}
/// Uniformly sampled u16.
/// May fail in rare circumstances or under heavy load.
#[inline]
pub fn get_u16(self) -> Option<u16> {
let mut res: u16 = 0;
unsafe {
match core::arch::x86_64::_rdrand16_step(&mut res) {
1 => Some(res),
x => {
debug_assert_eq!(x, 0, "rdrand16 returned non-binary value");
None
}
}
}
}
}
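// Example (added for illustration, not upstream code): retry RDRAND a few
// times before giving up, since `get_u64` may legitimately fail under load.
// The retry count of 10 is an arbitrary assumption.
#[allow(dead_code)]
fn example_random_u64(rng: RdRand) -> Option<u64> {
    for _ in 0..10 {
        if let Some(value) = rng.get_u64() {
            return Some(value);
        }
    }
    None
}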
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn test_rdrand() {
let rand = RdRand::new();
if is_x86_feature_detected!("rdrand") {
let rand = rand.unwrap();
assert!(rand.get_u16().is_some());
assert!(rand.get_u32().is_some());
assert!(rand.get_u64().is_some());
} else {
assert!(rand.is_none());
}
}
}

View File

@ -0,0 +1,111 @@
//! Provides functions to read and write segment registers.
pub use crate::registers::segmentation::{Segment, Segment64, CS, DS, ES, FS, GS, SS};
use crate::{
registers::model_specific::{FsBase, GsBase, Msr},
structures::gdt::SegmentSelector,
VirtAddr,
};
use core::arch::asm;
macro_rules! get_reg_impl {
($name:literal) => {
#[inline]
fn get_reg() -> SegmentSelector {
let segment: u16;
unsafe {
asm!(concat!("mov {0:x}, ", $name), out(reg) segment, options(nomem, nostack, preserves_flags));
}
SegmentSelector(segment)
}
};
}
macro_rules! segment_impl {
($type:ty, $name:literal) => {
impl Segment for $type {
get_reg_impl!($name);
#[inline]
unsafe fn set_reg(sel: SegmentSelector) {
unsafe {
asm!(concat!("mov ", $name, ", {0:x}"), in(reg) sel.0, options(nostack, preserves_flags));
}
}
}
};
}
macro_rules! segment64_impl {
($type:ty, $name:literal, $base:ty) => {
impl Segment64 for $type {
const BASE: Msr = <$base>::MSR;
#[inline]
fn read_base() -> VirtAddr {
unsafe {
let val: u64;
asm!(concat!("rd", $name, "base {}"), out(reg) val, options(nomem, nostack, preserves_flags));
VirtAddr::new_unsafe(val)
}
}
#[inline]
unsafe fn write_base(base: VirtAddr) {
unsafe{
asm!(concat!("wr", $name, "base {}"), in(reg) base.as_u64(), options(nostack, preserves_flags));
}
}
}
};
}
impl Segment for CS {
get_reg_impl!("cs");
/// Note this is special since we cannot directly move to [`CS`]; x86 requires the instruction
/// pointer and [`CS`] to be set at the same time. To do this, we push the new segment selector
/// and return value onto the stack and use a "far return" (`retfq`) to reload [`CS`] and
/// continue at the end of our function.
///
/// Note we cannot use a "far call" (`lcall`) or "far jmp" (`ljmp`) to do this because then we
/// would only be able to jump to 32-bit instruction pointers. Only Intel implements support
/// for 64-bit far calls/jumps in long-mode, AMD does not.
#[inline]
unsafe fn set_reg(sel: SegmentSelector) {
unsafe {
asm!(
"push {sel}",
"lea {tmp}, [1f + rip]",
"push {tmp}",
"retfq",
"1:",
sel = in(reg) u64::from(sel.0),
tmp = lateout(reg) _,
options(preserves_flags),
);
}
}
}
segment_impl!(SS, "ss");
segment_impl!(DS, "ds");
segment_impl!(ES, "es");
segment_impl!(FS, "fs");
segment64_impl!(FS, "fs", FsBase);
segment_impl!(GS, "gs");
segment64_impl!(GS, "gs", GsBase);
impl GS {
/// Swap `KernelGsBase` MSR and `GsBase` MSR.
///
/// ## Safety
///
/// This function is unsafe because the caller must ensure that the
/// swap operation cannot lead to undefined behavior.
#[inline]
pub unsafe fn swap() {
unsafe {
asm!("swapgs", options(nostack, preserves_flags));
}
}
}
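// Example (sketch for illustration only, not original source): reloading the
// data segment registers after installing a new GDT. The selector passed in is
// assumed to index a valid, present data descriptor.
#[allow(dead_code)]
unsafe fn example_reload_data_segments(data: SegmentSelector) {
    unsafe {
        SS::set_reg(data);
        DS::set_reg(data);
        ES::set_reg(data);
    }
}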

View File

@ -0,0 +1,92 @@
//! Functions to load GDT, IDT, and TSS structures.
use crate::structures::gdt::SegmentSelector;
use crate::VirtAddr;
use core::arch::asm;
pub use crate::structures::DescriptorTablePointer;
/// Load a GDT.
///
/// Use the
/// [`GlobalDescriptorTable`](crate::structures::gdt::GlobalDescriptorTable) struct for a high-level
/// interface to loading a GDT.
///
/// ## Safety
///
/// This function is unsafe because the caller must ensure that the given
/// `DescriptorTablePointer` points to a valid GDT and that loading this
/// GDT is safe.
#[inline]
pub unsafe fn lgdt(gdt: &DescriptorTablePointer) {
unsafe {
asm!("lgdt [{}]", in(reg) gdt, options(readonly, nostack, preserves_flags));
}
}
/// Load an IDT.
///
/// Use the
/// [`InterruptDescriptorTable`](crate::structures::idt::InterruptDescriptorTable) struct for a high-level
/// interface to loading an IDT.
///
/// ## Safety
///
/// This function is unsafe because the caller must ensure that the given
/// `DescriptorTablePointer` points to a valid IDT and that loading this
/// IDT is safe.
#[inline]
pub unsafe fn lidt(idt: &DescriptorTablePointer) {
unsafe {
asm!("lidt [{}]", in(reg) idt, options(readonly, nostack, preserves_flags));
}
}
/// Get the address of the current GDT.
#[inline]
pub fn sgdt() -> DescriptorTablePointer {
let mut gdt: DescriptorTablePointer = DescriptorTablePointer {
limit: 0,
base: VirtAddr::new(0),
};
unsafe {
asm!("sgdt [{}]", in(reg) &mut gdt, options(nostack, preserves_flags));
}
gdt
}
/// Get the address of the current IDT.
#[inline]
pub fn sidt() -> DescriptorTablePointer {
let mut idt: DescriptorTablePointer = DescriptorTablePointer {
limit: 0,
base: VirtAddr::new(0),
};
unsafe {
asm!("sidt [{}]", in(reg) &mut idt, options(nostack, preserves_flags));
}
idt
}
/// Load the task state register using the `ltr` instruction.
///
/// Note that loading a TSS segment selector marks the corresponding TSS
/// Descriptor in the GDT as "busy", preventing it from being loaded again
/// (either on this CPU or another CPU). TSS structures (including Descriptors
/// and Selectors) should generally be per-CPU. See
/// [`tss_segment`](crate::structures::gdt::Descriptor::tss_segment)
/// for more information.
///
/// Calling `load_tss` with a busy TSS selector results in a `#GP` exception.
///
/// ## Safety
///
/// This function is unsafe because the caller must ensure that the given
/// `SegmentSelector` points to a valid TSS entry in the GDT and that the
/// corresponding data in the TSS is valid.
#[inline]
pub unsafe fn load_tss(sel: SegmentSelector) {
unsafe {
asm!("ltr {0:x}", in(reg) sel.0, options(nostack, preserves_flags));
}
}
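// Example (illustration only, not part of the original file): reading back the
// current descriptor table pointers, which is safe and useful when debugging
// early boot code.
#[allow(dead_code)]
fn example_dump_table_pointers() -> (DescriptorTablePointer, DescriptorTablePointer) {
    (sgdt(), sidt())
}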

View File

@ -0,0 +1,430 @@
//! Functions to flush the translation lookaside buffer (TLB).
use bit_field::BitField;
use crate::{
instructions::segmentation::{Segment, CS},
structures::paging::{
page::{NotGiantPageSize, PageRange},
Page, PageSize, Size2MiB, Size4KiB,
},
PrivilegeLevel, VirtAddr,
};
use core::{arch::asm, cmp, convert::TryFrom, fmt};
/// Invalidate the given address in the TLB using the `invlpg` instruction.
#[inline]
pub fn flush(addr: VirtAddr) {
unsafe {
asm!("invlpg [{}]", in(reg) addr.as_u64(), options(nostack, preserves_flags));
}
}
/// Invalidate the TLB completely by reloading the CR3 register.
#[inline]
pub fn flush_all() {
use crate::registers::control::Cr3;
let (frame, flags) = Cr3::read();
unsafe { Cr3::write(frame, flags) }
}
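// Example (sketch added for illustration; the address is an assumption): after
// changing a single page mapping, invalidating just that page is usually much
// cheaper than reloading CR3 via `flush_all`.
#[allow(dead_code)]
fn example_flush_one_page() {
    let page_start = VirtAddr::new(0x4444_4000);
    flush(page_start);
}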
/// The Invalidate PCID Command to execute.
#[derive(Debug)]
pub enum InvPicdCommand {
/// The logical processor invalidates mappings—except global translations—for the linear address and PCID specified.
Address(VirtAddr, Pcid),
/// The logical processor invalidates all mappings—except global translations—associated with the PCID.
Single(Pcid),
/// The logical processor invalidates all mappings—including global translations—associated with any PCID.
All,
/// The logical processor invalidates all mappings—except global translations—associated with any PCID.
AllExceptGlobal,
}
/// The INVPCID descriptor comprises 128 bits and consists of a PCID and a linear address.
/// For INVPCID type 0, the processor uses the full 64 bits of the linear address even outside 64-bit mode; the linear address is not used for other INVPCID types.
#[repr(C)]
#[derive(Debug)]
struct InvpcidDescriptor {
address: u64,
pcid: u64,
}
/// Structure of a PCID. A PCID has to be < 4096 for x86_64.
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Pcid(u16);
impl Pcid {
/// Create a new PCID. Will result in a failure if the value of
/// PCID is out of expected bounds.
pub const fn new(pcid: u16) -> Result<Pcid, PcidTooBig> {
if pcid >= 4096 {
Err(PcidTooBig(pcid))
} else {
Ok(Pcid(pcid))
}
}
/// Get the value of the current PCID.
pub const fn value(&self) -> u16 {
self.0
}
}
/// A passed `u16` was not a valid PCID.
///
/// A PCID has to be < 4096 for x86_64.
#[derive(Debug)]
pub struct PcidTooBig(u16);
impl fmt::Display for PcidTooBig {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "PCID should be < 4096, got {}", self.0)
}
}
/// Invalidate the given address in the TLB using the `invpcid` instruction.
///
/// ## Safety
///
/// This function is unsafe as it requires CPUID.(EAX=07H, ECX=0H):EBX.INVPCID to be 1.
#[inline]
pub unsafe fn flush_pcid(command: InvPicdCommand) {
let mut desc = InvpcidDescriptor {
address: 0,
pcid: 0,
};
let kind: u64;
match command {
InvPicdCommand::Address(addr, pcid) => {
kind = 0;
desc.pcid = pcid.value().into();
desc.address = addr.as_u64()
}
InvPicdCommand::Single(pcid) => {
kind = 1;
desc.pcid = pcid.0.into()
}
InvPicdCommand::All => kind = 2,
InvPicdCommand::AllExceptGlobal => kind = 3,
}
unsafe {
asm!("invpcid {0}, [{1}]", in(reg) kind, in(reg) &desc, options(nostack, preserves_flags));
}
}
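// Example (illustrative only; assumes the CPU supports `invpcid` and that the
// PCID is currently in use): drop every non-global translation tagged with one
// PCID without disturbing the others.
#[allow(dead_code)]
unsafe fn example_flush_one_pcid(pcid: Pcid) {
    unsafe { flush_pcid(InvPicdCommand::Single(pcid)) }
}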
/// Used to broadcast flushes to all logical processors.
///
/// ```no_run
/// use x86_64::VirtAddr;
/// use x86_64::structures::paging::Page;
/// use x86_64::instructions::tlb::Invlpgb;
///
/// // Check that `invlpgb` and `tlbsync` are supported.
/// let invlpgb = Invlpgb::new().unwrap();
///
/// // Broadcast flushing some pages to all logical processors.
/// let start: Page = Page::from_start_address(VirtAddr::new(0xf000_0000)).unwrap();
/// let pages = Page::range(start, start + 3);
/// invlpgb.build().pages(pages).include_global().flush();
///
/// // Wait for all logical processors to respond.
/// invlpgb.tlbsync();
/// ```
#[derive(Debug, Clone, Copy)]
pub struct Invlpgb {
invlpgb_count_max: u16,
tlb_flush_nested: bool,
nasid: u32,
}
impl Invlpgb {
/// Check that `invlpgb` and `tlbsync` are supported and query limits.
///
/// # Panics
///
/// Panics if the CPL is not 0.
pub fn new() -> Option<Self> {
let cs = CS::get_reg();
assert_eq!(cs.rpl(), PrivilegeLevel::Ring0);
// Check if the `INVLPGB` and `TLBSYNC` instructions are supported.
let cpuid = unsafe { core::arch::x86_64::__cpuid(0x8000_0008) };
if !cpuid.ebx.get_bit(3) {
return None;
}
let tlb_flush_nested = cpuid.ebx.get_bit(21);
let invlpgb_count_max = cpuid.edx.get_bits(0..=15) as u16;
// Figure out the number of supported ASIDs.
let cpuid = unsafe { core::arch::x86_64::__cpuid(0x8000_000a) };
let nasid = cpuid.ebx;
Some(Self {
tlb_flush_nested,
invlpgb_count_max,
nasid,
})
}
/// Returns the maximum number of pages the processor can flush with a single `invlpgb`.
#[inline]
pub fn invlpgb_count_max(&self) -> u16 {
self.invlpgb_count_max
}
/// Returns whether the processor supports flushing translations used for guest translation.
#[inline]
pub fn tlb_flush_nested(&self) -> bool {
self.tlb_flush_nested
}
/// Returns the number of available address space identifiers.
#[inline]
pub fn nasid(&self) -> u32 {
self.nasid
}
/// Create a `InvlpgbFlushBuilder`.
pub fn build(&self) -> InvlpgbFlushBuilder<'_> {
InvlpgbFlushBuilder {
invlpgb: self,
page_range: None,
pcid: None,
asid: None,
include_global: false,
final_translation_only: false,
include_nested_translations: false,
}
}
/// Wait for all previous `invlpgb` instructions executed on the current
/// logical processor to be acknowledged by all other logical processors.
#[inline]
pub fn tlbsync(&self) {
unsafe {
asm!("tlbsync", options(nomem, preserves_flags));
}
}
}
/// A builder struct to construct the parameters for the `invlpgb` instruction.
#[derive(Debug, Clone)]
#[must_use]
pub struct InvlpgbFlushBuilder<'a, S = Size4KiB>
where
S: NotGiantPageSize,
{
invlpgb: &'a Invlpgb,
page_range: Option<PageRange<S>>,
pcid: Option<Pcid>,
asid: Option<u16>,
include_global: bool,
final_translation_only: bool,
include_nested_translations: bool,
}
impl<'a, S> InvlpgbFlushBuilder<'a, S>
where
S: NotGiantPageSize,
{
/// Flush a range of pages.
///
/// If the range doesn't fit within `invlpgb_count_max`, `invlpgb` is
/// executed multiple times.
pub fn pages<T>(self, page_range: PageRange<T>) -> InvlpgbFlushBuilder<'a, T>
where
T: NotGiantPageSize,
{
InvlpgbFlushBuilder {
invlpgb: self.invlpgb,
page_range: Some(page_range),
pcid: self.pcid,
asid: self.asid,
include_global: self.include_global,
final_translation_only: self.final_translation_only,
include_nested_translations: self.include_nested_translations,
}
}
/// Only flush TLB entries with the given PCID.
///
/// # Safety
///
/// The caller has to ensure that PCID is enabled in CR4 when the flush is executed.
pub unsafe fn pcid(&mut self, pcid: Pcid) -> &mut Self {
self.pcid = Some(pcid);
self
}
/// Only flush TLB entries with the given ASID.
///
/// # Safety
///
/// The caller has to ensure that SVM is enabled in EFER when the flush is executed.
// FIXME: Make ASID a type and remove error type.
pub unsafe fn asid(&mut self, asid: u16) -> Result<&mut Self, AsidOutOfRangeError> {
if u32::from(asid) >= self.invlpgb.nasid {
return Err(AsidOutOfRangeError {
asid,
nasid: self.invlpgb.nasid,
});
}
self.asid = Some(asid);
Ok(self)
}
/// Also flush global pages.
pub fn include_global(&mut self) -> &mut Self {
self.include_global = true;
self
}
/// Only flush the final translation and not the cached upper level TLB entries.
pub fn final_translation_only(&mut self) -> &mut Self {
self.final_translation_only = true;
self
}
/// Also flush nested translations that could be used for guest translation.
pub fn include_nested_translations(mut self) -> Self {
assert!(
self.invlpgb.tlb_flush_nested,
"flushing all nested translations is not supported"
);
self.include_nested_translations = true;
self
}
/// Execute the flush.
pub fn flush(&self) {
if let Some(mut pages) = self.page_range {
while !pages.is_empty() {
// Calculate how many pages we still need to flush.
let count = Page::<S>::steps_between_impl(&pages.start, &pages.end).unwrap();
// Make sure that we never jump the gap in the address space when flushing.
let second_half_start =
Page::<S>::containing_address(VirtAddr::new(0xffff_8000_0000_0000));
let count = if pages.start < second_half_start {
let count_to_second_half =
Page::steps_between_impl(&pages.start, &second_half_start).unwrap();
cmp::min(count, count_to_second_half)
} else {
count
};
// We can flush at most u16::MAX pages at once.
let count = u16::try_from(count).unwrap_or(u16::MAX);
// Cap the count by the maximum supported count of the processor.
let count = cmp::min(count, self.invlpgb.invlpgb_count_max);
unsafe {
flush_broadcast(
Some((pages.start, count)),
self.pcid,
self.asid,
self.include_global,
self.final_translation_only,
self.include_nested_translations,
);
}
// Even if the count is zero, one page is still flushed and so
// we need to advance by at least one.
let inc_count = cmp::max(count, 1);
pages.start =
Page::forward_checked_impl(pages.start, usize::from(inc_count)).unwrap();
}
} else {
unsafe {
flush_broadcast::<S>(
None,
self.pcid,
self.asid,
self.include_global,
self.final_translation_only,
self.include_nested_translations,
);
}
}
}
}
/// An error returned when trying to use an invalid ASID.
#[derive(Debug)]
pub struct AsidOutOfRangeError {
/// The requested ASID.
pub asid: u16,
/// The number of valid ASIDS.
pub nasid: u32,
}
impl fmt::Display for AsidOutOfRangeError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} is out of the range of available ASIDS ({})",
self.asid, self.nasid
)
}
}
/// See `INVLPGB` in AMD64 Architecture Programmer's Manual Volume 3
#[inline]
unsafe fn flush_broadcast<S>(
va_and_count: Option<(Page<S>, u16)>,
pcid: Option<Pcid>,
asid: Option<u16>,
include_global: bool,
final_translation_only: bool,
include_nested_translations: bool,
) where
S: NotGiantPageSize,
{
let mut rax = 0;
let mut ecx = 0;
let mut edx = 0;
if let Some((va, count)) = va_and_count {
rax.set_bit(0, true);
rax.set_bits(12.., va.start_address().as_u64().get_bits(12..));
ecx.set_bits(0..=15, u32::from(count));
ecx.set_bit(31, S::SIZE == Size2MiB::SIZE);
}
if let Some(pcid) = pcid {
rax.set_bit(1, true);
edx.set_bits(16..=27, u32::from(pcid.value()));
}
if let Some(asid) = asid {
rax.set_bit(2, true);
edx.set_bits(0..=15, u32::from(asid));
}
rax.set_bit(3, include_global);
rax.set_bit(4, final_translation_only);
rax.set_bit(5, include_nested_translations);
unsafe {
asm!(
"invlpgb",
in("rax") rax,
in("ecx") ecx,
in("edx") edx,
options(nostack, preserves_flags),
);
}
}

View File

@ -0,0 +1,71 @@
//! This crate provides x86_64 specific functions and data structures,
//! and access to various system registers.
#![cfg_attr(not(test), no_std)]
#![cfg_attr(feature = "const_fn", feature(const_mut_refs))] // GDT::append()
#![cfg_attr(feature = "asm_const", feature(asm_const))]
#![cfg_attr(feature = "abi_x86_interrupt", feature(abi_x86_interrupt))]
#![cfg_attr(feature = "step_trait", feature(step_trait))]
#![cfg_attr(feature = "doc_auto_cfg", feature(doc_auto_cfg))]
#![warn(missing_docs)]
#![deny(missing_debug_implementations)]
#![deny(unsafe_op_in_unsafe_fn)]
#![allow(elided_lifetimes_in_paths)]
#![allow(dead_code)]
pub use crate::addr::{align_down, align_up, PhysAddr, VirtAddr};
pub mod addr;
pub mod instructions;
pub mod registers;
pub mod structures;
/// Represents a protection ring level.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum PrivilegeLevel {
/// Privilege-level 0 (most privilege): This level is used by critical system-software
/// components that require direct access to, and control over, all processor and system
/// resources. This can include BIOS, memory-management functions, and interrupt handlers.
Ring0 = 0,
/// Privilege-level 1 (moderate privilege): This level is used by less-critical system-
/// software services that can access and control a limited scope of processor and system
/// resources. Software running at these privilege levels might include some device drivers
/// and library routines. The actual privileges of this level are defined by the
/// operating system.
Ring1 = 1,
/// Privilege-level 2 (moderate privilege): Like level 1, this level is used by
/// less-critical system-software services that can access and control a limited scope of
/// processor and system resources. The actual privileges of this level are defined by the
/// operating system.
Ring2 = 2,
/// Privilege-level 3 (least privilege): This level is used by application software.
/// Software running at privilege-level 3 is normally prevented from directly accessing
/// most processor and system resources. Instead, applications request access to the
/// protected processor and system resources by calling more-privileged service routines
/// to perform the accesses.
Ring3 = 3,
}
impl PrivilegeLevel {
/// Creates a `PrivilegeLevel` from a numeric value. The value must be in the range 0..4.
///
/// This function panics if the passed value is >3.
#[inline]
pub const fn from_u16(value: u16) -> PrivilegeLevel {
match value {
0 => PrivilegeLevel::Ring0,
1 => PrivilegeLevel::Ring1,
2 => PrivilegeLevel::Ring2,
3 => PrivilegeLevel::Ring3,
_ => panic!("invalid privilege level"),
}
}
}
pub(crate) mod sealed {
pub trait Sealed {}
}

View File

@ -0,0 +1,442 @@
//! Functions to read and write control registers.
pub use super::model_specific::{Efer, EferFlags};
use bitflags::bitflags;
/// Various control flags modifying the basic operation of the CPU.
#[derive(Debug)]
pub struct Cr0;
bitflags! {
/// Configuration flags of the [`Cr0`] register.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct Cr0Flags: u64 {
/// Enables protected mode.
const PROTECTED_MODE_ENABLE = 1;
/// Enables monitoring of the coprocessor, typical for x87 instructions.
///
/// Controls (together with the [`TASK_SWITCHED`](Cr0Flags::TASK_SWITCHED)
/// flag) whether a `wait` or `fwait` instruction should cause an `#NE` exception.
const MONITOR_COPROCESSOR = 1 << 1;
/// Force all x87 and MMX instructions to cause an `#NE` exception.
const EMULATE_COPROCESSOR = 1 << 2;
/// Automatically set to 1 on _hardware_ task switch.
///
/// This flag allows lazily saving x87/MMX/SSE state on hardware context switches.
const TASK_SWITCHED = 1 << 3;
/// Indicates support of 387DX math coprocessor instructions.
///
/// Always set on all recent x86 processors, cannot be cleared.
const EXTENSION_TYPE = 1 << 4;
/// Enables the native (internal) error reporting mechanism for x87 FPU errors.
const NUMERIC_ERROR = 1 << 5;
/// Controls whether supervisor-level writes to read-only pages are inhibited.
///
/// When set, it is not possible to write to read-only pages from ring 0.
const WRITE_PROTECT = 1 << 16;
/// Enables automatic usermode alignment checking if [`RFlags::ALIGNMENT_CHECK`] is also set.
const ALIGNMENT_MASK = 1 << 18;
/// Ignored, should always be unset.
///
/// Must be unset if [`CACHE_DISABLE`](Cr0Flags::CACHE_DISABLE) is unset.
/// Older CPUs used this to control write-back/write-through cache strategy.
const NOT_WRITE_THROUGH = 1 << 29;
/// Disables some processor caches, specifics are model-dependent.
const CACHE_DISABLE = 1 << 30;
/// Enables paging.
///
/// If this bit is set, [`PROTECTED_MODE_ENABLE`](Cr0Flags::PROTECTED_MODE_ENABLE) must be set.
const PAGING = 1 << 31;
}
}
/// Contains the Page Fault Linear Address (PFLA).
///
/// When a page fault occurs, the CPU sets this register to the faulting virtual address.
#[derive(Debug)]
pub struct Cr2;
/// Contains the physical address of the highest-level page table.
#[derive(Debug)]
pub struct Cr3;
bitflags! {
/// Controls cache settings for the highest-level page table.
///
/// Unused if paging is disabled or if [`PCID`](Cr4Flags::PCID) is enabled.
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct Cr3Flags: u64 {
/// Use a writethrough cache policy for the table (otherwise a writeback policy is used).
const PAGE_LEVEL_WRITETHROUGH = 1 << 3;
/// Disable caching for the table.
const PAGE_LEVEL_CACHE_DISABLE = 1 << 4;
}
}
/// Contains various control flags that enable architectural extensions, and
/// indicate support for specific processor capabilities.
#[derive(Debug)]
pub struct Cr4;
bitflags! {
/// Configuration flags of the [`Cr4`] register.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct Cr4Flags: u64 {
/// Enables hardware-supported performance enhancements for software running in
/// virtual-8086 mode.
const VIRTUAL_8086_MODE_EXTENSIONS = 1;
/// Enables support for protected-mode virtual interrupts.
const PROTECTED_MODE_VIRTUAL_INTERRUPTS = 1 << 1;
/// When set, only privilege-level 0 can execute the `RDTSC` or `RDTSCP` instructions.
const TIMESTAMP_DISABLE = 1 << 2;
/// Enables I/O breakpoint capability and enforces treatment of `DR4` and `DR5` registers
/// as reserved.
const DEBUGGING_EXTENSIONS = 1 << 3;
/// Enables the use of 4MB physical frames; ignored if
/// [`PHYSICAL_ADDRESS_EXTENSION`](Cr4Flags::PHYSICAL_ADDRESS_EXTENSION)
/// is set (so always ignored in long mode).
const PAGE_SIZE_EXTENSION = 1 << 4;
/// Enables physical address extensions and 2MB physical frames. Required in long mode.
const PHYSICAL_ADDRESS_EXTENSION = 1 << 5;
/// Enables the machine-check exception mechanism.
const MACHINE_CHECK_EXCEPTION = 1 << 6;
/// Enables the global page feature, allowing some page translations to
/// be marked as global (see [`PageTableFlags::GLOBAL`]).
const PAGE_GLOBAL = 1 << 7;
/// Allows software running at any privilege level to use the `RDPMC` instruction.
const PERFORMANCE_MONITOR_COUNTER = 1 << 8;
/// Enables the use of legacy SSE instructions; allows using `FXSAVE`/`FXRSTOR` for saving
/// processor state of 128-bit media instructions.
const OSFXSR = 1 << 9;
/// Enables the SIMD floating-point exception (`#XF`) for handling unmasked 256-bit and
/// 128-bit media floating-point errors.
const OSXMMEXCPT_ENABLE = 1 << 10;
/// Prevents the execution of the `SGDT`, `SIDT`, `SLDT`, `SMSW`, and `STR` instructions by
/// user-mode software.
const USER_MODE_INSTRUCTION_PREVENTION = 1 << 11;
/// Enables 5-level paging on supported CPUs (Intel Only).
const L5_PAGING = 1 << 12;
/// Enables VMX instructions (Intel Only).
const VIRTUAL_MACHINE_EXTENSIONS = 1 << 13;
/// Enables SMX instructions (Intel Only).
const SAFER_MODE_EXTENSIONS = 1 << 14;
/// Enables software running in 64-bit mode at any privilege level to read and write
/// the FS.base and GS.base hidden segment register state.
const FSGSBASE = 1 << 16;
/// Enables process-context identifiers (PCIDs).
const PCID = 1 << 17;
/// Enables extended processor state management instructions, including `XGETBV` and `XSAVE`.
const OSXSAVE = 1 << 18;
/// Enables the Key Locker feature (Intel Only).
///
/// This enables creation and use of opaque AES key handles; see the
/// [Intel Key Locker Specification](https://software.intel.com/content/www/us/en/develop/download/intel-key-locker-specification.html)
/// for more information.
const KEY_LOCKER = 1 << 19;
/// Prevents the execution of instructions that reside in pages accessible by user-mode
/// software when the processor is in supervisor-mode.
const SUPERVISOR_MODE_EXECUTION_PROTECTION = 1 << 20;
/// Enables restrictions for supervisor-mode software when reading data from user-mode
/// pages.
const SUPERVISOR_MODE_ACCESS_PREVENTION = 1 << 21;
/// Enables protection keys for user-mode pages.
///
/// Also enables access to the PKRU register (via the `RDPKRU`/`WRPKRU`
/// instructions) to set user-mode protection key access controls.
const PROTECTION_KEY_USER = 1 << 22;
/// Enables Control-flow Enforcement Technology (CET)
///
/// This enables the shadow stack feature, ensuring return addresses read
/// via `RET` and `IRET` have not been corrupted.
const CONTROL_FLOW_ENFORCEMENT = 1 << 23;
/// Enables protection keys for supervisor-mode pages (Intel Only).
///
/// Also enables the `IA32_PKRS` MSR to set supervisor-mode protection
/// key access controls.
const PROTECTION_KEY_SUPERVISOR = 1 << 24;
}
}
#[cfg(feature = "instructions")]
mod x86_64 {
use super::*;
use crate::{
addr::VirtAddrNotValid, instructions::tlb::Pcid, structures::paging::PhysFrame, PhysAddr,
VirtAddr,
};
use core::arch::asm;
impl Cr0 {
/// Read the current set of CR0 flags.
#[inline]
pub fn read() -> Cr0Flags {
Cr0Flags::from_bits_truncate(Self::read_raw())
}
/// Read the current raw CR0 value.
#[inline]
pub fn read_raw() -> u64 {
let value: u64;
unsafe {
asm!("mov {}, cr0", out(reg) value, options(nomem, nostack, preserves_flags));
}
value
}
/// Write CR0 flags.
///
/// Preserves the value of reserved fields.
///
/// ## Safety
///
/// This function is unsafe because it's possible to violate memory
/// safety through it, e.g. by disabling paging.
#[inline]
pub unsafe fn write(flags: Cr0Flags) {
let old_value = Self::read_raw();
let reserved = old_value & !(Cr0Flags::all().bits());
let new_value = reserved | flags.bits();
unsafe {
Self::write_raw(new_value);
}
}
/// Write raw CR0 flags.
///
/// Does _not_ preserve any values, including reserved fields.
///
/// ## Safety
///
/// This function is unsafe because it's possible to violate memory
/// safety through it, e.g. by disabling paging.
#[inline]
pub unsafe fn write_raw(value: u64) {
unsafe {
asm!("mov cr0, {}", in(reg) value, options(nostack, preserves_flags));
}
}
/// Updates CR0 flags.
///
/// Preserves the value of reserved fields.
///
/// ## Safety
///
/// This function is unsafe because it's possible to violate memory
/// safety through it, e.g. by disabling paging.
#[inline]
pub unsafe fn update<F>(f: F)
where
F: FnOnce(&mut Cr0Flags),
{
let mut flags = Self::read();
f(&mut flags);
unsafe {
Self::write(flags);
}
}
}
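// Example (not upstream code, purely illustrative): enabling ring-0 write
// protection with the flag-preserving `update` helper.
#[allow(dead_code)]
unsafe fn example_enable_write_protect() {
    unsafe {
        Cr0::update(|flags| flags.insert(Cr0Flags::WRITE_PROTECT));
    }
}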
impl Cr2 {
/// Read the current page fault linear address from the CR2 register.
///
/// # Errors
///
/// This method returns a [`VirtAddrNotValid`] error if the CR2 register contains a
/// non-canonical address. Call [`Cr2::read_raw`] to handle such cases.
#[inline]
pub fn read() -> Result<VirtAddr, VirtAddrNotValid> {
VirtAddr::try_new(Self::read_raw())
}
/// Read the current page fault linear address from the CR2 register as a raw `u64`.
#[inline]
pub fn read_raw() -> u64 {
let value: u64;
unsafe {
asm!("mov {}, cr2", out(reg) value, options(nomem, nostack, preserves_flags));
}
value
}
}
impl Cr3 {
/// Read the current P4 table address from the CR3 register.
#[inline]
pub fn read() -> (PhysFrame, Cr3Flags) {
let (frame, value) = Cr3::read_raw();
let flags = Cr3Flags::from_bits_truncate(value.into());
(frame, flags)
}
/// Read the current P4 table address from the CR3 register
#[inline]
pub fn read_raw() -> (PhysFrame, u16) {
let value: u64;
unsafe {
asm!("mov {}, cr3", out(reg) value, options(nomem, nostack, preserves_flags));
}
let addr = PhysAddr::new(value & 0x_000f_ffff_ffff_f000);
let frame = PhysFrame::containing_address(addr);
(frame, (value & 0xFFF) as u16)
}
/// Read the current P4 table address from the CR3 register along with PCID.
/// The correct functioning of this requires CR4.PCIDE = 1.
/// See [`Cr4Flags::PCID`]
#[inline]
pub fn read_pcid() -> (PhysFrame, Pcid) {
let (frame, value) = Cr3::read_raw();
(frame, Pcid::new(value as u16).unwrap())
}
/// Write a new P4 table address into the CR3 register.
///
/// ## Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.
#[inline]
pub unsafe fn write(frame: PhysFrame, flags: Cr3Flags) {
unsafe {
Cr3::write_raw_impl(false, frame, flags.bits() as u16);
}
}
/// Write a new P4 table address into the CR3 register.
///
/// ## Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.
/// [`Cr4Flags::PCID`] must be set before calling this method.
#[inline]
pub unsafe fn write_pcid(frame: PhysFrame, pcid: Pcid) {
unsafe {
Cr3::write_raw_impl(false, frame, pcid.value());
}
}
/// Write a new P4 table address into the CR3 register without flushing existing TLB entries for
/// the PCID.
///
/// ## Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.
/// [`Cr4Flags::PCID`] must be set before calling this method.
#[inline]
pub unsafe fn write_pcid_no_flush(frame: PhysFrame, pcid: Pcid) {
unsafe {
Cr3::write_raw_impl(true, frame, pcid.value());
}
}
/// Write a new P4 table address into the CR3 register.
///
/// ## Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.
#[inline]
pub unsafe fn write_raw(frame: PhysFrame, val: u16) {
unsafe { Self::write_raw_impl(false, frame, val) }
}
#[inline]
unsafe fn write_raw_impl(top_bit: bool, frame: PhysFrame, val: u16) {
let addr = frame.start_address();
let value = ((top_bit as u64) << 63) | addr.as_u64() | val as u64;
unsafe {
asm!("mov cr3, {}", in(reg) value, options(nostack, preserves_flags));
}
}
}
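// Example (sketch for illustration only): obtaining the physical address of
// the active level 4 page table, e.g. to seed a page-table walker.
#[allow(dead_code)]
fn example_active_p4_address() -> PhysAddr {
    let (frame, _flags) = Cr3::read();
    frame.start_address()
}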
impl Cr4 {
/// Read the current set of CR4 flags.
#[inline]
pub fn read() -> Cr4Flags {
Cr4Flags::from_bits_truncate(Self::read_raw())
}
/// Read the current raw CR4 value.
#[inline]
pub fn read_raw() -> u64 {
let value: u64;
unsafe {
asm!("mov {}, cr4", out(reg) value, options(nomem, nostack, preserves_flags));
}
value
}
/// Write CR4 flags.
///
/// Preserves the value of reserved fields.
///
/// ## Safety
///
/// This function is unsafe because it's possible to violate memory
/// safety through it, e.g. by overwriting the physical address extension
/// flag.
#[inline]
pub unsafe fn write(flags: Cr4Flags) {
let old_value = Self::read_raw();
let reserved = old_value & !(Cr4Flags::all().bits());
let new_value = reserved | flags.bits();
unsafe {
Self::write_raw(new_value);
}
}
/// Write raw CR4 flags.
///
/// Does _not_ preserve any values, including reserved fields.
///
/// ## Safety
///
/// This function is unsafe because it's possible to violate memory
/// safety through it, e.g. by overwriting the physical address extension
/// flag.
#[inline]
pub unsafe fn write_raw(value: u64) {
unsafe {
asm!("mov cr4, {}", in(reg) value, options(nostack, preserves_flags));
}
}
/// Updates CR4 flags.
///
/// Preserves the value of reserved fields.
///
/// ## Safety
///
/// This function is unsafe because it's possible to violate memory
/// safety through it, e.g. by overwriting the physical address extension
/// flag.
#[inline]
pub unsafe fn update<F>(f: F)
where
F: FnOnce(&mut Cr4Flags),
{
let mut flags = Self::read();
f(&mut flags);
unsafe {
Self::write(flags);
}
}
}
}

View File

@ -0,0 +1,503 @@
//! Functions to read and write debug registers.
#[cfg(feature = "instructions")]
use core::arch::asm;
use core::ops::Range;
use bit_field::BitField;
use bitflags::bitflags;
/// Debug Address Register
///
/// Holds the address of a hardware breakpoint.
pub trait DebugAddressRegister {
/// The corresponding [`DebugAddressRegisterNumber`].
const NUM: DebugAddressRegisterNumber;
/// Reads the current breakpoint address.
#[cfg(feature = "instructions")]
fn read() -> u64;
/// Writes the provided breakpoint address.
#[cfg(feature = "instructions")]
fn write(addr: u64);
}
macro_rules! debug_address_register {
($Dr:ident, $name:literal) => {
/// Debug Address Register
///
/// Holds the address of a hardware breakpoint.
#[derive(Debug)]
pub struct $Dr;
impl DebugAddressRegister for $Dr {
const NUM: DebugAddressRegisterNumber = DebugAddressRegisterNumber::$Dr;
#[cfg(feature = "instructions")]
#[inline]
fn read() -> u64 {
let addr;
unsafe {
asm!(concat!("mov {}, ", $name), out(reg) addr, options(nomem, nostack, preserves_flags));
}
addr
}
#[cfg(feature = "instructions")]
#[inline]
fn write(addr: u64) {
unsafe {
asm!(concat!("mov ", $name, ", {}"), in(reg) addr, options(nomem, nostack, preserves_flags));
}
}
}
};
}
debug_address_register!(Dr0, "dr0");
debug_address_register!(Dr1, "dr1");
debug_address_register!(Dr2, "dr2");
debug_address_register!(Dr3, "dr3");
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// A valid debug address register number.
///
/// Must be between 0 and 3 (inclusive).
pub enum DebugAddressRegisterNumber {
/// The debug address register number of [`Dr0`] (0).
Dr0,
/// The debug address register number of [`Dr1`] (1).
Dr1,
/// The debug address register number of [`Dr2`] (2).
Dr2,
/// The debug address register number of [`Dr3`] (3).
Dr3,
}
impl DebugAddressRegisterNumber {
/// Creates a debug address register number if it is valid.
pub const fn new(n: u8) -> Option<Self> {
match n {
0 => Some(Self::Dr0),
1 => Some(Self::Dr1),
2 => Some(Self::Dr2),
3 => Some(Self::Dr3),
_ => None,
}
}
/// Returns the number as a primitive type.
pub const fn get(self) -> u8 {
match self {
Self::Dr0 => 0,
Self::Dr1 => 1,
Self::Dr2 => 2,
Self::Dr3 => 3,
}
}
}
/// Debug Status Register (DR6).
///
/// Reports debug conditions from the last debug exception.
#[derive(Debug)]
pub struct Dr6;
bitflags! {
/// Debug condition flags of the [`Dr6`] register.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct Dr6Flags: u64 {
/// Breakpoint condition 0 was detected.
const TRAP0 = 1;
/// Breakpoint condition 1 was detected.
const TRAP1 = 1 << 1;
/// Breakpoint condition 2 was detected.
const TRAP2 = 1 << 2;
/// Breakpoint condition 3 was detected.
const TRAP3 = 1 << 3;
/// Breakpoint condition was detected.
const TRAP = Self::TRAP0.bits() | Self::TRAP1.bits() | Self::TRAP2.bits() | Self::TRAP3.bits();
/// Next instruction accesses one of the debug registers.
///
/// Enabled via [`Dr7Flags::GENERAL_DETECT_ENABLE`].
const ACCESS_DETECTED = 1 << 13;
/// CPU is in single-step execution mode.
///
/// Enabled via [`RFlags::TRAP_FLAG`].
const STEP = 1 << 14;
/// Task switch.
///
/// Enabled via the debug trap flag in the TSS of the target task.
const SWITCH = 1 << 15;
/// When *clear*, indicates a debug or breakpoint exception inside an RTM region.
///
/// Enabled via [`Dr7Flags::RESTRICTED_TRANSACTIONAL_MEMORY`] and the
/// RTM flag in the `IA32_DEBUGCTL` [`Msr`].
const RTM = 1 << 16;
}
}
impl Dr6Flags {
/// Returns the trap flag of the provided debug address register.
pub fn trap(n: DebugAddressRegisterNumber) -> Self {
match n {
DebugAddressRegisterNumber::Dr0 => Self::TRAP0,
DebugAddressRegisterNumber::Dr1 => Self::TRAP1,
DebugAddressRegisterNumber::Dr2 => Self::TRAP2,
DebugAddressRegisterNumber::Dr3 => Self::TRAP3,
}
}
}
bitflags! {
/// Debug control flags of the [`Dr7`] register.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct Dr7Flags: u64 {
/// Breakpoint 0 is enabled for the current task.
const LOCAL_BREAKPOINT_0_ENABLE = 1;
/// Breakpoint 1 is enabled for the current task.
const LOCAL_BREAKPOINT_1_ENABLE = 1 << 2;
/// Breakpoint 2 is enabled for the current task.
const LOCAL_BREAKPOINT_2_ENABLE = 1 << 4;
/// Breakpoint 3 is enabled for the current task.
const LOCAL_BREAKPOINT_3_ENABLE = 1 << 6;
/// Breakpoint 0 is enabled for all tasks.
const GLOBAL_BREAKPOINT_0_ENABLE = 1 << 1;
/// Breakpoint 1 is enabled for all tasks.
const GLOBAL_BREAKPOINT_1_ENABLE = 1 << 3;
/// Breakpoint 2 is enabled for all tasks.
const GLOBAL_BREAKPOINT_2_ENABLE = 1 << 5;
/// Breakpoint 3 is enabled for all tasks.
const GLOBAL_BREAKPOINT_3_ENABLE = 1 << 7;
/// Enable detection of exact instruction causing a data breakpoint condition for the current task.
///
/// This is not supported by `x86_64` processors, but is recommended to be enabled for backward and forward compatibility.
const LOCAL_EXACT_BREAKPOINT_ENABLE = 1 << 8;
/// Enable detection of exact instruction causing a data breakpoint condition for all tasks.
///
/// This is not supported by `x86_64` processors, but is recommended to be enabled for backward and forward compatibility.
const GLOBAL_EXACT_BREAKPOINT_ENABLE = 1 << 9;
/// Enables advanced debugging of RTM transactional regions.
///
/// The RTM flag in the `IA32_DEBUGCTL` [`Msr`] must also be set.
const RESTRICTED_TRANSACTIONAL_MEMORY = 1 << 11;
/// Enables debug register protection.
///
/// This will cause a debug exception before any access to a debug register.
const GENERAL_DETECT_ENABLE = 1 << 13;
}
}
impl Dr7Flags {
/// Returns the local breakpoint enable flag of the provided debug address register.
pub fn local_breakpoint_enable(n: DebugAddressRegisterNumber) -> Self {
match n {
DebugAddressRegisterNumber::Dr0 => Self::LOCAL_BREAKPOINT_0_ENABLE,
DebugAddressRegisterNumber::Dr1 => Self::LOCAL_BREAKPOINT_1_ENABLE,
DebugAddressRegisterNumber::Dr2 => Self::LOCAL_BREAKPOINT_2_ENABLE,
DebugAddressRegisterNumber::Dr3 => Self::LOCAL_BREAKPOINT_3_ENABLE,
}
}
/// Returns the global breakpoint enable flag of the provided debug address register.
pub fn global_breakpoint_enable(n: DebugAddressRegisterNumber) -> Self {
match n {
DebugAddressRegisterNumber::Dr0 => Self::GLOBAL_BREAKPOINT_0_ENABLE,
DebugAddressRegisterNumber::Dr1 => Self::GLOBAL_BREAKPOINT_1_ENABLE,
DebugAddressRegisterNumber::Dr2 => Self::GLOBAL_BREAKPOINT_2_ENABLE,
DebugAddressRegisterNumber::Dr3 => Self::GLOBAL_BREAKPOINT_3_ENABLE,
}
}
}
/// The condition for a hardware breakpoint.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum BreakpointCondition {
/// Instruction execution
InstructionExecution = 0b00,
/// Data writes
DataWrites = 0b01,
/// I/O reads or writes
IoReadsWrites = 0b10,
/// Data reads or writes but not instruction fetches
DataReadsWrites = 0b11,
}
impl BreakpointCondition {
/// Creates a new hardware breakpoint condition if `bits` is valid.
pub const fn from_bits(bits: u64) -> Option<Self> {
match bits {
0b00 => Some(Self::InstructionExecution),
0b01 => Some(Self::DataWrites),
0b10 => Some(Self::IoReadsWrites),
0b11 => Some(Self::DataReadsWrites),
_ => None,
}
}
const fn bit_range(n: DebugAddressRegisterNumber) -> Range<usize> {
let lsb = (16 + 4 * n.get()) as usize;
lsb..lsb + 2
}
}
/// The size of a hardware breakpoint.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum BreakpointSize {
/// 1 byte length
Length1B = 0b00,
/// 2 byte length
Length2B = 0b01,
/// 8 byte length
Length8B = 0b10,
/// 4 byte length
Length4B = 0b11,
}
impl BreakpointSize {
/// Creates a new hardware breakpoint size if `size` is valid.
pub const fn new(size: usize) -> Option<Self> {
match size {
1 => Some(Self::Length1B),
2 => Some(Self::Length2B),
8 => Some(Self::Length8B),
4 => Some(Self::Length4B),
_ => None,
}
}
/// Creates a new hardware breakpoint size if `bits` is valid.
pub const fn from_bits(bits: u64) -> Option<Self> {
match bits {
0b00 => Some(Self::Length1B),
0b01 => Some(Self::Length2B),
0b10 => Some(Self::Length8B),
0b11 => Some(Self::Length4B),
_ => None,
}
}
const fn bit_range(n: DebugAddressRegisterNumber) -> Range<usize> {
let lsb = (18 + 4 * n.get()) as usize;
lsb..lsb + 2
}
}
/// A valid value of the [`Dr7`] debug register.
///
/// In addition to the [`Dr7Flags`] this value has a condition field and a size field for each debug address register.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(transparent)]
pub struct Dr7Value {
bits: u64,
}
impl From<Dr7Flags> for Dr7Value {
fn from(dr7_flags: Dr7Flags) -> Self {
Self::from_bits_truncate(dr7_flags.bits())
}
}
impl Dr7Value {
const fn valid_bits() -> u64 {
let field_valid_bits = (1 << 32) - (1 << 16);
let flag_valid_bits = Dr7Flags::all().bits();
field_valid_bits | flag_valid_bits
}
/// Convert from underlying bit representation, unless that representation contains bits that do not correspond to a field.
#[inline]
pub const fn from_bits(bits: u64) -> Option<Self> {
if (bits & !Self::valid_bits()) == 0 {
Some(Self { bits })
} else {
None
}
}
/// Convert from underlying bit representation, dropping any bits that do not correspond to fields.
#[inline]
pub const fn from_bits_truncate(bits: u64) -> Self {
Self {
bits: bits & Self::valid_bits(),
}
}
/// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined field).
///
/// # Safety
///
/// The bit representation must be a valid [`Dr7Value`].
#[inline]
pub const unsafe fn from_bits_unchecked(bits: u64) -> Self {
Self { bits }
}
/// Returns the raw value of the fields currently stored.
#[inline]
pub const fn bits(&self) -> u64 {
self.bits
}
/// Returns the [`Dr7Flags`] in this value.
#[inline]
pub const fn flags(self) -> Dr7Flags {
Dr7Flags::from_bits_truncate(self.bits)
}
/// Inserts the specified [`Dr7Flags`] in-place.
#[inline]
pub fn insert_flags(&mut self, flags: Dr7Flags) {
self.bits |= flags.bits();
}
/// Removes the specified [`Dr7Flags`] in-place.
#[inline]
pub fn remove_flags(&mut self, flags: Dr7Flags) {
self.bits &= !flags.bits();
}
/// Toggles the specified [`Dr7Flags`] in-place.
#[inline]
pub fn toggle_flags(&mut self, flags: Dr7Flags) {
self.bits ^= flags.bits();
}
/// Inserts or removes the specified [`Dr7Flags`] depending on the passed value.
#[inline]
pub fn set_flags(&mut self, flags: Dr7Flags, value: bool) {
if value {
self.insert_flags(flags);
} else {
self.remove_flags(flags);
}
}
/// Returns the condition field of a debug address register.
pub fn condition(&self, n: DebugAddressRegisterNumber) -> BreakpointCondition {
let condition = self.bits.get_bits(BreakpointCondition::bit_range(n));
BreakpointCondition::from_bits(condition).expect("condition should always be valid")
}
/// Sets the condition field of a debug address register.
pub fn set_condition(&mut self, n: DebugAddressRegisterNumber, condition: BreakpointCondition) {
self.bits
.set_bits(BreakpointCondition::bit_range(n), condition as u64);
}
/// Returns the size field of a debug address register.
pub fn size(&self, n: DebugAddressRegisterNumber) -> BreakpointSize {
let size = self.bits.get_bits(BreakpointSize::bit_range(n));
BreakpointSize::from_bits(size).expect("size should always be valid")
}
/// Sets the size field of a debug address register.
pub fn set_size(&mut self, n: DebugAddressRegisterNumber, size: BreakpointSize) {
self.bits
.set_bits(BreakpointSize::bit_range(n), size as u64);
}
}
/// Debug Control Register (DR7).
///
/// Configures debug conditions for debug exceptions.
#[derive(Debug)]
pub struct Dr7;
#[cfg(feature = "instructions")]
mod x86_64 {
use super::*;
impl Dr6 {
/// Read the current set of DR6 flags.
#[inline]
pub fn read() -> Dr6Flags {
Dr6Flags::from_bits_truncate(Self::read_raw())
}
/// Read the current raw DR6 value.
#[inline]
pub fn read_raw() -> u64 {
let value;
unsafe {
asm!("mov {}, dr6", out(reg) value, options(nomem, nostack, preserves_flags));
}
value
}
}
impl Dr7 {
/// Read the current set of DR7 flags.
#[inline]
pub fn read() -> Dr7Value {
Dr7Value::from_bits_truncate(Self::read_raw())
}
/// Read the current raw DR7 value.
#[inline]
pub fn read_raw() -> u64 {
let value;
unsafe {
asm!("mov {}, dr7", out(reg) value, options(nomem, nostack, preserves_flags));
}
value
}
/// Write DR7 value.
///
/// Preserves the value of reserved fields.
#[inline]
pub fn write(value: Dr7Value) {
let old_value = Self::read_raw();
let reserved = old_value & !Dr7Value::valid_bits();
let new_value = reserved | value.bits();
Self::write_raw(new_value)
}
/// Write raw DR7 value.
#[inline]
pub fn write_raw(value: u64) {
unsafe {
asm!("mov dr7, {}", in(reg) value, options(nomem, nostack, preserves_flags));
}
}
}
}
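// Illustrative sketch (not part of the vendored file): arming a 4-byte
// data-write watchpoint on debug address register 0 using the types above.
// The watched linear address must already have been loaded into DR0 via the
// register types defined earlier in this module (not shown in this excerpt).
// Requires the "instructions" feature and CPL 0.
#[cfg(feature = "instructions")]
pub fn arm_dr0_write_watchpoint() {
    let mut dr7 = Dr7::read();
    dr7.insert_flags(Dr7Flags::local_breakpoint_enable(
        DebugAddressRegisterNumber::Dr0,
    ));
    dr7.set_condition(
        DebugAddressRegisterNumber::Dr0,
        BreakpointCondition::DataWrites,
    );
    dr7.set_size(
        DebugAddressRegisterNumber::Dr0,
        BreakpointSize::new(4).expect("4 bytes is a valid breakpoint size"),
    );
    // Write back the updated value; Dr7::write preserves reserved bits.
    Dr7::write(dr7);
}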

View File

@@ -0,0 +1,12 @@
//! Access to various system and model specific registers.
pub mod control;
pub mod debug;
pub mod model_specific;
pub mod mxcsr;
pub mod rflags;
pub mod segmentation;
pub mod xcontrol;
#[cfg(feature = "instructions")]
pub use crate::instructions::read_rip;

View File

@@ -0,0 +1,595 @@
//! Functions to read and write model specific registers.
use bitflags::bitflags;
// imports for intra doc links
#[cfg(doc)]
use crate::registers::segmentation::{FS, GS};
/// A model specific register.
#[derive(Debug)]
pub struct Msr(u32);
impl Msr {
/// Create an instance from a register.
#[inline]
pub const fn new(reg: u32) -> Msr {
Msr(reg)
}
}
/// The Extended Feature Enable Register.
#[derive(Debug)]
pub struct Efer;
/// [FS].Base Model Specific Register.
#[derive(Debug)]
pub struct FsBase;
/// [GS].Base Model Specific Register.
///
#[cfg_attr(
feature = "instructions",
doc = "[`GS::swap`] swaps this register with [`KernelGsBase`]."
)]
#[derive(Debug)]
pub struct GsBase;
/// KernelGsBase Model Specific Register.
///
#[cfg_attr(
feature = "instructions",
doc = "[`GS::swap`] swaps this register with [`GsBase`]."
)]
#[derive(Debug)]
pub struct KernelGsBase;
/// Syscall Register: STAR
#[derive(Debug)]
pub struct Star;
/// Syscall Register: LSTAR
#[derive(Debug)]
pub struct LStar;
/// Syscall Register: SFMASK
#[derive(Debug)]
pub struct SFMask;
/// IA32_U_CET: user mode CET configuration
#[derive(Debug)]
pub struct UCet;
/// IA32_S_CET: supervisor mode CET configuration
#[derive(Debug)]
pub struct SCet;
impl Efer {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0xC000_0080);
}
impl FsBase {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0xC000_0100);
}
impl GsBase {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0xC000_0101);
}
impl KernelGsBase {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0xC000_0102);
}
impl Star {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0xC000_0081);
}
impl LStar {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0xC000_0082);
}
impl SFMask {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0xC000_0084);
}
impl UCet {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0x6A0);
}
impl SCet {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0x6A2);
}
bitflags! {
/// Flags of the Extended Feature Enable Register.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct EferFlags: u64 {
/// Enables the `syscall` and `sysret` instructions.
const SYSTEM_CALL_EXTENSIONS = 1;
/// Activates long mode, requires activating paging.
const LONG_MODE_ENABLE = 1 << 8;
/// Indicates that long mode is active.
const LONG_MODE_ACTIVE = 1 << 10;
/// Enables the no-execute page-protection feature.
const NO_EXECUTE_ENABLE = 1 << 11;
/// Enables SVM extensions.
const SECURE_VIRTUAL_MACHINE_ENABLE = 1 << 12;
/// Enable certain limit checks in 64-bit mode.
const LONG_MODE_SEGMENT_LIMIT_ENABLE = 1 << 13;
/// Enable the `fxsave` and `fxrstor` instructions to execute faster in 64-bit mode.
const FAST_FXSAVE_FXRSTOR = 1 << 14;
/// Changes how the `invlpg` instruction operates on TLB entries of upper-level entries.
const TRANSLATION_CACHE_EXTENSION = 1 << 15;
}
}
bitflags! {
/// Flags stored in IA32_U_CET and IA32_S_CET (Table-2-2 in Intel SDM Volume
/// 4). The Intel SDM-equivalent names are described in parentheses.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct CetFlags: u64 {
/// Enable shadow stack (SH_STK_EN)
const SS_ENABLE = 1 << 0;
/// Enable the WRSSD/WRSSQ instructions (WR_SHSTK_EN)
const SS_WRITE_ENABLE = 1 << 1;
/// Enable indirect branch tracking (ENDBR_EN)
const IBT_ENABLE = 1 << 2;
/// Enable legacy treatment for indirect branch tracking (LEG_IW_EN)
const IBT_LEGACY_ENABLE = 1 << 3;
/// Enable no-track opcode prefix for indirect branch tracking (NO_TRACK_EN)
const IBT_NO_TRACK_ENABLE = 1 << 4;
/// Disable suppression of CET on legacy compatibility (SUPPRESS_DIS)
const IBT_LEGACY_SUPPRESS_ENABLE = 1 << 5;
/// Enable suppression of indirect branch tracking (SUPPRESS)
const IBT_SUPPRESS_ENABLE = 1 << 10;
/// Is IBT waiting for a branch to return? (read-only, TRACKER)
const IBT_TRACKED = 1 << 11;
}
}
#[cfg(feature = "instructions")]
mod x86_64 {
use super::*;
use crate::addr::VirtAddr;
use crate::registers::rflags::RFlags;
use crate::structures::gdt::SegmentSelector;
use crate::structures::paging::Page;
use crate::structures::paging::Size4KiB;
use crate::PrivilegeLevel;
use bit_field::BitField;
use core::convert::TryInto;
use core::fmt;
// imports for intra doc links
#[cfg(doc)]
use crate::registers::{
control::Cr4Flags,
segmentation::{Segment, Segment64, CS, SS},
};
use core::arch::asm;
impl Msr {
/// Read 64 bits msr register.
///
/// ## Safety
///
/// The caller must ensure that this read operation has no unsafe side
/// effects.
#[inline]
pub unsafe fn read(&self) -> u64 {
let (high, low): (u32, u32);
unsafe {
asm!(
"rdmsr",
in("ecx") self.0,
out("eax") low, out("edx") high,
options(nomem, nostack, preserves_flags),
);
}
((high as u64) << 32) | (low as u64)
}
/// Write 64 bits to msr register.
///
/// ## Safety
///
/// The caller must ensure that this write operation has no unsafe side
/// effects.
#[inline]
pub unsafe fn write(&mut self, value: u64) {
let low = value as u32;
let high = (value >> 32) as u32;
unsafe {
asm!(
"wrmsr",
in("ecx") self.0,
in("eax") low, in("edx") high,
options(nostack, preserves_flags),
);
}
}
}
impl Efer {
/// Read the current EFER flags.
#[inline]
pub fn read() -> EferFlags {
EferFlags::from_bits_truncate(Self::read_raw())
}
/// Read the current raw EFER flags.
#[inline]
pub fn read_raw() -> u64 {
unsafe { Self::MSR.read() }
}
/// Write the EFER flags, preserving reserved values.
///
/// Preserves the value of reserved fields.
///
/// ## Safety
///
/// Unsafe because it's possible to break memory
/// safety with wrong flags, e.g. by disabling long mode.
#[inline]
pub unsafe fn write(flags: EferFlags) {
let old_value = Self::read_raw();
let reserved = old_value & !(EferFlags::all().bits());
let new_value = reserved | flags.bits();
unsafe {
Self::write_raw(new_value);
}
}
/// Write the EFER flags.
///
/// Does not preserve any bits, including reserved fields.
///
/// ## Safety
///
/// Unsafe because it's possible to
/// break memory safety with wrong flags, e.g. by disabling long mode.
#[inline]
pub unsafe fn write_raw(flags: u64) {
let mut msr = Self::MSR;
unsafe {
msr.write(flags);
}
}
/// Update EFER flags.
///
/// Preserves the value of reserved fields.
///
/// ## Safety
///
/// Unsafe because it's possible to break memory
/// safety with wrong flags, e.g. by disabling long mode.
#[inline]
pub unsafe fn update<F>(f: F)
where
F: FnOnce(&mut EferFlags),
{
let mut flags = Self::read();
f(&mut flags);
unsafe {
Self::write(flags);
}
}
}
impl FsBase {
/// Read the current FsBase register.
///
/// If [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is set, the more efficient
/// [`FS::read_base`] can be used instead.
#[inline]
pub fn read() -> VirtAddr {
VirtAddr::new(unsafe { Self::MSR.read() })
}
/// Write a given virtual address to the FS.Base register.
///
/// If [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is set, the more efficient
/// [`FS::write_base`] can be used instead.
#[inline]
pub fn write(address: VirtAddr) {
let mut msr = Self::MSR;
unsafe { msr.write(address.as_u64()) };
}
}
impl GsBase {
/// Read the current GsBase register.
///
/// If [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is set, the more efficient
/// [`GS::read_base`] can be used instead.
#[inline]
pub fn read() -> VirtAddr {
VirtAddr::new(unsafe { Self::MSR.read() })
}
/// Write a given virtual address to the GS.Base register.
///
/// If [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is set, the more efficient
/// [`GS::write_base`] can be used instead.
#[inline]
pub fn write(address: VirtAddr) {
let mut msr = Self::MSR;
unsafe { msr.write(address.as_u64()) };
}
}
impl KernelGsBase {
/// Read the current KernelGsBase register.
#[inline]
pub fn read() -> VirtAddr {
VirtAddr::new(unsafe { Self::MSR.read() })
}
/// Write a given virtual address to the KernelGsBase register.
#[inline]
pub fn write(address: VirtAddr) {
let mut msr = Self::MSR;
unsafe { msr.write(address.as_u64()) };
}
}
impl Star {
/// Read the Ring 0 and Ring 3 segment bases.
/// The remaining fields are ignored because they are
/// not valid for long mode.
///
/// # Returns
/// - Field 1 (SYSRET): The CS selector is set to this field + 16. SS.Sel is set to
/// this field + 8. Because SYSRET always returns to CPL 3, the
/// RPL bits 1:0 should be initialized to 11b.
/// - Field 2 (SYSCALL): This field is copied directly into CS.Sel. SS.Sel is set to
/// this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits
/// 33:32 should be initialized to 00b.
#[inline]
pub fn read_raw() -> (u16, u16) {
let msr_value = unsafe { Self::MSR.read() };
let sysret = msr_value.get_bits(48..64);
let syscall = msr_value.get_bits(32..48);
(sysret.try_into().unwrap(), syscall.try_into().unwrap())
}
/// Read the Ring 0 and Ring 3 segment bases.
/// Returns
/// - CS Selector SYSRET
/// - SS Selector SYSRET
/// - CS Selector SYSCALL
/// - SS Selector SYSCALL
#[inline]
pub fn read() -> (
SegmentSelector,
SegmentSelector,
SegmentSelector,
SegmentSelector,
) {
let raw = Self::read_raw();
(
SegmentSelector(raw.0 + 16),
SegmentSelector(raw.0 + 8),
SegmentSelector(raw.1),
SegmentSelector(raw.1 + 8),
)
}
/// Write the Ring 0 and Ring 3 segment bases.
/// The remaining fields are ignored because they are
/// not valid for long mode.
///
/// # Parameters
/// - sysret: The CS selector is set to this field + 16. SS.Sel is set to
/// this field + 8. Because SYSRET always returns to CPL 3, the
/// RPL bits 1:0 should be initialized to 11b.
/// - syscall: This field is copied directly into CS.Sel. SS.Sel is set to
/// this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits
/// 33:32 should be initialized to 00b.
///
/// # Safety
///
/// Unsafe because this can cause system instability if passed in the
/// wrong values for the fields.
#[inline]
pub unsafe fn write_raw(sysret: u16, syscall: u16) {
let mut msr_value = 0u64;
msr_value.set_bits(48..64, sysret.into());
msr_value.set_bits(32..48, syscall.into());
let mut msr = Self::MSR;
unsafe {
msr.write(msr_value);
}
}
/// Write the Ring 0 and Ring 3 segment bases.
/// The remaining fields are ignored because they are
/// not valid for long mode.
/// This function will fail if the segment selectors are
/// not in the correct offset of each other or if the
/// segment selectors do not have correct privileges.
#[inline]
pub fn write(
cs_sysret: SegmentSelector,
ss_sysret: SegmentSelector,
cs_syscall: SegmentSelector,
ss_syscall: SegmentSelector,
) -> Result<(), InvalidStarSegmentSelectors> {
// Convert to i32 to prevent underflows.
let cs_sysret_cmp = i32::from(cs_sysret.0) - 16;
let ss_sysret_cmp = i32::from(ss_sysret.0) - 8;
let cs_syscall_cmp = i32::from(cs_syscall.0);
let ss_syscall_cmp = i32::from(ss_syscall.0) - 8;
if cs_sysret_cmp != ss_sysret_cmp {
return Err(InvalidStarSegmentSelectors::SysretOffset);
}
if cs_syscall_cmp != ss_syscall_cmp {
return Err(InvalidStarSegmentSelectors::SyscallOffset);
}
if ss_sysret.rpl() != PrivilegeLevel::Ring3 {
return Err(InvalidStarSegmentSelectors::SysretPrivilegeLevel);
}
if ss_syscall.rpl() != PrivilegeLevel::Ring0 {
return Err(InvalidStarSegmentSelectors::SyscallPrivilegeLevel);
}
unsafe { Self::write_raw(ss_sysret.0 - 8, cs_syscall.0) };
Ok(())
}
}
#[derive(Debug)]
pub enum InvalidStarSegmentSelectors {
SysretOffset,
SyscallOffset,
SysretPrivilegeLevel,
SyscallPrivilegeLevel,
}
impl fmt::Display for InvalidStarSegmentSelectors {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::SysretOffset => write!(f, "Sysret CS and SS are not offset by 8."),
Self::SyscallOffset => write!(f, "Syscall CS and SS are not offset by 8."),
Self::SysretPrivilegeLevel => {
write!(f, "Sysret's segment must be a Ring3 segment.")
}
Self::SyscallPrivilegeLevel => {
write!(f, "Syscall's segment must be a Ring0 segment.")
}
}
}
}
impl LStar {
/// Read the current LStar register.
/// This holds the target RIP of a syscall.
#[inline]
pub fn read() -> VirtAddr {
VirtAddr::new(unsafe { Self::MSR.read() })
}
/// Write a given virtual address to the LStar register.
/// This holds the target RIP of a syscall.
#[inline]
pub fn write(address: VirtAddr) {
let mut msr = Self::MSR;
unsafe { msr.write(address.as_u64()) };
}
}
impl SFMask {
/// Read the SFMask register.
/// The SFMASK register is used to specify which RFLAGS bits
/// are cleared during a SYSCALL. In long mode, SFMASK is used
/// to specify which RFLAGS bits are cleared when SYSCALL is
/// executed. If a bit in SFMASK is set to 1, the corresponding
/// bit in RFLAGS is cleared to 0. If a bit in SFMASK is cleared
/// to 0, the corresponding rFLAGS bit is not modified.
#[inline]
pub fn read() -> RFlags {
RFlags::from_bits(unsafe { Self::MSR.read() }).unwrap()
}
/// Write to the SFMask register.
/// The SFMASK register is used to specify which RFLAGS bits
/// are cleared during a SYSCALL. In long mode, SFMASK is used
/// to specify which RFLAGS bits are cleared when SYSCALL is
/// executed. If a bit in SFMASK is set to 1, the corresponding
/// bit in RFLAGS is cleared to 0. If a bit in SFMASK is cleared
/// to 0, the corresponding rFLAGS bit is not modified.
#[inline]
pub fn write(value: RFlags) {
let mut msr = Self::MSR;
unsafe { msr.write(value.bits()) };
}
}
impl UCet {
/// Read the raw IA32_U_CET.
#[inline]
fn read_raw() -> u64 {
unsafe { Self::MSR.read() }
}
/// Write the raw IA32_U_CET.
#[inline]
fn write_raw(value: u64) {
let mut msr = Self::MSR;
unsafe {
msr.write(value);
}
}
/// Read IA32_U_CET. Returns a tuple of the flags and the address to the legacy code page bitmap.
#[inline]
pub fn read() -> (CetFlags, Page) {
let value = Self::read_raw();
let cet_flags = CetFlags::from_bits_truncate(value);
let legacy_bitmap =
Page::from_start_address(VirtAddr::new(value & !(Page::<Size4KiB>::SIZE - 1)))
.unwrap();
(cet_flags, legacy_bitmap)
}
/// Write IA32_U_CET.
#[inline]
pub fn write(flags: CetFlags, legacy_bitmap: Page) {
Self::write_raw(flags.bits() | legacy_bitmap.start_address().as_u64());
}
}
impl SCet {
/// Read the raw IA32_S_CET.
#[inline]
fn read_raw() -> u64 {
unsafe { Self::MSR.read() }
}
/// Write the raw IA32_S_CET.
#[inline]
fn write_raw(value: u64) {
let mut msr = Self::MSR;
unsafe {
msr.write(value);
}
}
/// Read IA32_S_CET. Returns a tuple of the flags and the address to the legacy code page bitmap.
#[inline]
pub fn read() -> (CetFlags, Page) {
let value = Self::read_raw();
let cet_flags = CetFlags::from_bits_truncate(value);
let legacy_bitmap =
Page::from_start_address(VirtAddr::new(value & !(Page::<Size4KiB>::SIZE - 1)))
.unwrap();
(cet_flags, legacy_bitmap)
}
/// Write IA32_S_CET.
#[inline]
pub fn write(flags: CetFlags, legacy_bitmap: Page) {
Self::write_raw(flags.bits() | legacy_bitmap.start_address().as_u64());
}
}
}
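// Illustrative sketch (not part of the vendored file): wiring up the MSRs
// consulted by `syscall`/`sysret`. The selectors are assumed to come from a
// GDT laid out so that the STAR offset rules documented above hold, and
// `handler` is a hypothetical syscall entry point. Requires the
// "instructions" feature and CPL 0.
#[cfg(feature = "instructions")]
pub unsafe fn init_syscall_msrs(
    kernel_cs: crate::structures::gdt::SegmentSelector,
    kernel_ss: crate::structures::gdt::SegmentSelector,
    user_cs: crate::structures::gdt::SegmentSelector,
    user_ss: crate::structures::gdt::SegmentSelector,
    handler: crate::VirtAddr,
) {
    use crate::registers::rflags::RFlags;

    // Allow the `syscall` and `sysret` instructions in the first place.
    unsafe {
        Efer::update(|flags| flags.insert(EferFlags::SYSTEM_CALL_EXTENSIONS));
    }
    // Segment selectors loaded on `sysret` (user) and `syscall` (kernel).
    Star::write(user_cs, user_ss, kernel_cs, kernel_ss)
        .expect("GDT selectors must satisfy the STAR layout rules");
    // Target RIP for `syscall`.
    LStar::write(handler);
    // RFLAGS bits cleared on entry; masking IF keeps interrupts off until the
    // handler has switched to a kernel stack.
    SFMask::write(RFlags::INTERRUPT_FLAG);
}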

View File

@@ -0,0 +1,109 @@
//! Functions to read and write MXCSR register.
#[cfg(feature = "instructions")]
pub use self::x86_64::*;
use bitflags::bitflags;
bitflags! {
/// MXCSR register.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct MxCsr: u32 {
/// Invalid operation
const INVALID_OPERATION = 1 << 0;
/// Denormal
const DENORMAL = 1 << 1;
/// Divide-by-zero
const DIVIDE_BY_ZERO = 1 << 2;
/// Overflow
const OVERFLOW = 1 << 3;
/// Underflow
const UNDERFLOW = 1 << 4;
/// Precision
const PRECISION = 1 << 5;
/// Denormals are zeros
const DENORMALS_ARE_ZEROS = 1 << 6;
/// Invalid operation mask
const INVALID_OPERATION_MASK = 1 << 7;
/// Denormal mask
const DENORMAL_MASK = 1 << 8;
/// Divide-by-zero mask
const DIVIDE_BY_ZERO_MASK = 1 << 9;
/// Overflow mask
const OVERFLOW_MASK = 1 << 10;
/// Underflow mask
const UNDERFLOW_MASK = 1 << 11;
/// Precision mask
const PRECISION_MASK = 1 << 12;
/// Toward negative infinity
const ROUNDING_CONTROL_NEGATIVE = 1 << 13;
/// Toward positive infinity
const ROUNDING_CONTROL_POSITIVE = 1 << 14;
/// Toward zero (positive + negative)
const ROUNDING_CONTROL_ZERO = 3 << 13;
/// Flush to zero
const FLUSH_TO_ZERO = 1 << 15;
}
}
impl Default for MxCsr {
/// Return the default MXCSR value at reset, as documented in Intel SDM volume 2A.
#[inline]
fn default() -> Self {
MxCsr::INVALID_OPERATION_MASK
| MxCsr::DENORMAL_MASK
| MxCsr::DIVIDE_BY_ZERO_MASK
| MxCsr::OVERFLOW_MASK
| MxCsr::UNDERFLOW_MASK
| MxCsr::PRECISION_MASK
}
}
#[cfg(feature = "instructions")]
mod x86_64 {
use super::*;
use core::arch::asm;
/// Read the value of MXCSR.
#[inline]
pub fn read() -> MxCsr {
let mut mxcsr: u32 = 0;
unsafe {
asm!("stmxcsr [{}]", in(reg) &mut mxcsr, options(nostack, preserves_flags));
}
MxCsr::from_bits_truncate(mxcsr)
}
/// Write MXCSR.
#[inline]
pub fn write(mxcsr: MxCsr) {
unsafe {
asm!("ldmxcsr [{}]", in(reg) &mxcsr, options(nostack, readonly));
}
}
#[cfg(test)]
mod test {
use crate::registers::mxcsr::*;
#[test]
fn mxcsr_default() {
let mxcsr = read();
assert_eq!(mxcsr, MxCsr::from_bits_truncate(0x1F80));
}
#[test]
fn mxcsr_read() {
let mxcsr = read();
assert_eq!(mxcsr, MxCsr::default());
}
#[test]
fn mxcsr_write() {
let mxcsr = read();
write(mxcsr);
assert_eq!(mxcsr, read());
}
}
}
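// Illustrative sketch (not part of the vendored file): putting MXCSR into a
// state suitable for code that must never take SIMD floating-point
// exceptions - all exception masks set (the reset default) plus flush-to-zero.
// Requires the "instructions" feature.
#[cfg(feature = "instructions")]
pub fn mask_all_sse_exceptions() {
    let mut mxcsr = read();
    mxcsr.insert(MxCsr::default() | MxCsr::FLUSH_TO_ZERO);
    write(mxcsr);
}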

View File

@@ -0,0 +1,139 @@
//! Processor state stored in the RFLAGS register.
#[cfg(feature = "instructions")]
pub use self::x86_64::*;
use bitflags::bitflags;
bitflags! {
/// The RFLAGS register. All bit patterns are valid representations for this type.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct RFlags: u64 {
/// Processor feature identification flag.
///
/// If this flag is modifiable, the CPU supports CPUID.
const ID = 1 << 21;
/// Indicates that an external, maskable interrupt is pending.
///
/// Used when virtual-8086 mode extensions (CR4.VME) or protected-mode virtual
/// interrupts (CR4.PVI) are activated.
const VIRTUAL_INTERRUPT_PENDING = 1 << 20;
/// Virtual image of the INTERRUPT_FLAG bit.
///
/// Used when virtual-8086 mode extensions (CR4.VME) or protected-mode virtual
/// interrupts (CR4.PVI) are activated.
const VIRTUAL_INTERRUPT = 1 << 19;
/// Enable automatic alignment checking if CR0.AM is set. Only works if CPL is 3.
const ALIGNMENT_CHECK = 1 << 18;
/// Enable the virtual-8086 mode.
const VIRTUAL_8086_MODE = 1 << 17;
/// Allows restarting an instruction following an instruction breakpoint.
const RESUME_FLAG = 1 << 16;
/// Used by `iret` in hardware task switch mode to determine if current task is nested.
const NESTED_TASK = 1 << 14;
/// The high bit of the I/O Privilege Level field.
///
/// Specifies the privilege level required for executing I/O address-space instructions.
const IOPL_HIGH = 1 << 13;
/// The low bit of the I/O Privilege Level field.
///
/// Specifies the privilege level required for executing I/O address-space instructions.
const IOPL_LOW = 1 << 12;
/// Set by hardware to indicate that the sign bit of the result of the last signed integer
/// operation differs from the source operands.
const OVERFLOW_FLAG = 1 << 11;
/// Determines the order in which strings are processed.
const DIRECTION_FLAG = 1 << 10;
/// Enable interrupts.
const INTERRUPT_FLAG = 1 << 9;
/// Enable single-step mode for debugging.
const TRAP_FLAG = 1 << 8;
/// Set by hardware if last arithmetic operation resulted in a negative value.
const SIGN_FLAG = 1 << 7;
/// Set by hardware if last arithmetic operation resulted in a zero value.
const ZERO_FLAG = 1 << 6;
/// Set by hardware if last arithmetic operation generated a carry out of bit 3 of the
/// result.
const AUXILIARY_CARRY_FLAG = 1 << 4;
/// Set by hardware if last result has an even number of 1 bits (only for some operations).
const PARITY_FLAG = 1 << 2;
/// Set by hardware if last arithmetic operation generated a carry out of the
/// most-significant bit of the result.
const CARRY_FLAG = 1;
}
}
#[cfg(feature = "instructions")]
mod x86_64 {
use super::*;
use core::arch::asm;
/// Returns the current value of the RFLAGS register.
///
/// Drops any unknown bits.
#[inline]
pub fn read() -> RFlags {
RFlags::from_bits_truncate(read_raw())
}
/// Returns the raw current value of the RFLAGS register.
#[inline]
pub fn read_raw() -> u64 {
let r: u64;
unsafe {
asm!("pushfq; pop {}", out(reg) r, options(nomem, preserves_flags));
}
r
}
/// Writes the RFLAGS register, preserves reserved bits.
///
/// ## Safety
///
/// Unsafe because undefined behavior can occur if certain flags are modified. For example,
/// the `DF` flag must be unset in all Rust code. Also, modifying `CF`, `PF`, or any other
/// flags also used by Rust/LLVM can result in undefined behavior too.
#[inline]
pub unsafe fn write(flags: RFlags) {
let old_value = read_raw();
let reserved = old_value & !(RFlags::all().bits());
let new_value = reserved | flags.bits();
unsafe {
write_raw(new_value);
}
}
/// Writes the RFLAGS register.
///
/// Does not preserve any bits, including reserved bits.
///
/// ## Safety
///
/// Unsafe because undefined behavior can occur if certain flags are modified. For example,
/// the `DF` flag must be unset in all Rust code. Also, modifying `CF`, `PF`, or any other
/// flags also used by Rust/LLVM can result in undefined behavior too.
#[inline]
pub unsafe fn write_raw(val: u64) {
// HACK: we mark this function as preserves_flags to prevent Rust from restoring
// saved flags after the "popf" below. See above note on safety.
unsafe {
asm!("push {}; popfq", in(reg) val, options(nomem, preserves_flags));
}
}
#[cfg(test)]
mod test {
use crate::registers::rflags::read;
#[test]
fn rflags_read() {
let rflags = read();
println!("{:#?}", rflags);
}
}
}
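// Illustrative sketch (not part of the vendored file): a read-only query that
// kernels commonly need, e.g. to decide whether a critical section has to
// disable and later re-enable interrupts. Requires the "instructions" feature.
#[cfg(feature = "instructions")]
pub fn interrupts_enabled() -> bool {
    read().contains(RFlags::INTERRUPT_FLAG)
}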

View File

@@ -0,0 +1,170 @@
//! Abstractions for segment registers.
use super::model_specific::Msr;
use crate::{PrivilegeLevel, VirtAddr};
use bit_field::BitField;
use core::fmt;
// imports for intra doc links
#[cfg(doc)]
use crate::{
registers::control::Cr4Flags,
structures::gdt::{Descriptor, DescriptorFlags, GlobalDescriptorTable},
};
/// An x86 segment
///
/// Segment registers on x86 are 16-bit [`SegmentSelector`]s, which index into
/// the [`GlobalDescriptorTable`]. The corresponding GDT entry is used to
/// configure the segment itself. Note that most segmentation functionality is
/// disabled in 64-bit mode. See the individual segments for more information.
pub trait Segment {
/// Returns the current value of the segment register.
fn get_reg() -> SegmentSelector;
/// Reload the segment register. Depending on the segment, this may also
/// reconfigure the corresponding segment.
///
/// ## Safety
///
/// This function is unsafe because the caller must ensure that `sel`
/// is a valid segment descriptor, and that reconfiguring the segment will
/// not cause undefined behavior.
unsafe fn set_reg(sel: SegmentSelector);
}
/// An x86 segment which is actually used in 64-bit mode
///
/// While most segments are unused in 64-bit mode, the FS and GS segments are
/// still partially used. Only the 64-bit segment base address is used, and this
/// address can be set via the GDT, or by using the `FSGSBASE` instructions.
pub trait Segment64: Segment {
/// MSR containing the segment base. This MSR can be used to set the base
/// when [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is **not** set.
const BASE: Msr;
/// Reads the segment base address
///
/// ## Exceptions
///
/// If [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is not set, this instruction will throw a `#UD`.
fn read_base() -> VirtAddr;
/// Writes the segment base address
///
/// ## Exceptions
///
/// If [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is not set, this instruction will throw a `#UD`.
///
/// ## Safety
///
/// The caller must ensure that this write operation has no unsafe side
/// effects, as the segment base address might be in use.
unsafe fn write_base(base: VirtAddr);
}
/// Specifies which element to load into a segment from
/// descriptor tables (i.e., an index into the LDT or GDT table
/// with some additional flags).
///
/// See Intel 3a, Section 3.4.2 "Segment Selectors"
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct SegmentSelector(pub u16);
impl SegmentSelector {
/// Creates a new SegmentSelector
///
/// # Arguments
/// * `index`: index in GDT or LDT array (not the offset)
/// * `rpl`: the requested privilege level
#[inline]
pub const fn new(index: u16, rpl: PrivilegeLevel) -> SegmentSelector {
SegmentSelector(index << 3 | (rpl as u16))
}
/// Can be used as a selector into a non-existent segment and assigned to segment registers,
/// e.g. data segment register in ring 0
pub const NULL: Self = Self::new(0, PrivilegeLevel::Ring0);
/// Returns the GDT index.
#[inline]
pub fn index(self) -> u16 {
self.0 >> 3
}
/// Returns the requested privilege level.
#[inline]
pub fn rpl(self) -> PrivilegeLevel {
PrivilegeLevel::from_u16(self.0.get_bits(0..2))
}
/// Set the privilege level for this Segment selector.
#[inline]
pub fn set_rpl(&mut self, rpl: PrivilegeLevel) {
self.0.set_bits(0..2, rpl as u16);
}
}
impl fmt::Debug for SegmentSelector {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut s = f.debug_struct("SegmentSelector");
s.field("index", &self.index());
s.field("rpl", &self.rpl());
s.finish()
}
}
/// Code Segment
///
/// While most fields in the Code-Segment [`Descriptor`] are unused in 64-bit
/// long mode, some of them must be set to a specific value. The
/// [`EXECUTABLE`](DescriptorFlags::EXECUTABLE),
/// [`USER_SEGMENT`](DescriptorFlags::USER_SEGMENT), and
/// [`LONG_MODE`](DescriptorFlags::LONG_MODE) bits must be set, while the
/// [`DEFAULT_SIZE`](DescriptorFlags::DEFAULT_SIZE) bit must be unset.
///
/// The [`DPL_RING_3`](DescriptorFlags::DPL_RING_3) field can be used to change
/// privilege level. The [`PRESENT`](DescriptorFlags::PRESENT) bit can be used
/// to make a segment present or not present.
///
/// All other fields (like the segment base and limit) are ignored by the
/// processor and setting them has no effect.
#[derive(Debug)]
pub struct CS;
/// Stack Segment
///
/// Entirely unused in 64-bit mode; setting the segment register does nothing.
/// However, in ring 3, the SS register still has to point to a valid
/// [`Descriptor`] (it cannot be zero). This
/// means a user-mode read/write segment descriptor must be present in the GDT.
///
/// This register is also set by the `syscall`/`sysret` and
/// `sysenter`/`sysexit` instructions (even on 64-bit transitions). This is to
/// maintain symmetry with 32-bit transitions, where setting SS actually has
/// an effect.
#[derive(Debug)]
pub struct SS;
/// Data Segment
///
/// Entirely unused in 64-bit mode; setting the segment register does nothing.
#[derive(Debug)]
pub struct DS;
/// ES Segment
///
/// Entirely unused in 64-bit mode; setting the segment register does nothing.
#[derive(Debug)]
pub struct ES;
/// FS Segment
///
/// Only base is used in 64-bit mode, see [`Segment64`]. This is often used in
/// user-mode for Thread-Local Storage (TLS).
#[derive(Debug)]
pub struct FS;
/// GS Segment
///
/// Only base is used in 64-bit mode, see [`Segment64`]. In kernel-mode, the GS
/// base often points to a per-cpu kernel data structure.
#[derive(Debug)]
pub struct GS;
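// Illustrative sketch (not part of the vendored file): constructing the
// selector for GDT entry 3 with ring-3 RPL (e.g. a user data segment) and
// checking how the index and RPL are packed into the raw 16-bit value.
#[allow(dead_code)]
fn selector_layout_example() {
    let sel = SegmentSelector::new(3, PrivilegeLevel::Ring3);
    assert_eq!(sel.index(), 3);
    assert_eq!(sel.rpl(), PrivilegeLevel::Ring3);
    // The raw value encodes index << 3 | RPL.
    assert_eq!(sel.0, (3 << 3) | 3);
}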

View File

@@ -0,0 +1,149 @@
//! Access to various extended system registers
use bitflags::bitflags;
/// Extended feature enable mask register
#[derive(Debug)]
pub struct XCr0;
bitflags! {
/// Configuration flags of the XCr0 register.
///
/// For MPX, [`BNDREG`](XCr0Flags::BNDREG) and [`BNDCSR`](XCr0Flags::BNDCSR) must be set/unset simultaneously.
/// For AVX-512, [`OPMASK`](XCr0Flags::OPMASK), [`ZMM_HI256`](XCr0Flags::ZMM_HI256), and [`HI16_ZMM`](XCr0Flags::HI16_ZMM) must be set/unset simultaneously.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct XCr0Flags: u64 {
/// Enables using the x87 FPU state
/// with `XSAVE`/`XRSTOR`.
///
/// Must be set.
const X87 = 1;
/// Enables using MXCSR and the XMM registers
/// with `XSAVE`/`XRSTOR`.
///
/// Must be set if [`AVX`](XCr0Flags::AVX) is set.
const SSE = 1 << 1;
/// Enables AVX instructions and using the upper halves of the AVX registers
/// with `XSAVE`/`XRSTOR`.
const AVX = 1 << 2;
/// Enables MPX instructions and using the BND0-BND3 bound registers
/// with `XSAVE`/`XRSTOR` (Intel Only).
const BNDREG = 1 << 3;
/// Enables MPX instructions and using the BNDCFGU and BNDSTATUS registers
/// with `XSAVE`/`XRSTOR` (Intel Only).
const BNDCSR = 1 << 4;
/// Enables AVX-512 instructions and using the K0-K7 mask registers
/// with `XSAVE`/`XRSTOR` (Intel Only).
const OPMASK = 1 << 5;
/// Enables AVX-512 instructions and using the upper halves of the lower ZMM registers
/// with `XSAVE`/`XRSTOR` (Intel Only).
const ZMM_HI256 = 1 << 6;
/// Enables AVX-512 instructions and using the upper ZMM registers
/// with `XSAVE`/`XRSTOR` (Intel Only).
const HI16_ZMM = 1 << 7;
/// Enables using the PKRU register
/// with `XSAVE`/`XRSTOR`.
const MPK = 1<<9;
/// Enables Lightweight Profiling extensions and managing LWP state
/// with `XSAVE`/`XRSTOR` (AMD Only).
const LWP = 1<<62;
}
}
#[cfg(feature = "instructions")]
mod x86_64 {
use super::*;
use core::arch::asm;
impl XCr0 {
/// Read the current set of XCR0 flags.
#[inline]
pub fn read() -> XCr0Flags {
XCr0Flags::from_bits_truncate(Self::read_raw())
}
/// Read the current raw XCR0 value.
#[inline]
pub fn read_raw() -> u64 {
unsafe {
let (low, high): (u32, u32);
asm!(
"xgetbv",
in("ecx") 0,
out("rax") low, out("rdx") high,
options(nomem, nostack, preserves_flags),
);
(high as u64) << 32 | (low as u64)
}
}
/// Write XCR0 flags.
///
/// Preserves the value of reserved fields.
/// Panics if invalid combinations of [`XCr0Flags`] are set.
///
/// ## Safety
///
/// This function is unsafe because it's possible to
/// enable features that are not supported by the architecture
#[inline]
pub unsafe fn write(flags: XCr0Flags) {
let old_value = Self::read_raw();
let reserved = old_value & !(XCr0Flags::all().bits());
let new_value = reserved | flags.bits();
assert!(flags.contains(XCr0Flags::X87), "The X87 flag must be set");
if flags.contains(XCr0Flags::AVX) {
assert!(
flags.contains(XCr0Flags::SSE),
"AVX cannot be enabled without enabling SSE"
);
}
let mpx = XCr0Flags::BNDREG | XCr0Flags::BNDCSR;
if flags.intersects(mpx) {
assert!(
flags.contains(mpx),
"MPX flags XCr0.BNDREG and XCr0.BNDCSR must be set and unset together"
);
}
let avx512 = XCr0Flags::OPMASK | XCr0Flags::ZMM_HI256 | XCr0Flags::HI16_ZMM;
if flags.intersects(avx512) {
assert!(
flags.contains(XCr0Flags::AVX),
"AVX-512 cannot be enabled without enabling AVX"
);
assert!(
flags.contains(avx512),
"AVX-512 flags XCR0.opmask, XCR0.ZMM_Hi256, and XCR0.Hi16_ZMM must be set and unset together"
);
}
unsafe {
Self::write_raw(new_value);
}
}
/// Write raw XCR0 flags.
///
/// Does _not_ preserve any values, including reserved fields.
///
/// ## Safety
///
/// This function is unsafe because it's possible to
/// enable features that are not supported by the architecture
#[inline]
pub unsafe fn write_raw(value: u64) {
let low = value as u32;
let high = (value >> 32) as u32;
unsafe {
asm!(
"xsetbv",
in("ecx") 0,
in("rax") low, in("rdx") high,
options(nomem, nostack, preserves_flags),
);
}
}
}
}
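// Illustrative sketch (not part of the vendored file): turning on AVX state
// saving on top of whatever XCR0 already enables. The caller must have
// confirmed via CPUID that XSAVE and AVX are supported and that CR4.OSXSAVE is
// set, otherwise `xsetbv` faults. Requires the "instructions" feature and CPL 0.
#[cfg(feature = "instructions")]
pub unsafe fn enable_avx_state() {
    let flags = XCr0::read() | XCr0Flags::X87 | XCr0Flags::SSE | XCr0Flags::AVX;
    unsafe { XCr0::write(flags) };
}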

View File

@@ -0,0 +1,536 @@
//! Types for the Global Descriptor Table and segment selectors.
pub use crate::registers::segmentation::SegmentSelector;
use crate::structures::tss::TaskStateSegment;
use crate::PrivilegeLevel;
use bit_field::BitField;
use bitflags::bitflags;
use core::fmt;
// imports for intra-doc links
#[cfg(doc)]
use crate::registers::segmentation::{Segment, CS, SS};
#[cfg(feature = "instructions")]
use core::sync::atomic::{AtomicU64 as EntryValue, Ordering};
#[cfg(not(feature = "instructions"))]
use u64 as EntryValue;
/// 8-byte entry in a descriptor table.
///
/// A [`GlobalDescriptorTable`] (or LDT) is an array of these entries, and
/// [`SegmentSelector`]s index into this array. Each [`Descriptor`] in the table
/// uses either 1 Entry (if it is a [`UserSegment`](Descriptor::UserSegment)) or
/// 2 Entries (if it is a [`SystemSegment`](Descriptor::SystemSegment)). This
/// type exists to give users access to the raw entry bits in a GDT.
#[repr(transparent)]
pub struct Entry(EntryValue);
impl Entry {
// Create a new Entry from a raw value.
const fn new(raw: u64) -> Self {
#[cfg(feature = "instructions")]
let raw = EntryValue::new(raw);
Self(raw)
}
/// The raw bits for this entry. Depending on the [`Descriptor`] type, these
/// bits may correspond to those in [`DescriptorFlags`].
pub fn raw(&self) -> u64 {
// TODO: Make this const fn when AtomicU64::load is const.
#[cfg(feature = "instructions")]
let raw = self.0.load(Ordering::SeqCst);
#[cfg(not(feature = "instructions"))]
let raw = self.0;
raw
}
}
impl Clone for Entry {
fn clone(&self) -> Self {
Self::new(self.raw())
}
}
impl PartialEq for Entry {
fn eq(&self, other: &Self) -> bool {
self.raw() == other.raw()
}
}
impl Eq for Entry {}
impl fmt::Debug for Entry {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Display inner value as hex
write!(f, "Entry({:#018x})", self.raw())
}
}
/// A 64-bit mode global descriptor table (GDT).
///
/// In 64-bit mode, segmentation is not supported. The GDT is used nonetheless, for example for
/// switching between user and kernel mode or for loading a TSS.
///
/// The GDT has a fixed maximum size given by the `MAX` const generic parameter.
/// Overflowing this limit by adding too many [`Descriptor`]s via
/// [`GlobalDescriptorTable::append`] will panic.
///
/// You do **not** need to add a null segment descriptor yourself - this is already done
/// internally. This means you can add up to `MAX - 1` additional [`Entry`]s to
/// this table. Note that some [`Descriptor`]s may take up 2 [`Entry`]s.
///
/// Data segment registers in ring 0 can be loaded with the null segment selector. When running in
/// ring 3, the `ss` register must point to a valid data segment which can be obtained through the
/// [`Descriptor::user_data_segment()`](Descriptor::user_data_segment) function. Code segments must
/// be valid and non-null at all times and can be obtained through the
/// [`Descriptor::kernel_code_segment()`](Descriptor::kernel_code_segment) and
/// [`Descriptor::user_code_segment()`](Descriptor::user_code_segment) in rings 0 and 3
/// respectively.
///
/// For more info, see:
/// [x86 Instruction Reference for `mov`](https://www.felixcloutier.com/x86/mov#64-bit-mode-exceptions),
/// [Intel Manual](https://software.intel.com/sites/default/files/managed/39/c5/325462-sdm-vol-1-2abcd-3abcd.pdf),
/// [AMD Manual](https://www.amd.com/system/files/TechDocs/24593.pdf)
///
/// # Example
/// ```
/// use x86_64::structures::gdt::{GlobalDescriptorTable, Descriptor};
///
/// let mut gdt = GlobalDescriptorTable::new();
/// gdt.append(Descriptor::kernel_code_segment());
/// gdt.append(Descriptor::user_code_segment());
/// gdt.append(Descriptor::user_data_segment());
///
/// // Add entry for TSS, call gdt.load() then update segment registers
/// ```
#[derive(Debug, Clone)]
pub struct GlobalDescriptorTable<const MAX: usize = 8> {
table: [Entry; MAX],
len: usize,
}
impl GlobalDescriptorTable {
/// Creates an empty GDT with the default length of 8.
pub const fn new() -> Self {
Self::empty()
}
}
impl<const MAX: usize> GlobalDescriptorTable<MAX> {
/// Creates an empty GDT which can hold `MAX` number of [`Entry`]s.
#[inline]
pub const fn empty() -> Self {
// TODO: Replace with compiler error when feature(generic_const_exprs) is stable.
assert!(MAX > 0, "A GDT cannot have 0 entries");
assert!(MAX <= (1 << 13), "A GDT can only have at most 2^13 entries");
// TODO: Replace with inline_const when it's stable.
#[allow(clippy::declare_interior_mutable_const)]
const NULL: Entry = Entry::new(0);
Self {
table: [NULL; MAX],
len: 1,
}
}
/// Forms a GDT from a slice of `u64`.
///
/// This method allows for creation of a GDT with malformed or invalid
/// entries. However, it is safe because loading a GDT with invalid
/// entries doesn't do anything until those entries are used. For example,
/// [`CS::set_reg`] and [`load_tss`](crate::instructions::tables::load_tss)
/// are both unsafe for this reason.
///
/// Panics if:
/// * the provided slice has more than `MAX` entries
/// * the provided slice is empty
/// * the first entry is not zero
#[cfg_attr(not(feature = "instructions"), allow(rustdoc::broken_intra_doc_links))]
#[inline]
pub const fn from_raw_entries(slice: &[u64]) -> Self {
let len = slice.len();
let mut table = Self::empty().table;
let mut idx = 0;
assert!(len > 0, "cannot initialize GDT with empty slice");
assert!(slice[0] == 0, "first GDT entry must be zero");
assert!(
len <= MAX,
"cannot initialize GDT with slice exceeding the maximum length"
);
while idx < len {
table[idx] = Entry::new(slice[idx]);
idx += 1;
}
Self { table, len }
}
/// Get a reference to the internal [`Entry`] table.
///
/// The resulting slice may contain system descriptors, which span two [`Entry`]s.
#[inline]
pub fn entries(&self) -> &[Entry] {
&self.table[..self.len]
}
/// Appends the given segment descriptor to the GDT, returning the segment selector.
///
/// Note that depending on the type of the [`Descriptor`] this may append
/// either one or two new [`Entry`]s to the table.
///
/// Panics if the GDT doesn't have enough free entries.
#[inline]
#[cfg_attr(feature = "const_fn", rustversion::attr(all(), const))]
pub fn append(&mut self, entry: Descriptor) -> SegmentSelector {
let index = match entry {
Descriptor::UserSegment(value) => {
if self.len > self.table.len().saturating_sub(1) {
panic!("GDT full")
}
self.push(value)
}
Descriptor::SystemSegment(value_low, value_high) => {
if self.len > self.table.len().saturating_sub(2) {
panic!("GDT requires two free spaces to hold a SystemSegment")
}
let index = self.push(value_low);
self.push(value_high);
index
}
};
SegmentSelector::new(index as u16, entry.dpl())
}
/// Loads the GDT in the CPU using the `lgdt` instruction. This does **not** alter any of the
/// segment registers; you **must** (re)load them yourself using [the appropriate
/// functions](crate::instructions::segmentation):
/// [`SS::set_reg()`] and [`CS::set_reg()`].
#[cfg(feature = "instructions")]
#[inline]
pub fn load(&'static self) {
// SAFETY: static lifetime ensures no modification after loading.
unsafe { self.load_unsafe() };
}
/// Loads the GDT in the CPU using the `lgdt` instruction. This does **not** alter any of the
/// segment registers; you **must** (re)load them yourself using [the appropriate
/// functions](crate::instructions::segmentation):
/// [`SS::set_reg()`] and [`CS::set_reg()`].
///
/// # Safety
///
/// Unlike `load`, this function does not impose a static lifetime constraint;
/// this means it's up to the user to ensure that there will be no modifications
/// after loading and that the GDT will live for as long as it's loaded.
///
#[cfg(feature = "instructions")]
#[inline]
pub unsafe fn load_unsafe(&self) {
use crate::instructions::tables::lgdt;
unsafe {
lgdt(&self.pointer());
}
}
#[inline]
#[cfg_attr(feature = "const_fn", rustversion::attr(all(), const))]
fn push(&mut self, value: u64) -> usize {
let index = self.len;
self.table[index] = Entry::new(value);
self.len += 1;
index
}
/// Creates the descriptor pointer for this table. This pointer can only be
/// safely used if the table is never modified or destroyed while in use.
#[cfg(feature = "instructions")]
fn pointer(&self) -> super::DescriptorTablePointer {
use core::mem::size_of;
super::DescriptorTablePointer {
base: crate::VirtAddr::new(self.table.as_ptr() as u64),
// 0 < self.len <= MAX <= 2^13, so the limit calculation
// will not underflow or overflow.
limit: (self.len * size_of::<u64>() - 1) as u16,
}
}
}
/// A 64-bit mode segment descriptor.
///
/// Segmentation is no longer supported in 64-bit mode, so most of the descriptor
/// contents are ignored.
#[derive(Debug, Clone, Copy)]
pub enum Descriptor {
/// Descriptor for a code or data segment.
///
/// Since segmentation is no longer supported in 64-bit mode, almost all of
/// the contents of code and data descriptors are ignored. Only some flags are still used.
UserSegment(u64),
/// A system segment descriptor such as a LDT or TSS descriptor.
SystemSegment(u64, u64),
}
bitflags! {
/// Flags for a GDT descriptor. Not all flags are valid for all descriptor types.
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct DescriptorFlags: u64 {
/// Set by the processor if this segment has been accessed. Only cleared by software.
/// _Setting_ this bit in software prevents GDT writes on first use.
const ACCESSED = 1 << 40;
/// For 32-bit data segments, sets the segment as writable. For 32-bit code segments,
/// sets the segment as _readable_. In 64-bit mode, ignored for all segments.
const WRITABLE = 1 << 41;
/// For code segments, sets the segment as “conforming”, influencing the
/// privilege checks that occur on control transfers. For 32-bit data segments,
/// sets the segment as "expand down". In 64-bit mode, ignored for data segments.
const CONFORMING = 1 << 42;
/// This flag must be set for code segments and unset for data segments.
const EXECUTABLE = 1 << 43;
/// This flag must be set for user segments (in contrast to system segments).
const USER_SEGMENT = 1 << 44;
/// These two bits encode the Descriptor Privilege Level (DPL) for this descriptor.
/// If both bits are set, the DPL is Ring 3, if both are unset, the DPL is Ring 0.
const DPL_RING_3 = 3 << 45;
/// Must be set for any segment, causes a segment not present exception if not set.
const PRESENT = 1 << 47;
/// Available for use by the Operating System
const AVAILABLE = 1 << 52;
/// Must be set for 64-bit code segments, unset otherwise.
const LONG_MODE = 1 << 53;
/// Use 32-bit (as opposed to 16-bit) operands. If [`LONG_MODE`][Self::LONG_MODE] is set,
/// this must be unset. In 64-bit mode, ignored for data segments.
const DEFAULT_SIZE = 1 << 54;
/// Limit field is scaled by 4096 bytes. In 64-bit mode, ignored for all segments.
const GRANULARITY = 1 << 55;
/// Bits `0..=15` of the limit field (ignored in 64-bit mode)
const LIMIT_0_15 = 0xFFFF;
/// Bits `16..=19` of the limit field (ignored in 64-bit mode)
const LIMIT_16_19 = 0xF << 48;
/// Bits `0..=23` of the base field (ignored in 64-bit mode, except for fs and gs)
const BASE_0_23 = 0xFF_FFFF << 16;
/// Bits `24..=31` of the base field (ignored in 64-bit mode, except for fs and gs)
const BASE_24_31 = 0xFF << 56;
}
}
/// The following constants define default values for common GDT entries. They
/// are all "flat" segments, meaning they can access the entire address space.
/// These values all set [`WRITABLE`][DescriptorFlags::WRITABLE] and
/// [`ACCESSED`][DescriptorFlags::ACCESSED]. They also match the values loaded
/// by the `syscall`/`sysret` and `sysenter`/`sysexit` instructions.
///
/// In short, these values disable segmentation, permission checks, and access
/// tracking at the GDT level. Kernels using these values should use paging to
/// implement this functionality.
impl DescriptorFlags {
// Flags that we set for all our default segments
const COMMON: Self = Self::from_bits_truncate(
Self::USER_SEGMENT.bits()
| Self::PRESENT.bits()
| Self::WRITABLE.bits()
| Self::ACCESSED.bits()
| Self::LIMIT_0_15.bits()
| Self::LIMIT_16_19.bits()
| Self::GRANULARITY.bits(),
);
/// A kernel data segment (64-bit or flat 32-bit)
pub const KERNEL_DATA: Self =
Self::from_bits_truncate(Self::COMMON.bits() | Self::DEFAULT_SIZE.bits());
/// A flat 32-bit kernel code segment
pub const KERNEL_CODE32: Self = Self::from_bits_truncate(
Self::COMMON.bits() | Self::EXECUTABLE.bits() | Self::DEFAULT_SIZE.bits(),
);
/// A 64-bit kernel code segment
pub const KERNEL_CODE64: Self = Self::from_bits_truncate(
Self::COMMON.bits() | Self::EXECUTABLE.bits() | Self::LONG_MODE.bits(),
);
/// A user data segment (64-bit or flat 32-bit)
pub const USER_DATA: Self =
Self::from_bits_truncate(Self::KERNEL_DATA.bits() | Self::DPL_RING_3.bits());
/// A flat 32-bit user code segment
pub const USER_CODE32: Self =
Self::from_bits_truncate(Self::KERNEL_CODE32.bits() | Self::DPL_RING_3.bits());
/// A 64-bit user code segment
pub const USER_CODE64: Self =
Self::from_bits_truncate(Self::KERNEL_CODE64.bits() | Self::DPL_RING_3.bits());
}
impl Descriptor {
/// Returns the Descriptor Privilege Level (DPL). When using this descriptor
/// via a [`SegmentSelector`], the RPL and Current Privilege Level (CPL)
/// must be less than or equal to the DPL, except for stack segments where the
/// RPL, CPL, and DPL must all be equal.
#[inline]
pub const fn dpl(self) -> PrivilegeLevel {
let value_low = match self {
Descriptor::UserSegment(v) => v,
Descriptor::SystemSegment(v, _) => v,
};
let dpl = (value_low & DescriptorFlags::DPL_RING_3.bits()) >> 45;
PrivilegeLevel::from_u16(dpl as u16)
}
/// Creates a segment descriptor for a 64-bit kernel code segment. Suitable
/// for use with `syscall` or 64-bit `sysenter`.
#[inline]
pub const fn kernel_code_segment() -> Descriptor {
Descriptor::UserSegment(DescriptorFlags::KERNEL_CODE64.bits())
}
/// Creates a segment descriptor for a kernel data segment (32-bit or
/// 64-bit). Suitable for use with `syscall` or `sysenter`.
#[inline]
pub const fn kernel_data_segment() -> Descriptor {
Descriptor::UserSegment(DescriptorFlags::KERNEL_DATA.bits())
}
/// Creates a segment descriptor for a ring 3 data segment (32-bit or
/// 64-bit). Suitable for use with `sysret` or `sysexit`.
#[inline]
pub const fn user_data_segment() -> Descriptor {
Descriptor::UserSegment(DescriptorFlags::USER_DATA.bits())
}
/// Creates a segment descriptor for a 64-bit ring 3 code segment. Suitable
/// for use with `sysret` or `sysexit`.
#[inline]
pub const fn user_code_segment() -> Descriptor {
Descriptor::UserSegment(DescriptorFlags::USER_CODE64.bits())
}
/// Creates a TSS system descriptor for the given TSS.
///
/// While it is possible to create multiple Descriptors that point to the
/// same TSS, this generally isn't recommended, as the TSS usually contains
/// per-CPU information such as the RSP and IST pointers. Instead, there
/// should be exactly one TSS and one corresponding TSS Descriptor per CPU.
/// Then, each of these descriptors should be placed in a GDT (which can
/// either be global or per-CPU).
#[inline]
pub fn tss_segment(tss: &'static TaskStateSegment) -> Descriptor {
// SAFETY: The pointer is derived from a &'static reference, which ensures its validity.
unsafe { Self::tss_segment_unchecked(tss) }
}
/// Similar to [`Descriptor::tss_segment`], but unsafe since it does not enforce a lifetime
/// constraint on the provided TSS.
///
/// # Safety
/// The caller must ensure that the passed pointer is valid for as long as the descriptor is
/// being used.
#[inline]
pub unsafe fn tss_segment_unchecked(tss: *const TaskStateSegment) -> Descriptor {
use self::DescriptorFlags as Flags;
use core::mem::size_of;
let ptr = tss as u64;
let mut low = Flags::PRESENT.bits();
// base
low.set_bits(16..40, ptr.get_bits(0..24));
low.set_bits(56..64, ptr.get_bits(24..32));
// limit (the `-1` is needed since the bound is inclusive)
low.set_bits(0..16, (size_of::<TaskStateSegment>() - 1) as u64);
// type (0b1001 = available 64-bit tss)
low.set_bits(40..44, 0b1001);
let mut high = 0;
high.set_bits(0..32, ptr.get_bits(32..64));
Descriptor::SystemSegment(low, high)
}
}
#[cfg(test)]
mod tests {
use super::DescriptorFlags as Flags;
use super::*;
#[test]
#[rustfmt::skip]
pub fn linux_kernel_defaults() {
// Make sure our defaults match the ones used by the Linux kernel.
// Constants pulled from an old version of arch/x86/kernel/cpu/common.c
assert_eq!(Flags::KERNEL_CODE64.bits(), 0x00af9b000000ffff);
assert_eq!(Flags::KERNEL_CODE32.bits(), 0x00cf9b000000ffff);
assert_eq!(Flags::KERNEL_DATA.bits(), 0x00cf93000000ffff);
assert_eq!(Flags::USER_CODE64.bits(), 0x00affb000000ffff);
assert_eq!(Flags::USER_CODE32.bits(), 0x00cffb000000ffff);
assert_eq!(Flags::USER_DATA.bits(), 0x00cff3000000ffff);
}
// Makes a GDT that has two free slots
fn make_six_entry_gdt() -> GlobalDescriptorTable {
let mut gdt = GlobalDescriptorTable::new();
gdt.append(Descriptor::kernel_code_segment());
gdt.append(Descriptor::kernel_data_segment());
gdt.append(Descriptor::UserSegment(DescriptorFlags::USER_CODE32.bits()));
gdt.append(Descriptor::user_data_segment());
gdt.append(Descriptor::user_code_segment());
assert_eq!(gdt.len, 6);
gdt
}
static TSS: TaskStateSegment = TaskStateSegment::new();
fn make_full_gdt() -> GlobalDescriptorTable {
let mut gdt = make_six_entry_gdt();
gdt.append(Descriptor::tss_segment(&TSS));
assert_eq!(gdt.len, 8);
gdt
}
#[test]
pub fn push_max_segments() {
// Make sure we don't panic with user segments
let mut gdt = make_six_entry_gdt();
gdt.append(Descriptor::user_data_segment());
assert_eq!(gdt.len, 7);
gdt.append(Descriptor::user_data_segment());
assert_eq!(gdt.len, 8);
// Make sure we don't panic with system segments
let _ = make_full_gdt();
}
#[test]
#[should_panic]
pub fn panic_user_segment() {
let mut gdt = make_full_gdt();
gdt.append(Descriptor::user_data_segment());
}
#[test]
#[should_panic]
pub fn panic_system_segment() {
let mut gdt = make_six_entry_gdt();
gdt.append(Descriptor::user_data_segment());
// We have one free slot, but the GDT requires two
gdt.append(Descriptor::tss_segment(&TSS));
}
#[test]
pub fn from_entries() {
let raw = [0, Flags::KERNEL_CODE64.bits(), Flags::KERNEL_DATA.bits()];
let gdt = GlobalDescriptorTable::<3>::from_raw_entries(&raw);
assert_eq!(gdt.table.len(), 3);
assert_eq!(gdt.entries().len(), 3);
}
#[test]
pub fn descriptor_dpl() {
assert_eq!(
Descriptor::kernel_code_segment().dpl(),
PrivilegeLevel::Ring0
);
assert_eq!(
Descriptor::kernel_data_segment().dpl(),
PrivilegeLevel::Ring0
);
assert_eq!(Descriptor::user_code_segment().dpl(), PrivilegeLevel::Ring3);
assert_eq!(Descriptor::user_code_segment().dpl(), PrivilegeLevel::Ring3);
}
}
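// Illustrative sketch (not part of the vendored file): a flat GDT layout whose
// selectors also satisfy the STAR offset rules from registers/model_specific.rs
// (kernel CS at index 1 with its SS right after it, user SS at index 3 with the
// user CS right after it). The TSS is assumed to live in a `static` elsewhere.
#[allow(dead_code)]
fn build_flat_gdt(
    tss: &'static TaskStateSegment,
) -> (GlobalDescriptorTable, [SegmentSelector; 5]) {
    let mut gdt = GlobalDescriptorTable::new();
    let kernel_cs = gdt.append(Descriptor::kernel_code_segment()); // index 1
    let kernel_ss = gdt.append(Descriptor::kernel_data_segment()); // index 2
    let user_ss = gdt.append(Descriptor::user_data_segment()); // index 3
    let user_cs = gdt.append(Descriptor::user_code_segment()); // index 4
    let tss_sel = gdt.append(Descriptor::tss_segment(tss)); // indices 5 and 6
    (gdt, [kernel_cs, kernel_ss, user_ss, user_cs, tss_sel])
}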

File diff suppressed because it is too large

View File

@@ -0,0 +1,40 @@
//! Representations of various x86 specific structures and descriptor tables.
use crate::VirtAddr;
pub mod gdt;
pub mod idt;
pub mod paging;
pub mod port;
pub mod tss;
/// A struct describing a pointer to a descriptor table (GDT / IDT).
/// This is in a format suitable for giving to 'lgdt' or 'lidt'.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed(2))]
pub struct DescriptorTablePointer {
/// Size of the DT.
pub limit: u16,
/// Pointer to the memory region containing the DT.
pub base: VirtAddr,
}
#[cfg(test)]
mod tests {
use super::*;
use std::mem::size_of;
#[test]
pub fn check_descriptor_pointer_size() {
// Per the SDM, a descriptor pointer has to be 2+8=10 bytes
assert_eq!(size_of::<DescriptorTablePointer>(), 10);
// Make sure that we can reference a pointer's limit
let p = DescriptorTablePointer {
limit: 5,
base: VirtAddr::zero(),
};
let _: &u16 = &p.limit;
}
}

View File

@@ -0,0 +1,210 @@
//! Abstractions for default-sized and huge physical memory frames.
use super::page::AddressNotAligned;
use crate::structures::paging::page::{PageSize, Size4KiB};
use crate::PhysAddr;
use core::fmt;
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Sub, SubAssign};
/// A physical memory frame.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(C)]
pub struct PhysFrame<S: PageSize = Size4KiB> {
// TODO: Make private when our minimum supported stable Rust version is 1.61
pub(crate) start_address: PhysAddr,
size: PhantomData<S>,
}
impl<S: PageSize> PhysFrame<S> {
/// Returns the frame that starts at the given physical address.
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start).
#[inline]
pub fn from_start_address(address: PhysAddr) -> Result<Self, AddressNotAligned> {
if !address.is_aligned(S::SIZE) {
return Err(AddressNotAligned);
}
// SAFETY: correct address alignment is checked above
Ok(unsafe { PhysFrame::from_start_address_unchecked(address) })
}
/// Returns the frame that starts at the given physical address.
///
/// ## Safety
///
/// The address must be correctly aligned.
#[inline]
pub const unsafe fn from_start_address_unchecked(start_address: PhysAddr) -> Self {
PhysFrame {
start_address,
size: PhantomData,
}
}
/// Returns the frame that contains the given physical address.
#[inline]
pub fn containing_address(address: PhysAddr) -> Self {
PhysFrame {
start_address: address.align_down(S::SIZE),
size: PhantomData,
}
}
/// Returns the start address of the frame.
#[inline]
pub const fn start_address(self) -> PhysAddr {
self.start_address
}
/// Returns the size of the frame (4 KiB, 2 MiB, or 1 GiB).
#[inline]
pub const fn size(self) -> u64 {
S::SIZE
}
/// Returns a range of frames, exclusive `end`.
#[inline]
pub const fn range(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRange<S> {
PhysFrameRange { start, end }
}
/// Returns a range of frames, inclusive `end`.
#[inline]
pub const fn range_inclusive(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRangeInclusive<S> {
PhysFrameRangeInclusive { start, end }
}
}
impl<S: PageSize> fmt::Debug for PhysFrame<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_fmt(format_args!(
"PhysFrame[{}]({:#x})",
S::DEBUG_STR,
self.start_address().as_u64()
))
}
}
impl<S: PageSize> Add<u64> for PhysFrame<S> {
type Output = Self;
#[inline]
fn add(self, rhs: u64) -> Self::Output {
PhysFrame::containing_address(self.start_address() + rhs * S::SIZE)
}
}
impl<S: PageSize> AddAssign<u64> for PhysFrame<S> {
#[inline]
fn add_assign(&mut self, rhs: u64) {
*self = *self + rhs;
}
}
impl<S: PageSize> Sub<u64> for PhysFrame<S> {
type Output = Self;
#[inline]
fn sub(self, rhs: u64) -> Self::Output {
PhysFrame::containing_address(self.start_address() - rhs * S::SIZE)
}
}
impl<S: PageSize> SubAssign<u64> for PhysFrame<S> {
#[inline]
fn sub_assign(&mut self, rhs: u64) {
*self = *self - rhs;
}
}
impl<S: PageSize> Sub<PhysFrame<S>> for PhysFrame<S> {
type Output = u64;
#[inline]
fn sub(self, rhs: PhysFrame<S>) -> Self::Output {
(self.start_address - rhs.start_address) / S::SIZE
}
}
/// A range of physical memory frames, exclusive of the upper bound.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct PhysFrameRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, exclusive.
pub end: PhysFrame<S>,
}
impl<S: PageSize> PhysFrameRange<S> {
/// Returns whether the range contains no frames.
#[inline]
pub fn is_empty(&self) -> bool {
self.start >= self.end
}
}
impl<S: PageSize> Iterator for PhysFrameRange<S> {
type Item = PhysFrame<S>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.start < self.end {
let frame = self.start;
self.start += 1;
Some(frame)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PhysFrameRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// A range of physical memory frames, inclusive of the upper bound.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct PhysFrameRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, inclusive.
pub end: PhysFrame<S>,
}
impl<S: PageSize> PhysFrameRangeInclusive<S> {
/// Returns whether the range contains no frames.
#[inline]
pub fn is_empty(&self) -> bool {
self.start > self.end
}
}
impl<S: PageSize> Iterator for PhysFrameRangeInclusive<S> {
type Item = PhysFrame<S>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.start <= self.end {
let frame = self.start;
self.start += 1;
Some(frame)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PhysFrameRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
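// Illustrative example added by the editor (not part of the original crate
// source): frames are derived from physical addresses and iterated as ranges.
#[cfg(test)]
mod example {
    use super::PhysFrame;
    use crate::structures::paging::Size4KiB;
    use crate::PhysAddr;

    #[test]
    pub fn example_frame_range() {
        let start = PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x1000));
        let end = PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x5000));
        assert_eq!(start.size(), 4096);
        // The range is exclusive of `end`: frames at 0x1000, 0x2000, 0x3000, 0x4000.
        assert_eq!(PhysFrame::range(start, end).count(), 4);
    }
}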

View File

@ -0,0 +1,25 @@
//! Traits for abstracting away frame allocation and deallocation.
use crate::structures::paging::{PageSize, PhysFrame};
/// A trait for types that can allocate a frame of memory.
///
/// # Safety
///
/// The implementer of this trait must guarantee that the `allocate_frame`
/// method returns only unique unused frames. Otherwise, undefined behavior
/// may result from two callers modifying or deallocating the same frame.
pub unsafe trait FrameAllocator<S: PageSize> {
/// Allocate a frame of the appropriate size and return it if possible.
fn allocate_frame(&mut self) -> Option<PhysFrame<S>>;
}
/// A trait for types that can deallocate a frame of memory.
pub trait FrameDeallocator<S: PageSize> {
/// Deallocate the given unused frame.
///
/// ## Safety
///
/// The caller must ensure that the passed frame is unused.
unsafe fn deallocate_frame(&mut self, frame: PhysFrame<S>);
}
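// Illustrative sketch added by the editor (not part of the original crate
// source): a minimal bump allocator over a fixed physical region that is
// assumed to be otherwise unused RAM; a real kernel would derive the bounds
// from the bootloader's memory map instead of hard-coding them.
#[cfg(test)]
mod example {
    use super::FrameAllocator;
    use crate::structures::paging::{PageSize, PhysFrame, Size4KiB};
    use crate::PhysAddr;

    struct BumpFrameAllocator {
        next: u64,
        end: u64,
    }

    // SAFETY (for this sketch): every frame is handed out at most once because
    // `next` only ever advances through the region.
    unsafe impl FrameAllocator<Size4KiB> for BumpFrameAllocator {
        fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
            if self.next >= self.end {
                return None;
            }
            let frame = PhysFrame::containing_address(PhysAddr::new(self.next));
            self.next += Size4KiB::SIZE;
            Some(frame)
        }
    }

    #[test]
    fn example_bump_allocation() {
        // Two 4KiB frames starting at 1MiB (an assumed-free region).
        let mut alloc = BumpFrameAllocator {
            next: 0x10_0000,
            end: 0x10_2000,
        };
        assert!(alloc.allocate_frame().is_some());
        assert!(alloc.allocate_frame().is_some());
        assert!(alloc.allocate_frame().is_none());
    }
}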

View File

@ -0,0 +1,883 @@
use crate::structures::paging::{
frame::PhysFrame,
frame_alloc::{FrameAllocator, FrameDeallocator},
mapper::*,
page::{AddressNotAligned, Page, PageRangeInclusive, Size1GiB, Size2MiB, Size4KiB},
page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags, PageTableLevel},
};
/// A Mapper implementation that relies on a PhysAddr to VirtAddr conversion function.
///
/// This type requires that all physical page table frames are mapped to some virtual
/// address. Normally, this is done by mapping the complete physical address space into
/// the virtual address space at some offset. Other mappings between physical and virtual
/// memory are possible too, as long as they can be calculated as a `PhysAddr` to
/// `VirtAddr` closure.
#[derive(Debug)]
pub struct MappedPageTable<'a, P: PageTableFrameMapping> {
page_table_walker: PageTableWalker<P>,
level_4_table: &'a mut PageTable,
}
impl<'a, P: PageTableFrameMapping> MappedPageTable<'a, P> {
/// Creates a new `MappedPageTable` that uses the passed `PageTableFrameMapping` for converting virtual
/// to physical addresses.
///
/// ## Safety
///
/// This function is unsafe because the caller must guarantee that the passed
/// `page_table_frame_mapping` is correct. Also, the passed `level_4_table` must point to the level 4 page table
/// of a valid page table hierarchy. Otherwise this function might break memory safety, e.g.
/// by writing to an illegal memory location.
#[inline]
pub unsafe fn new(level_4_table: &'a mut PageTable, page_table_frame_mapping: P) -> Self {
Self {
level_4_table,
page_table_walker: unsafe { PageTableWalker::new(page_table_frame_mapping) },
}
}
/// Returns an immutable reference to the wrapped level 4 `PageTable` instance.
pub fn level_4_table(&self) -> &PageTable {
self.level_4_table
}
/// Returns a mutable reference to the wrapped level 4 `PageTable` instance.
pub fn level_4_table_mut(&mut self) -> &mut PageTable {
self.level_4_table
}
/// Returns the `PageTableFrameMapping` used for converting virtual to physical addresses.
pub fn page_table_frame_mapping(&self) -> &P {
&self.page_table_walker.page_table_frame_mapping
}
/// Helper function for implementing Mapper. Safe to limit the scope of unsafe, see
/// https://github.com/rust-lang/rfcs/pull/2585.
fn map_to_1gib<A>(
&mut self,
page: Page<Size1GiB>,
frame: PhysFrame<Size1GiB>,
flags: PageTableFlags,
parent_table_flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size1GiB>, MapToError<Size1GiB>>
where
A: FrameAllocator<Size4KiB> + ?Sized,
{
let p4 = &mut self.level_4_table;
let p3 = self.page_table_walker.create_next_table(
&mut p4[page.p4_index()],
parent_table_flags,
allocator,
)?;
if !p3[page.p3_index()].is_unused() {
return Err(MapToError::PageAlreadyMapped(frame));
}
p3[page.p3_index()].set_addr(frame.start_address(), flags | PageTableFlags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
/// Helper function for implementing Mapper. Safe to limit the scope of unsafe, see
/// https://github.com/rust-lang/rfcs/pull/2585.
fn map_to_2mib<A>(
&mut self,
page: Page<Size2MiB>,
frame: PhysFrame<Size2MiB>,
flags: PageTableFlags,
parent_table_flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size2MiB>, MapToError<Size2MiB>>
where
A: FrameAllocator<Size4KiB> + ?Sized,
{
let p4 = &mut self.level_4_table;
let p3 = self.page_table_walker.create_next_table(
&mut p4[page.p4_index()],
parent_table_flags,
allocator,
)?;
let p2 = self.page_table_walker.create_next_table(
&mut p3[page.p3_index()],
parent_table_flags,
allocator,
)?;
if !p2[page.p2_index()].is_unused() {
return Err(MapToError::PageAlreadyMapped(frame));
}
p2[page.p2_index()].set_addr(frame.start_address(), flags | PageTableFlags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
/// Helper function for implementing Mapper. Safe to limit the scope of unsafe, see
/// https://github.com/rust-lang/rfcs/pull/2585.
fn map_to_4kib<A>(
&mut self,
page: Page<Size4KiB>,
frame: PhysFrame<Size4KiB>,
flags: PageTableFlags,
parent_table_flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size4KiB>, MapToError<Size4KiB>>
where
A: FrameAllocator<Size4KiB> + ?Sized,
{
let p4 = &mut self.level_4_table;
let p3 = self.page_table_walker.create_next_table(
&mut p4[page.p4_index()],
parent_table_flags,
allocator,
)?;
let p2 = self.page_table_walker.create_next_table(
&mut p3[page.p3_index()],
parent_table_flags,
allocator,
)?;
let p1 = self.page_table_walker.create_next_table(
&mut p2[page.p2_index()],
parent_table_flags,
allocator,
)?;
if !p1[page.p1_index()].is_unused() {
return Err(MapToError::PageAlreadyMapped(frame));
}
p1[page.p1_index()].set_frame(frame, flags);
Ok(MapperFlush::new(page))
}
}
impl<'a, P: PageTableFrameMapping> Mapper<Size1GiB> for MappedPageTable<'a, P> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
page: Page<Size1GiB>,
frame: PhysFrame<Size1GiB>,
flags: PageTableFlags,
parent_table_flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size1GiB>, MapToError<Size1GiB>>
where
A: FrameAllocator<Size4KiB> + ?Sized,
{
self.map_to_1gib(page, frame, flags, parent_table_flags, allocator)
}
fn unmap(
&mut self,
page: Page<Size1GiB>,
) -> Result<(PhysFrame<Size1GiB>, MapperFlush<Size1GiB>), UnmapError> {
let p4 = &mut self.level_4_table;
let p3 = self
.page_table_walker
.next_table_mut(&mut p4[page.p4_index()])?;
let p3_entry = &mut p3[page.p3_index()];
let flags = p3_entry.flags();
if !flags.contains(PageTableFlags::PRESENT) {
return Err(UnmapError::PageNotMapped);
}
if !flags.contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::ParentEntryHugePage);
}
let frame = PhysFrame::from_start_address(p3_entry.addr())
.map_err(|AddressNotAligned| UnmapError::InvalidFrameAddress(p3_entry.addr()))?;
p3_entry.set_unused();
Ok((frame, MapperFlush::new(page)))
}
unsafe fn update_flags(
&mut self,
page: Page<Size1GiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size1GiB>, FlagUpdateError> {
let p4 = &mut self.level_4_table;
let p3 = self
.page_table_walker
.next_table_mut(&mut p4[page.p4_index()])?;
if p3[page.p3_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p3[page.p3_index()].set_flags(flags | PageTableFlags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
unsafe fn set_flags_p4_entry(
&mut self,
page: Page<Size1GiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
let p4 = &mut self.level_4_table;
let p4_entry = &mut p4[page.p4_index()];
if p4_entry.is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p4_entry.set_flags(flags);
Ok(MapperFlushAll::new())
}
unsafe fn set_flags_p3_entry(
&mut self,
_page: Page<Size1GiB>,
_flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
Err(FlagUpdateError::ParentEntryHugePage)
}
unsafe fn set_flags_p2_entry(
&mut self,
_page: Page<Size1GiB>,
_flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
Err(FlagUpdateError::ParentEntryHugePage)
}
fn translate_page(&self, page: Page<Size1GiB>) -> Result<PhysFrame<Size1GiB>, TranslateError> {
let p4 = &self.level_4_table;
let p3 = self.page_table_walker.next_table(&p4[page.p4_index()])?;
let p3_entry = &p3[page.p3_index()];
if p3_entry.is_unused() {
return Err(TranslateError::PageNotMapped);
}
PhysFrame::from_start_address(p3_entry.addr())
.map_err(|AddressNotAligned| TranslateError::InvalidFrameAddress(p3_entry.addr()))
}
}
impl<'a, P: PageTableFrameMapping> Mapper<Size2MiB> for MappedPageTable<'a, P> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
page: Page<Size2MiB>,
frame: PhysFrame<Size2MiB>,
flags: PageTableFlags,
parent_table_flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size2MiB>, MapToError<Size2MiB>>
where
A: FrameAllocator<Size4KiB> + ?Sized,
{
self.map_to_2mib(page, frame, flags, parent_table_flags, allocator)
}
fn unmap(
&mut self,
page: Page<Size2MiB>,
) -> Result<(PhysFrame<Size2MiB>, MapperFlush<Size2MiB>), UnmapError> {
let p4 = &mut self.level_4_table;
let p3 = self
.page_table_walker
.next_table_mut(&mut p4[page.p4_index()])?;
let p2 = self
.page_table_walker
.next_table_mut(&mut p3[page.p3_index()])?;
let p2_entry = &mut p2[page.p2_index()];
let flags = p2_entry.flags();
if !flags.contains(PageTableFlags::PRESENT) {
return Err(UnmapError::PageNotMapped);
}
if !flags.contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::ParentEntryHugePage);
}
let frame = PhysFrame::from_start_address(p2_entry.addr())
.map_err(|AddressNotAligned| UnmapError::InvalidFrameAddress(p2_entry.addr()))?;
p2_entry.set_unused();
Ok((frame, MapperFlush::new(page)))
}
unsafe fn update_flags(
&mut self,
page: Page<Size2MiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size2MiB>, FlagUpdateError> {
let p4 = &mut self.level_4_table;
let p3 = self
.page_table_walker
.next_table_mut(&mut p4[page.p4_index()])?;
let p2 = self
.page_table_walker
.next_table_mut(&mut p3[page.p3_index()])?;
if p2[page.p2_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p2[page.p2_index()].set_flags(flags | PageTableFlags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
unsafe fn set_flags_p4_entry(
&mut self,
page: Page<Size2MiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
let p4 = &mut self.level_4_table;
let p4_entry = &mut p4[page.p4_index()];
if p4_entry.is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p4_entry.set_flags(flags);
Ok(MapperFlushAll::new())
}
unsafe fn set_flags_p3_entry(
&mut self,
page: Page<Size2MiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
let p4 = &mut self.level_4_table;
let p3 = self
.page_table_walker
.next_table_mut(&mut p4[page.p4_index()])?;
let p3_entry = &mut p3[page.p3_index()];
if p3_entry.is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p3_entry.set_flags(flags);
Ok(MapperFlushAll::new())
}
unsafe fn set_flags_p2_entry(
&mut self,
_page: Page<Size2MiB>,
_flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
Err(FlagUpdateError::ParentEntryHugePage)
}
fn translate_page(&self, page: Page<Size2MiB>) -> Result<PhysFrame<Size2MiB>, TranslateError> {
let p4 = &self.level_4_table;
let p3 = self.page_table_walker.next_table(&p4[page.p4_index()])?;
let p2 = self.page_table_walker.next_table(&p3[page.p3_index()])?;
let p2_entry = &p2[page.p2_index()];
if p2_entry.is_unused() {
return Err(TranslateError::PageNotMapped);
}
PhysFrame::from_start_address(p2_entry.addr())
.map_err(|AddressNotAligned| TranslateError::InvalidFrameAddress(p2_entry.addr()))
}
}
impl<'a, P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'a, P> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
page: Page<Size4KiB>,
frame: PhysFrame<Size4KiB>,
flags: PageTableFlags,
parent_table_flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size4KiB>, MapToError<Size4KiB>>
where
A: FrameAllocator<Size4KiB> + ?Sized,
{
self.map_to_4kib(page, frame, flags, parent_table_flags, allocator)
}
fn unmap(
&mut self,
page: Page<Size4KiB>,
) -> Result<(PhysFrame<Size4KiB>, MapperFlush<Size4KiB>), UnmapError> {
let p4 = &mut self.level_4_table;
let p3 = self
.page_table_walker
.next_table_mut(&mut p4[page.p4_index()])?;
let p2 = self
.page_table_walker
.next_table_mut(&mut p3[page.p3_index()])?;
let p1 = self
.page_table_walker
.next_table_mut(&mut p2[page.p2_index()])?;
let p1_entry = &mut p1[page.p1_index()];
let frame = p1_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
p1_entry.set_unused();
Ok((frame, MapperFlush::new(page)))
}
unsafe fn update_flags(
&mut self,
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size4KiB>, FlagUpdateError> {
let p4 = &mut self.level_4_table;
let p3 = self
.page_table_walker
.next_table_mut(&mut p4[page.p4_index()])?;
let p2 = self
.page_table_walker
.next_table_mut(&mut p3[page.p3_index()])?;
let p1 = self
.page_table_walker
.next_table_mut(&mut p2[page.p2_index()])?;
if p1[page.p1_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p1[page.p1_index()].set_flags(flags);
Ok(MapperFlush::new(page))
}
unsafe fn set_flags_p4_entry(
&mut self,
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
let p4 = &mut self.level_4_table;
let p4_entry = &mut p4[page.p4_index()];
if p4_entry.is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p4_entry.set_flags(flags);
Ok(MapperFlushAll::new())
}
unsafe fn set_flags_p3_entry(
&mut self,
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
let p4 = &mut self.level_4_table;
let p3 = self
.page_table_walker
.next_table_mut(&mut p4[page.p4_index()])?;
let p3_entry = &mut p3[page.p3_index()];
if p3_entry.is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p3_entry.set_flags(flags);
Ok(MapperFlushAll::new())
}
unsafe fn set_flags_p2_entry(
&mut self,
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
let p4 = &mut self.level_4_table;
let p3 = self
.page_table_walker
.next_table_mut(&mut p4[page.p4_index()])?;
let p2 = self
.page_table_walker
.next_table_mut(&mut p3[page.p3_index()])?;
let p2_entry = &mut p2[page.p2_index()];
if p2_entry.is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p2_entry.set_flags(flags);
Ok(MapperFlushAll::new())
}
fn translate_page(&self, page: Page<Size4KiB>) -> Result<PhysFrame<Size4KiB>, TranslateError> {
let p4 = &self.level_4_table;
let p3 = self.page_table_walker.next_table(&p4[page.p4_index()])?;
let p2 = self.page_table_walker.next_table(&p3[page.p3_index()])?;
let p1 = self.page_table_walker.next_table(&p2[page.p2_index()])?;
let p1_entry = &p1[page.p1_index()];
if p1_entry.is_unused() {
return Err(TranslateError::PageNotMapped);
}
PhysFrame::from_start_address(p1_entry.addr())
.map_err(|AddressNotAligned| TranslateError::InvalidFrameAddress(p1_entry.addr()))
}
}
impl<'a, P: PageTableFrameMapping> Translate for MappedPageTable<'a, P> {
#[allow(clippy::inconsistent_digit_grouping)]
fn translate(&self, addr: VirtAddr) -> TranslateResult {
let p4 = &self.level_4_table;
let p3 = match self.page_table_walker.next_table(&p4[addr.p4_index()]) {
Ok(page_table) => page_table,
Err(PageTableWalkError::NotMapped) => return TranslateResult::NotMapped,
Err(PageTableWalkError::MappedToHugePage) => {
panic!("level 4 entry has huge page bit set")
}
};
let p2 = match self.page_table_walker.next_table(&p3[addr.p3_index()]) {
Ok(page_table) => page_table,
Err(PageTableWalkError::NotMapped) => return TranslateResult::NotMapped,
Err(PageTableWalkError::MappedToHugePage) => {
let entry = &p3[addr.p3_index()];
let frame = PhysFrame::containing_address(entry.addr());
let offset = addr.as_u64() & 0o_777_777_7777;
let flags = entry.flags();
return TranslateResult::Mapped {
frame: MappedFrame::Size1GiB(frame),
offset,
flags,
};
}
};
let p1 = match self.page_table_walker.next_table(&p2[addr.p2_index()]) {
Ok(page_table) => page_table,
Err(PageTableWalkError::NotMapped) => return TranslateResult::NotMapped,
Err(PageTableWalkError::MappedToHugePage) => {
let entry = &p2[addr.p2_index()];
let frame = PhysFrame::containing_address(entry.addr());
let offset = addr.as_u64() & 0o_777_7777;
let flags = entry.flags();
return TranslateResult::Mapped {
frame: MappedFrame::Size2MiB(frame),
offset,
flags,
};
}
};
let p1_entry = &p1[addr.p1_index()];
if p1_entry.is_unused() {
return TranslateResult::NotMapped;
}
let frame = match PhysFrame::from_start_address(p1_entry.addr()) {
Ok(frame) => frame,
Err(AddressNotAligned) => return TranslateResult::InvalidFrameAddress(p1_entry.addr()),
};
let offset = u64::from(addr.page_offset());
let flags = p1_entry.flags();
TranslateResult::Mapped {
frame: MappedFrame::Size4KiB(frame),
offset,
flags,
}
}
}
impl<'a, P: PageTableFrameMapping> CleanUp for MappedPageTable<'a, P> {
#[inline]
unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
where
D: FrameDeallocator<Size4KiB>,
{
unsafe {
self.clean_up_addr_range(
PageRangeInclusive {
start: Page::from_start_address(VirtAddr::new(0)).unwrap(),
end: Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
},
frame_deallocator,
)
}
}
unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>,
{
unsafe fn clean_up<P: PageTableFrameMapping>(
page_table: &mut PageTable,
page_table_walker: &PageTableWalker<P>,
level: PageTableLevel,
range: PageRangeInclusive,
frame_deallocator: &mut impl FrameDeallocator<Size4KiB>,
) -> bool {
if range.is_empty() {
return false;
}
let table_addr = range
.start
.start_address()
.align_down(level.table_address_space_alignment());
let start = range.start.page_table_index(level);
let end = range.end.page_table_index(level);
if let Some(next_level) = level.next_lower_level() {
let offset_per_entry = level.entry_address_space_alignment();
for (i, entry) in page_table
.iter_mut()
.enumerate()
.take(usize::from(end) + 1)
.skip(usize::from(start))
{
if let Ok(page_table) = page_table_walker.next_table_mut(entry) {
let start = VirtAddr::forward_checked_impl(
table_addr,
(offset_per_entry as usize) * i,
)
.unwrap();
let end = start + (offset_per_entry - 1);
let start = Page::<Size4KiB>::containing_address(start);
let start = start.max(range.start);
let end = Page::<Size4KiB>::containing_address(end);
let end = end.min(range.end);
unsafe {
if clean_up(
page_table,
page_table_walker,
next_level,
Page::range_inclusive(start, end),
frame_deallocator,
) {
let frame = entry.frame().unwrap();
entry.set_unused();
frame_deallocator.deallocate_frame(frame);
}
}
}
}
}
page_table.iter().all(PageTableEntry::is_unused)
}
unsafe {
clean_up(
self.level_4_table,
&self.page_table_walker,
PageTableLevel::Four,
range,
frame_deallocator,
);
}
}
}
#[derive(Debug)]
struct PageTableWalker<P: PageTableFrameMapping> {
page_table_frame_mapping: P,
}
impl<P: PageTableFrameMapping> PageTableWalker<P> {
#[inline]
pub unsafe fn new(page_table_frame_mapping: P) -> Self {
Self {
page_table_frame_mapping,
}
}
/// Internal helper function to get a reference to the page table of the next level.
///
/// Returns `PageTableWalkError::NotMapped` if the entry is unused. Returns
/// `PageTableWalkError::MappedToHugePage` if the `HUGE_PAGE` flag is set
/// in the passed entry.
#[inline]
fn next_table<'b>(
&self,
entry: &'b PageTableEntry,
) -> Result<&'b PageTable, PageTableWalkError> {
let page_table_ptr = self
.page_table_frame_mapping
.frame_to_pointer(entry.frame()?);
let page_table: &PageTable = unsafe { &*page_table_ptr };
Ok(page_table)
}
/// Internal helper function to get a mutable reference to the page table of the next level.
///
/// Returns `PageTableWalkError::NotMapped` if the entry is unused. Returns
/// `PageTableWalkError::MappedToHugePage` if the `HUGE_PAGE` flag is set
/// in the passed entry.
#[inline]
fn next_table_mut<'b>(
&self,
entry: &'b mut PageTableEntry,
) -> Result<&'b mut PageTable, PageTableWalkError> {
let page_table_ptr = self
.page_table_frame_mapping
.frame_to_pointer(entry.frame()?);
let page_table: &mut PageTable = unsafe { &mut *page_table_ptr };
Ok(page_table)
}
/// Internal helper function to create the page table of the next level if needed.
///
/// If the passed entry is unused, a new frame is allocated from the given allocator, zeroed,
/// and the entry is updated to that address. If the passed entry is already mapped, the next
/// table is returned directly.
///
/// Returns `MapToError::FrameAllocationFailed` if the entry is unused and the allocator
/// returned `None`. Returns `MapToError::ParentEntryHugePage` if the `HUGE_PAGE` flag is set
/// in the passed entry.
fn create_next_table<'b, A>(
&self,
entry: &'b mut PageTableEntry,
insert_flags: PageTableFlags,
allocator: &mut A,
) -> Result<&'b mut PageTable, PageTableCreateError>
where
A: FrameAllocator<Size4KiB> + ?Sized,
{
let created;
if entry.is_unused() {
if let Some(frame) = allocator.allocate_frame() {
entry.set_frame(frame, insert_flags);
created = true;
} else {
return Err(PageTableCreateError::FrameAllocationFailed);
}
} else {
if !insert_flags.is_empty() && !entry.flags().contains(insert_flags) {
entry.set_flags(entry.flags() | insert_flags);
}
created = false;
}
let page_table = match self.next_table_mut(entry) {
Err(PageTableWalkError::MappedToHugePage) => {
return Err(PageTableCreateError::MappedToHugePage);
}
Err(PageTableWalkError::NotMapped) => panic!("entry should be mapped at this point"),
Ok(page_table) => page_table,
};
if created {
page_table.zero();
}
Ok(page_table)
}
}
#[derive(Debug)]
enum PageTableWalkError {
NotMapped,
MappedToHugePage,
}
#[derive(Debug)]
enum PageTableCreateError {
MappedToHugePage,
FrameAllocationFailed,
}
impl From<PageTableCreateError> for MapToError<Size4KiB> {
#[inline]
fn from(err: PageTableCreateError) -> Self {
match err {
PageTableCreateError::MappedToHugePage => MapToError::ParentEntryHugePage,
PageTableCreateError::FrameAllocationFailed => MapToError::FrameAllocationFailed,
}
}
}
impl From<PageTableCreateError> for MapToError<Size2MiB> {
#[inline]
fn from(err: PageTableCreateError) -> Self {
match err {
PageTableCreateError::MappedToHugePage => MapToError::ParentEntryHugePage,
PageTableCreateError::FrameAllocationFailed => MapToError::FrameAllocationFailed,
}
}
}
impl From<PageTableCreateError> for MapToError<Size1GiB> {
#[inline]
fn from(err: PageTableCreateError) -> Self {
match err {
PageTableCreateError::MappedToHugePage => MapToError::ParentEntryHugePage,
PageTableCreateError::FrameAllocationFailed => MapToError::FrameAllocationFailed,
}
}
}
impl From<FrameError> for PageTableWalkError {
#[inline]
fn from(err: FrameError) -> Self {
match err {
FrameError::HugeFrame => PageTableWalkError::MappedToHugePage,
FrameError::FrameNotPresent => PageTableWalkError::NotMapped,
}
}
}
impl From<PageTableWalkError> for UnmapError {
#[inline]
fn from(err: PageTableWalkError) -> Self {
match err {
PageTableWalkError::MappedToHugePage => UnmapError::ParentEntryHugePage,
PageTableWalkError::NotMapped => UnmapError::PageNotMapped,
}
}
}
impl From<PageTableWalkError> for FlagUpdateError {
#[inline]
fn from(err: PageTableWalkError) -> Self {
match err {
PageTableWalkError::MappedToHugePage => FlagUpdateError::ParentEntryHugePage,
PageTableWalkError::NotMapped => FlagUpdateError::PageNotMapped,
}
}
}
impl From<PageTableWalkError> for TranslateError {
#[inline]
fn from(err: PageTableWalkError) -> Self {
match err {
PageTableWalkError::MappedToHugePage => TranslateError::ParentEntryHugePage,
PageTableWalkError::NotMapped => TranslateError::PageNotMapped,
}
}
}
/// Provides a virtual address mapping for physical page table frames.
///
/// This only works if the physical address space is somehow mapped to the virtual
/// address space, e.g. at an offset.
///
/// ## Safety
///
/// This trait is unsafe to implement because the implementer must ensure that
/// `frame_to_pointer` returns a valid page table pointer for any given physical frame.
pub unsafe trait PageTableFrameMapping {
/// Translate the given physical frame to a virtual page table pointer.
fn frame_to_pointer(&self, frame: PhysFrame) -> *mut PageTable;
}
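// Illustrative sketch added by the editor (not part of the original crate
// source): an offset-based `PageTableFrameMapping`, assuming the whole
// physical address space is mapped at `phys_offset` in the virtual address
// space (the same idea `OffsetPageTable` builds on).
#[cfg(test)]
mod example {
    use super::PageTableFrameMapping;
    use crate::structures::paging::{PageTable, PhysFrame};
    use crate::VirtAddr;

    #[allow(dead_code)]
    struct PhysOffsetMapping {
        phys_offset: VirtAddr,
    }

    // SAFETY (for this sketch): sound only if the offset mapping really covers
    // every physical frame that holds a page table.
    unsafe impl PageTableFrameMapping for PhysOffsetMapping {
        fn frame_to_pointer(&self, frame: PhysFrame) -> *mut PageTable {
            // Physical frame start + offset = virtual address of that table.
            (self.phys_offset + frame.start_address().as_u64()).as_mut_ptr()
        }
    }
}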

View File

@ -0,0 +1,530 @@
//! Abstractions for reading and modifying the mapping of pages.
pub use self::mapped_page_table::{MappedPageTable, PageTableFrameMapping};
#[cfg(target_pointer_width = "64")]
pub use self::offset_page_table::OffsetPageTable;
#[cfg(feature = "instructions")]
pub use self::recursive_page_table::{InvalidPageTable, RecursivePageTable};
use crate::structures::paging::{
frame_alloc::{FrameAllocator, FrameDeallocator},
page::PageRangeInclusive,
page_table::PageTableFlags,
Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
};
use crate::{PhysAddr, VirtAddr};
mod mapped_page_table;
mod offset_page_table;
#[cfg(feature = "instructions")]
mod recursive_page_table;
/// An empty convenience trait that requires the `Mapper` trait for all page sizes.
pub trait MapperAllSizes: Mapper<Size4KiB> + Mapper<Size2MiB> + Mapper<Size1GiB> {}
impl<T> MapperAllSizes for T where T: Mapper<Size4KiB> + Mapper<Size2MiB> + Mapper<Size1GiB> {}
/// Provides methods for translating virtual addresses.
pub trait Translate {
/// Return the frame that the given virtual address is mapped to and the offset within that
/// frame.
///
/// If the given address has a valid mapping, the mapped frame and the offset within that
/// frame are returned. Otherwise an error value is returned.
///
/// This function works with huge pages of all sizes.
fn translate(&self, addr: VirtAddr) -> TranslateResult;
/// Translates the given virtual address to the physical address that it maps to.
///
/// Returns `None` if there is no valid mapping for the given address.
///
/// This is a convenience method. For more information about a mapping see the
/// [`translate`](Translate::translate) method.
#[inline]
fn translate_addr(&self, addr: VirtAddr) -> Option<PhysAddr> {
match self.translate(addr) {
TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => None,
TranslateResult::Mapped { frame, offset, .. } => Some(frame.start_address() + offset),
}
}
}
/// The return value of the [`Translate::translate`] function.
///
/// If the given address has a valid mapping, the `Mapped` variant is returned, with the size of the
/// mapped frame (4KiB, 2MiB, or 1GiB) indicated by the contained [`MappedFrame`]. The remaining variants indicate errors.
#[derive(Debug)]
pub enum TranslateResult {
/// The virtual address is mapped to a physical frame.
Mapped {
/// The mapped frame.
frame: MappedFrame,
/// The offset within the mapped frame.
offset: u64,
/// The entry flags in the lowest-level page table.
///
/// Flags of higher-level page table entries are not included here, but they can still
/// affect the effective flags for an address, for example when the WRITABLE flag is not
/// set for a level 3 entry.
flags: PageTableFlags,
},
/// The given virtual address is not mapped to a physical frame.
NotMapped,
/// The page table entry for the given virtual address points to an invalid physical address.
InvalidFrameAddress(PhysAddr),
}
/// Represents a physical frame mapped in a page table.
#[derive(Debug)]
pub enum MappedFrame {
/// The virtual address is mapped to a 4KiB frame.
Size4KiB(PhysFrame<Size4KiB>),
/// The virtual address is mapped to a "large" 2MiB frame.
Size2MiB(PhysFrame<Size2MiB>),
/// The virtual address is mapped to a "huge" 1GiB frame.
Size1GiB(PhysFrame<Size1GiB>),
}
impl MappedFrame {
/// Returns the start address of the frame.
pub const fn start_address(&self) -> PhysAddr {
match self {
MappedFrame::Size4KiB(frame) => frame.start_address,
MappedFrame::Size2MiB(frame) => frame.start_address,
MappedFrame::Size1GiB(frame) => frame.start_address,
}
}
/// Returns the size of the frame (4KiB, 2MiB or 1GiB).
pub const fn size(&self) -> u64 {
match self {
MappedFrame::Size4KiB(_) => Size4KiB::SIZE,
MappedFrame::Size2MiB(_) => Size2MiB::SIZE,
MappedFrame::Size1GiB(_) => Size1GiB::SIZE,
}
}
}
/// A trait for common page table operations on pages of size `S`.
pub trait Mapper<S: PageSize> {
/// Creates a new mapping in the page table.
///
/// This function might need additional physical frames to create new page tables. These
/// frames are allocated from the `allocator` argument. At most three frames are required.
///
/// Parent page table entries are automatically updated with `PRESENT | WRITABLE | USER_ACCESSIBLE`
/// if present in the `PageTableFlags`. Depending on the used mapper implementation
/// the `PRESENT` and `WRITABLE` flags might be set for parent tables,
/// even if they are not set in `PageTableFlags`.
///
/// The `map_to_with_table_flags` method gives explicit control over the parent page table flags.
///
/// ## Safety
///
/// Creating page table mappings is a fundamentally unsafe operation because
/// there are various ways to break memory safety through it. For example,
/// re-mapping an in-use page to a different frame changes and invalidates
/// all values stored in that page, resulting in undefined behavior on the
/// next use.
///
/// The caller must ensure that no undefined behavior or memory safety
/// violations can occur through the new mapping. Among other things, the
/// caller must prevent the following:
///
/// - Aliasing of `&mut` references, i.e. two `&mut` references that point to
/// the same physical address. This is undefined behavior in Rust.
/// - This can be ensured by mapping each page to an individual physical
/// frame that is not mapped anywhere else.
/// - Creating uninitialized or invalid values: Rust requires that all values
/// have a correct memory layout. For example, a `bool` must be either a 0
/// or a 1 in memory, but not a 3 or 4. An exception is the `MaybeUninit`
/// wrapper type, which abstracts over possibly uninitialized memory.
/// - This is only a problem when re-mapping pages to different physical
/// frames. Mapping a page that is not in use yet is fine.
///
/// Special care must be taken when sharing pages with other address spaces,
/// e.g. by setting the `GLOBAL` flag. For example, a global mapping must be
/// the same in all address spaces, otherwise undefined behavior can occur
/// because of TLB races. It's worth noting that all the above requirements
/// also apply to shared mappings, including the aliasing requirements.
///
/// # Examples
///
/// Create a USER_ACCESSIBLE mapping:
///
/// ```
/// # #[cfg(feature = "instructions")]
/// # use x86_64::structures::paging::{
/// # Mapper, Page, PhysFrame, FrameAllocator,
/// # Size4KiB, OffsetPageTable, page_table::PageTableFlags
/// # };
/// # #[cfg(feature = "instructions")]
/// # unsafe fn test(mapper: &mut OffsetPageTable, frame_allocator: &mut impl FrameAllocator<Size4KiB>,
/// # page: Page<Size4KiB>, frame: PhysFrame) {
/// mapper
/// .map_to(
/// page,
/// frame,
/// PageTableFlags::PRESENT
/// | PageTableFlags::WRITABLE
/// | PageTableFlags::USER_ACCESSIBLE,
/// frame_allocator,
/// )
/// .unwrap()
/// .flush();
/// # }
/// ```
#[inline]
unsafe fn map_to<A>(
&mut self,
page: Page<S>,
frame: PhysFrame<S>,
flags: PageTableFlags,
frame_allocator: &mut A,
) -> Result<MapperFlush<S>, MapToError<S>>
where
Self: Sized,
A: FrameAllocator<Size4KiB> + ?Sized,
{
let parent_table_flags = flags
& (PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE);
unsafe {
self.map_to_with_table_flags(page, frame, flags, parent_table_flags, frame_allocator)
}
}
/// Creates a new mapping in the page table.
///
/// This function might need additional physical frames to create new page tables. These
/// frames are allocated from the `allocator` argument. At most three frames are required.
///
/// The flags of the parent table(s) can be explicitly specified. Those flags are used for
/// newly created table entries, and for existing entries the flags are added.
///
/// Depending on the used mapper implementation, the `PRESENT` and `WRITABLE` flags might
/// be set for parent tables, even if they are not specified in `parent_table_flags`.
///
/// ## Safety
///
/// Creating page table mappings is a fundamentally unsafe operation because
/// there are various ways to break memory safety through it. For example,
/// re-mapping an in-use page to a different frame changes and invalidates
/// all values stored in that page, resulting in undefined behavior on the
/// next use.
///
/// The caller must ensure that no undefined behavior or memory safety
/// violations can occur through the new mapping. Among other things, the
/// caller must prevent the following:
///
/// - Aliasing of `&mut` references, i.e. two `&mut` references that point to
/// the same physical address. This is undefined behavior in Rust.
/// - This can be ensured by mapping each page to an individual physical
/// frame that is not mapped anywhere else.
/// - Creating uninitialized or invalid values: Rust requires that all values
/// have a correct memory layout. For example, a `bool` must be either a 0
/// or a 1 in memory, but not a 3 or 4. An exception is the `MaybeUninit`
/// wrapper type, which abstracts over possibly uninitialized memory.
/// - This is only a problem when re-mapping pages to different physical
/// frames. Mapping a page that is not in use yet is fine.
///
/// Special care must be taken when sharing pages with other address spaces,
/// e.g. by setting the `GLOBAL` flag. For example, a global mapping must be
/// the same in all address spaces, otherwise undefined behavior can occur
/// because of TLB races. It's worth noting that all the above requirements
/// also apply to shared mappings, including the aliasing requirements.
///
/// # Examples
///
/// Create USER_ACCESSIBLE | NO_EXECUTE | NO_CACHE mapping and update
/// the top hierarchy only with USER_ACCESSIBLE:
///
/// ```
/// # #[cfg(feature = "instructions")]
/// # use x86_64::structures::paging::{
/// # Mapper, PhysFrame, Page, FrameAllocator,
/// # Size4KiB, OffsetPageTable, page_table::PageTableFlags
/// # };
/// # #[cfg(feature = "instructions")]
/// # unsafe fn test(mapper: &mut OffsetPageTable, frame_allocator: &mut impl FrameAllocator<Size4KiB>,
/// # page: Page<Size4KiB>, frame: PhysFrame) {
/// mapper
/// .map_to_with_table_flags(
/// page,
/// frame,
/// PageTableFlags::PRESENT
/// | PageTableFlags::WRITABLE
/// | PageTableFlags::USER_ACCESSIBLE
/// | PageTableFlags::NO_EXECUTE
/// | PageTableFlags::NO_CACHE,
/// PageTableFlags::USER_ACCESSIBLE,
/// frame_allocator,
/// )
/// .unwrap()
/// .flush();
/// # }
/// ```
unsafe fn map_to_with_table_flags<A>(
&mut self,
page: Page<S>,
frame: PhysFrame<S>,
flags: PageTableFlags,
parent_table_flags: PageTableFlags,
frame_allocator: &mut A,
) -> Result<MapperFlush<S>, MapToError<S>>
where
Self: Sized,
A: FrameAllocator<Size4KiB> + ?Sized;
/// Removes a mapping from the page table and returns the frame that used to be mapped.
///
/// Note that no page tables or pages are deallocated.
fn unmap(&mut self, page: Page<S>) -> Result<(PhysFrame<S>, MapperFlush<S>), UnmapError>;
/// Updates the flags of an existing mapping.
///
/// To read the current flags of a mapped page, use the [`Translate::translate`] method.
///
/// ## Safety
///
/// This method is unsafe because changing the flags of a mapping
/// might result in undefined behavior. For example, setting the
/// `GLOBAL` and `WRITABLE` flags for a page might result in the corruption
/// of values stored in that page from processes running in other address
/// spaces.
unsafe fn update_flags(
&mut self,
page: Page<S>,
flags: PageTableFlags,
) -> Result<MapperFlush<S>, FlagUpdateError>;
/// Set the flags of an existing page level 4 table entry
///
/// ## Safety
///
/// This method is unsafe because changing the flags of a mapping
/// might result in undefined behavior. For example, setting the
/// `GLOBAL` and `WRITABLE` flags for a page might result in the corruption
/// of values stored in that page from processes running in other address
/// spaces.
unsafe fn set_flags_p4_entry(
&mut self,
page: Page<S>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError>;
/// Set the flags of an existing page table level 3 entry
///
/// ## Safety
///
/// This method is unsafe because changing the flags of a mapping
/// might result in undefined behavior. For example, setting the
/// `GLOBAL` and `WRITABLE` flags for a page might result in the corruption
/// of values stored in that page from processes running in other address
/// spaces.
unsafe fn set_flags_p3_entry(
&mut self,
page: Page<S>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError>;
/// Set the flags of an existing page table level 2 entry
///
/// ## Safety
///
/// This method is unsafe because changing the flags of a mapping
/// might result in undefined behavior. For example, setting the
/// `GLOBAL` and `WRITABLE` flags for a page might result in the corruption
/// of values stored in that page from processes running in other address
/// spaces.
unsafe fn set_flags_p2_entry(
&mut self,
page: Page<S>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError>;
/// Return the frame that the specified page is mapped to.
///
/// This function assumes that the page is mapped to a frame of size `S` and returns an
/// error otherwise.
fn translate_page(&self, page: Page<S>) -> Result<PhysFrame<S>, TranslateError>;
/// Maps the given frame to the virtual page with the same address.
///
/// ## Safety
///
/// This is a convenience function that invokes [`Mapper::map_to`] internally, so
/// all safety requirements of it also apply for this function.
#[inline]
unsafe fn identity_map<A>(
&mut self,
frame: PhysFrame<S>,
flags: PageTableFlags,
frame_allocator: &mut A,
) -> Result<MapperFlush<S>, MapToError<S>>
where
Self: Sized,
A: FrameAllocator<Size4KiB> + ?Sized,
S: PageSize,
Self: Mapper<S>,
{
let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
unsafe { self.map_to(page, frame, flags, frame_allocator) }
}
}
/// This type represents a page whose mapping has changed in the page table.
///
/// The old mapping might still be cached in the translation lookaside buffer (TLB), so it needs
/// to be flushed from the TLB before it's accessed. This type is returned from a function that
/// changed the mapping of a page to ensure that the TLB flush is not forgotten.
#[derive(Debug)]
#[must_use = "Page Table changes must be flushed or ignored."]
pub struct MapperFlush<S: PageSize>(Page<S>);
impl<S: PageSize> MapperFlush<S> {
/// Create a new flush promise
///
/// Note that this method is intended for implementing the [`Mapper`] trait and no other uses
/// are expected.
#[inline]
pub fn new(page: Page<S>) -> Self {
MapperFlush(page)
}
/// Flush the page from the TLB to ensure that the newest mapping is used.
#[cfg(feature = "instructions")]
#[inline]
pub fn flush(self) {
crate::instructions::tlb::flush(self.0.start_address());
}
/// Don't flush the TLB and silence the “must be used” warning.
#[inline]
pub fn ignore(self) {}
}
/// This type represents a change of a page table requiring a complete TLB flush
///
/// The old mapping might still be cached in the translation lookaside buffer (TLB), so it needs
/// to be flushed from the TLB before it's accessed. This type is returned from a function that
/// made the change to ensure that the TLB flush is not forgotten.
#[derive(Debug, Default)]
#[must_use = "Page Table changes must be flushed or ignored."]
pub struct MapperFlushAll(());
impl MapperFlushAll {
/// Create a new flush promise
///
/// Note that this method is intended for implementing the [`Mapper`] trait and no other uses
/// are expected.
#[inline]
pub fn new() -> Self {
MapperFlushAll(())
}
/// Flush all pages from the TLB to ensure that the newest mapping is used.
#[cfg(feature = "instructions")]
#[inline]
pub fn flush_all(self) {
crate::instructions::tlb::flush_all()
}
/// Don't flush the TLB and silence the “must be used” warning.
#[inline]
pub fn ignore(self) {}
}
/// This error is returned from `map_to` and similar methods.
#[derive(Debug)]
pub enum MapToError<S: PageSize> {
/// An additional frame was needed for the mapping process, but the frame allocator
/// returned `None`.
FrameAllocationFailed,
/// An upper level page table entry has the `HUGE_PAGE` flag set, which means that the
/// given page is part of an already mapped huge page.
ParentEntryHugePage,
/// The given page is already mapped to a physical frame.
PageAlreadyMapped(PhysFrame<S>),
}
/// An error indicating that an `unmap` call failed.
#[derive(Debug)]
pub enum UnmapError {
/// An upper level page table entry has the `HUGE_PAGE` flag set, which means that the
/// given page is part of a huge page and can't be freed individually.
ParentEntryHugePage,
/// The given page is not mapped to a physical frame.
PageNotMapped,
/// The page table entry for the given page points to an invalid physical address.
InvalidFrameAddress(PhysAddr),
}
/// An error indicating that an `update_flags` call failed.
#[derive(Debug)]
pub enum FlagUpdateError {
/// The given page is not mapped to a physical frame.
PageNotMapped,
/// An upper level page table entry has the `HUGE_PAGE` flag set, which means that the
/// given page is part of a huge page and can't be freed individually.
ParentEntryHugePage,
}
/// An error indicating that a `translate` call failed.
#[derive(Debug)]
pub enum TranslateError {
/// The given page is not mapped to a physical frame.
PageNotMapped,
/// An upper level page table entry has the `HUGE_PAGE` flag set, which means that the
/// given page is part of a huge page and can't be freed individually.
ParentEntryHugePage,
/// The page table entry for the given page points to an invalid physical address.
InvalidFrameAddress(PhysAddr),
}
static _ASSERT_OBJECT_SAFE: Option<&(dyn Translate + Sync)> = None;
/// Provides methods for cleaning up unused entries.
pub trait CleanUp {
/// Remove all empty P1-P3 tables
///
/// ## Safety
///
/// The caller has to guarantee that it's safe to free page table frames:
/// All page table frames must only be used once and only in this page table
/// (e.g. no reference counted page tables or reusing the same page tables for different virtual address ranges in the same page table).
unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
where
D: FrameDeallocator<Size4KiB>;
/// Remove all empty P1-P3 tables in a certain range
/// ```
/// # use core::ops::RangeInclusive;
/// # use x86_64::{VirtAddr, structures::paging::{
/// # FrameDeallocator, Size4KiB, mapper::CleanUp, page::Page,
/// # }};
/// # unsafe fn test(page_table: &mut impl CleanUp, frame_deallocator: &mut impl FrameDeallocator<Size4KiB>) {
/// // clean up all page tables in the lower half of the address space
/// let lower_half = Page::range_inclusive(
/// Page::containing_address(VirtAddr::new(0)),
/// Page::containing_address(VirtAddr::new(0x0000_7fff_ffff_ffff)),
/// );
/// page_table.clean_up_addr_range(lower_half, frame_deallocator);
/// # }
/// ```
///
/// ## Safety
///
/// The caller has to guarantee that it's safe to free page table frames:
/// All page table frames must only be used once and only in this page table
/// (e.g. no reference counted page tables or reusing the same page tables for different virtual address ranges in the same page table).
unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>;
}
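// Illustrative sketch added by the editor (not part of the original crate
// source): the intended calling pattern for `unmap` followed by `map_to`,
// generic over any 4KiB mapper; TLB maintenance is reduced to `ignore()`
// because `flush()` needs the "instructions" feature.
#[cfg(test)]
mod example {
    use super::{MapToError, Mapper};
    use crate::structures::paging::{FrameAllocator, Page, PageTableFlags, PhysFrame, Size4KiB};

    #[allow(dead_code)]
    unsafe fn remap<M, A>(
        mapper: &mut M,
        page: Page<Size4KiB>,
        new_frame: PhysFrame<Size4KiB>,
        flags: PageTableFlags,
        allocator: &mut A,
    ) -> Result<(), MapToError<Size4KiB>>
    where
        M: Mapper<Size4KiB>,
        A: FrameAllocator<Size4KiB>,
    {
        // Remove any existing mapping for the page first.
        if let Ok((_old_frame, flush)) = mapper.unmap(page) {
            flush.ignore();
        }
        // Map the page to the new frame; the caller inherits map_to's safety contract.
        unsafe { mapper.map_to(page, new_frame, flags, allocator)?.ignore() };
        Ok(())
    }
}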

View File

@ -0,0 +1,304 @@
#![cfg(target_pointer_width = "64")]
use crate::structures::paging::{
frame::PhysFrame, mapper::*, page::PageRangeInclusive, page_table::PageTable, FrameDeallocator,
Page, PageTableFlags,
};
/// A Mapper implementation that requires that the complete physical memory is mapped at some
/// offset in the virtual address space.
#[derive(Debug)]
pub struct OffsetPageTable<'a> {
inner: MappedPageTable<'a, PhysOffset>,
}
impl<'a> OffsetPageTable<'a> {
/// Creates a new `OffsetPageTable` that uses the given offset for converting virtual
/// to physical addresses.
///
/// The complete physical memory must be mapped in the virtual address space starting at
/// address `phys_offset`. This means that for example physical address `0x5000` can be
/// accessed through virtual address `phys_offset + 0x5000`. This mapping is required because
/// the mapper needs to access page tables, which are not mapped into the virtual address
/// space by default.
///
/// ## Safety
///
/// This function is unsafe because the caller must guarantee that the passed `phys_offset`
/// is correct. Also, the passed `level_4_table` must point to the level 4 page table
/// of a valid page table hierarchy. Otherwise this function might break memory safety, e.g.
/// by writing to an illegal memory location.
#[inline]
pub unsafe fn new(level_4_table: &'a mut PageTable, phys_offset: VirtAddr) -> Self {
let phys_offset = PhysOffset {
offset: phys_offset,
};
Self {
inner: unsafe { MappedPageTable::new(level_4_table, phys_offset) },
}
}
/// Returns an immutable reference to the wrapped level 4 `PageTable` instance.
pub fn level_4_table(&self) -> &PageTable {
self.inner.level_4_table()
}
/// Returns a mutable reference to the wrapped level 4 `PageTable` instance.
pub fn level_4_table_mut(&mut self) -> &mut PageTable {
self.inner.level_4_table_mut()
}
/// Returns the offset used for converting virtual to physical addresses.
pub fn phys_offset(&self) -> VirtAddr {
self.inner.page_table_frame_mapping().offset
}
}
#[derive(Debug)]
struct PhysOffset {
offset: VirtAddr,
}
unsafe impl PageTableFrameMapping for PhysOffset {
fn frame_to_pointer(&self, frame: PhysFrame) -> *mut PageTable {
let virt = self.offset + frame.start_address().as_u64();
virt.as_mut_ptr()
}
}
// delegate all trait implementations to inner
impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
page: Page<Size1GiB>,
frame: PhysFrame<Size1GiB>,
flags: PageTableFlags,
parent_table_flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size1GiB>, MapToError<Size1GiB>>
where
A: FrameAllocator<Size4KiB> + ?Sized,
{
unsafe {
self.inner
.map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator)
}
}
#[inline]
fn unmap(
&mut self,
page: Page<Size1GiB>,
) -> Result<(PhysFrame<Size1GiB>, MapperFlush<Size1GiB>), UnmapError> {
self.inner.unmap(page)
}
#[inline]
unsafe fn update_flags(
&mut self,
page: Page<Size1GiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size1GiB>, FlagUpdateError> {
unsafe { self.inner.update_flags(page, flags) }
}
#[inline]
unsafe fn set_flags_p4_entry(
&mut self,
page: Page<Size1GiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
unsafe { self.inner.set_flags_p4_entry(page, flags) }
}
#[inline]
unsafe fn set_flags_p3_entry(
&mut self,
page: Page<Size1GiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
unsafe { self.inner.set_flags_p3_entry(page, flags) }
}
#[inline]
unsafe fn set_flags_p2_entry(
&mut self,
page: Page<Size1GiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
unsafe { self.inner.set_flags_p2_entry(page, flags) }
}
#[inline]
fn translate_page(&self, page: Page<Size1GiB>) -> Result<PhysFrame<Size1GiB>, TranslateError> {
self.inner.translate_page(page)
}
}
impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
page: Page<Size2MiB>,
frame: PhysFrame<Size2MiB>,
flags: PageTableFlags,
parent_table_flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size2MiB>, MapToError<Size2MiB>>
where
A: FrameAllocator<Size4KiB> + ?Sized,
{
unsafe {
self.inner
.map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator)
}
}
#[inline]
fn unmap(
&mut self,
page: Page<Size2MiB>,
) -> Result<(PhysFrame<Size2MiB>, MapperFlush<Size2MiB>), UnmapError> {
self.inner.unmap(page)
}
#[inline]
unsafe fn update_flags(
&mut self,
page: Page<Size2MiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size2MiB>, FlagUpdateError> {
unsafe { self.inner.update_flags(page, flags) }
}
#[inline]
unsafe fn set_flags_p4_entry(
&mut self,
page: Page<Size2MiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
unsafe { self.inner.set_flags_p4_entry(page, flags) }
}
#[inline]
unsafe fn set_flags_p3_entry(
&mut self,
page: Page<Size2MiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
unsafe { self.inner.set_flags_p3_entry(page, flags) }
}
#[inline]
unsafe fn set_flags_p2_entry(
&mut self,
page: Page<Size2MiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
unsafe { self.inner.set_flags_p2_entry(page, flags) }
}
#[inline]
fn translate_page(&self, page: Page<Size2MiB>) -> Result<PhysFrame<Size2MiB>, TranslateError> {
self.inner.translate_page(page)
}
}
impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
page: Page<Size4KiB>,
frame: PhysFrame<Size4KiB>,
flags: PageTableFlags,
parent_table_flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size4KiB>, MapToError<Size4KiB>>
where
A: FrameAllocator<Size4KiB> + ?Sized,
{
unsafe {
self.inner
.map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator)
}
}
#[inline]
fn unmap(
&mut self,
page: Page<Size4KiB>,
) -> Result<(PhysFrame<Size4KiB>, MapperFlush<Size4KiB>), UnmapError> {
self.inner.unmap(page)
}
#[inline]
unsafe fn update_flags(
&mut self,
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size4KiB>, FlagUpdateError> {
unsafe { self.inner.update_flags(page, flags) }
}
#[inline]
unsafe fn set_flags_p4_entry(
&mut self,
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
unsafe { self.inner.set_flags_p4_entry(page, flags) }
}
#[inline]
unsafe fn set_flags_p3_entry(
&mut self,
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
unsafe { self.inner.set_flags_p3_entry(page, flags) }
}
#[inline]
unsafe fn set_flags_p2_entry(
&mut self,
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlushAll, FlagUpdateError> {
unsafe { self.inner.set_flags_p2_entry(page, flags) }
}
#[inline]
fn translate_page(&self, page: Page<Size4KiB>) -> Result<PhysFrame<Size4KiB>, TranslateError> {
self.inner.translate_page(page)
}
}
impl<'a> Translate for OffsetPageTable<'a> {
#[inline]
fn translate(&self, addr: VirtAddr) -> TranslateResult {
self.inner.translate(addr)
}
}
impl<'a> CleanUp for OffsetPageTable<'a> {
#[inline]
unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
where
D: FrameDeallocator<Size4KiB>,
{
unsafe { self.inner.clean_up(frame_deallocator) }
}
#[inline]
unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>,
{
unsafe { self.inner.clean_up_addr_range(range, frame_deallocator) }
}
}
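// Illustrative sketch added by the editor (not part of the original crate
// source): typical setup and lookup, assuming the bootloader mapped all of
// physical memory at `phys_offset` and that `l4_table` is the active level 4 table.
#[cfg(test)]
mod example {
    use super::OffsetPageTable;
    use crate::structures::paging::{mapper::Translate, PageTable};
    use crate::{PhysAddr, VirtAddr};

    #[allow(dead_code)]
    unsafe fn make_mapper(l4_table: &mut PageTable, phys_offset: VirtAddr) -> OffsetPageTable<'_> {
        // SAFETY: deferred to the caller, as required by `OffsetPageTable::new`.
        unsafe { OffsetPageTable::new(l4_table, phys_offset) }
    }

    #[allow(dead_code)]
    fn lookup(mapper: &OffsetPageTable<'_>, addr: VirtAddr) -> Option<PhysAddr> {
        // `translate_addr` walks the page tables through the physical-memory offset.
        mapper.translate_addr(addr)
    }
}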

View File

@ -0,0 +1,23 @@
//! Abstractions for page tables and other paging related structures.
//!
//! Page tables translate virtual memory “pages” to physical memory “frames”.
pub use self::frame::PhysFrame;
pub use self::frame_alloc::{FrameAllocator, FrameDeallocator};
#[doc(no_inline)]
pub use self::mapper::MappedPageTable;
#[cfg(target_pointer_width = "64")]
#[doc(no_inline)]
pub use self::mapper::OffsetPageTable;
#[cfg(feature = "instructions")]
#[doc(no_inline)]
pub use self::mapper::RecursivePageTable;
pub use self::mapper::{Mapper, Translate};
pub use self::page::{Page, PageSize, Size1GiB, Size2MiB, Size4KiB};
pub use self::page_table::{PageOffset, PageTable, PageTableFlags, PageTableIndex};
pub mod frame;
mod frame_alloc;
pub mod mapper;
pub mod page;
pub mod page_table;

View File

@ -0,0 +1,462 @@
//! Abstractions for default-sized and huge virtual memory pages.
use crate::sealed::Sealed;
use crate::structures::paging::page_table::PageTableLevel;
use crate::structures::paging::PageTableIndex;
use crate::VirtAddr;
use core::fmt;
#[cfg(feature = "step_trait")]
use core::iter::Step;
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Sub, SubAssign};
/// Trait for abstracting over the three possible page sizes on x86_64, 4KiB, 2MiB, 1GiB.
pub trait PageSize: Copy + Eq + PartialOrd + Ord + Sealed {
/// The page size in bytes.
const SIZE: u64;
/// A string representation of the page size for debug output.
const DEBUG_STR: &'static str;
}
/// This trait is implemented for 4KiB and 2MiB pages, but not for 1GiB pages.
pub trait NotGiantPageSize: PageSize {}
/// A standard 4KiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size4KiB {}
/// A “huge” 2MiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size2MiB {}
/// A “giant” 1GiB page.
///
/// (Only available on newer x86_64 CPUs.)
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size1GiB {}
impl PageSize for Size4KiB {
const SIZE: u64 = 4096;
const DEBUG_STR: &'static str = "4KiB";
}
impl NotGiantPageSize for Size4KiB {}
impl Sealed for super::Size4KiB {}
impl PageSize for Size2MiB {
const SIZE: u64 = Size4KiB::SIZE * 512;
const DEBUG_STR: &'static str = "2MiB";
}
impl NotGiantPageSize for Size2MiB {}
impl Sealed for super::Size2MiB {}
impl PageSize for Size1GiB {
const SIZE: u64 = Size2MiB::SIZE * 512;
const DEBUG_STR: &'static str = "1GiB";
}
impl Sealed for super::Size1GiB {}
/// A virtual memory page.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(C)]
pub struct Page<S: PageSize = Size4KiB> {
start_address: VirtAddr,
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// The page size in bytes.
pub const SIZE: u64 = S::SIZE;
/// Returns the page that starts at the given virtual address.
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid page start).
#[inline]
pub const fn from_start_address(address: VirtAddr) -> Result<Self, AddressNotAligned> {
if !address.is_aligned_u64(S::SIZE) {
return Err(AddressNotAligned);
}
Ok(Page::containing_address(address))
}
/// Returns the page that starts at the given virtual address.
///
/// ## Safety
///
/// The address must be correctly aligned.
#[inline]
pub const unsafe fn from_start_address_unchecked(start_address: VirtAddr) -> Self {
Page {
start_address,
size: PhantomData,
}
}
/// Returns the page that contains the given virtual address.
#[inline]
pub const fn containing_address(address: VirtAddr) -> Self {
Page {
start_address: address.align_down_u64(S::SIZE),
size: PhantomData,
}
}
/// Returns the start address of the page.
#[inline]
pub const fn start_address(self) -> VirtAddr {
self.start_address
}
/// Returns the size of the page (4KiB, 2MiB, or 1GiB).
#[inline]
pub const fn size(self) -> u64 {
S::SIZE
}
/// Returns the level 4 page table index of this page.
#[inline]
pub const fn p4_index(self) -> PageTableIndex {
self.start_address().p4_index()
}
/// Returns the level 3 page table index of this page.
#[inline]
pub const fn p3_index(self) -> PageTableIndex {
self.start_address().p3_index()
}
/// Returns the table index of this page at the specified level.
#[inline]
pub const fn page_table_index(self, level: PageTableLevel) -> PageTableIndex {
self.start_address().page_table_index(level)
}
/// Returns a range of pages, exclusive `end`.
#[inline]
pub const fn range(start: Self, end: Self) -> PageRange<S> {
PageRange { start, end }
}
/// Returns a range of pages, inclusive `end`.
#[inline]
pub const fn range_inclusive(start: Self, end: Self) -> PageRangeInclusive<S> {
PageRangeInclusive { start, end }
}
// FIXME: Move this into the `Step` impl, once `Step` is stabilized.
pub(crate) fn steps_between_impl(start: &Self, end: &Self) -> Option<usize> {
VirtAddr::steps_between_impl(&start.start_address, &end.start_address)
.map(|steps| steps / S::SIZE as usize)
}
// FIXME: Move this into the `Step` impl, once `Step` is stabilized.
pub(crate) fn forward_checked_impl(start: Self, count: usize) -> Option<Self> {
let count = count.checked_mul(S::SIZE as usize)?;
let start_address = VirtAddr::forward_checked_impl(start.start_address, count)?;
Some(Self {
start_address,
size: PhantomData,
})
}
}
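// Illustrative sketch (hypothetical example, not part of the upstream file):
// `from_start_address` rejects unaligned addresses, while `containing_address`
// silently aligns the address down to the page start.
#[allow(dead_code)]
fn example_page_lookup() {
    let addr = VirtAddr::new(0x1234_5678);
    // Not 4KiB-aligned, so this returns Err(AddressNotAligned).
    assert!(Page::<Size4KiB>::from_start_address(addr).is_err());
    // Aligning down yields the page starting at 0x1234_5000.
    let page: Page<Size4KiB> = Page::containing_address(addr);
    assert_eq!(page.start_address(), VirtAddr::new(0x1234_5000));
}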
impl<S: NotGiantPageSize> Page<S> {
/// Returns the level 2 page table index of this page.
#[inline]
pub const fn p2_index(self) -> PageTableIndex {
self.start_address().p2_index()
}
}
impl Page<Size1GiB> {
/// Returns the 1GiB memory page with the specified page table indices.
#[inline]
pub const fn from_page_table_indices_1gib(
p4_index: PageTableIndex,
p3_index: PageTableIndex,
) -> Self {
let mut addr = 0;
addr |= p4_index.into_u64() << 39;
addr |= p3_index.into_u64() << 30;
Page::containing_address(VirtAddr::new_truncate(addr))
}
}
impl Page<Size2MiB> {
/// Returns the 2MiB memory page with the specified page table indices.
#[inline]
pub const fn from_page_table_indices_2mib(
p4_index: PageTableIndex,
p3_index: PageTableIndex,
p2_index: PageTableIndex,
) -> Self {
let mut addr = 0;
addr |= p4_index.into_u64() << 39;
addr |= p3_index.into_u64() << 30;
addr |= p2_index.into_u64() << 21;
Page::containing_address(VirtAddr::new_truncate(addr))
}
}
impl Page<Size4KiB> {
/// Returns the 4KiB memory page with the specified page table indices.
#[inline]
pub const fn from_page_table_indices(
p4_index: PageTableIndex,
p3_index: PageTableIndex,
p2_index: PageTableIndex,
p1_index: PageTableIndex,
) -> Self {
let mut addr = 0;
addr |= p4_index.into_u64() << 39;
addr |= p3_index.into_u64() << 30;
addr |= p2_index.into_u64() << 21;
addr |= p1_index.into_u64() << 12;
Page::containing_address(VirtAddr::new_truncate(addr))
}
/// Returns the level 1 page table index of this page.
#[inline]
pub const fn p1_index(self) -> PageTableIndex {
self.start_address.p1_index()
}
}
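// Illustrative sketch (hypothetical example, not part of the upstream file):
// rebuilding a 4KiB page from its four table indices reproduces the original
// page, since the indices select bits 39..=47, 30..=38, 21..=29 and 12..=20 of
// the virtual address.
#[allow(dead_code)]
fn example_page_from_indices() {
    let page: Page<Size4KiB> = Page::containing_address(VirtAddr::new(0xdead_b000));
    let rebuilt = Page::<Size4KiB>::from_page_table_indices(
        page.p4_index(),
        page.p3_index(),
        page.p2_index(),
        page.p1_index(),
    );
    assert_eq!(page, rebuilt);
}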
impl<S: PageSize> fmt::Debug for Page<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_fmt(format_args!(
"Page[{}]({:#x})",
S::DEBUG_STR,
self.start_address().as_u64()
))
}
}
impl<S: PageSize> Add<u64> for Page<S> {
type Output = Self;
#[inline]
fn add(self, rhs: u64) -> Self::Output {
Page::containing_address(self.start_address() + rhs * S::SIZE)
}
}
impl<S: PageSize> AddAssign<u64> for Page<S> {
#[inline]
fn add_assign(&mut self, rhs: u64) {
*self = *self + rhs;
}
}
impl<S: PageSize> Sub<u64> for Page<S> {
type Output = Self;
#[inline]
fn sub(self, rhs: u64) -> Self::Output {
Page::containing_address(self.start_address() - rhs * S::SIZE)
}
}
impl<S: PageSize> SubAssign<u64> for Page<S> {
#[inline]
fn sub_assign(&mut self, rhs: u64) {
*self = *self - rhs;
}
}
impl<S: PageSize> Sub<Self> for Page<S> {
type Output = u64;
#[inline]
fn sub(self, rhs: Self) -> Self::Output {
(self.start_address - rhs.start_address) / S::SIZE
}
}
#[cfg(feature = "step_trait")]
impl<S: PageSize> Step for Page<S> {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
Self::steps_between_impl(start, end)
}
fn forward_checked(start: Self, count: usize) -> Option<Self> {
Self::forward_checked_impl(start, count)
}
fn backward_checked(start: Self, count: usize) -> Option<Self> {
let count = count.checked_mul(S::SIZE as usize)?;
let start_address = Step::backward_checked(start.start_address, count)?;
Some(Self {
start_address,
size: PhantomData,
})
}
}
/// A range of pages with exclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct PageRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: Page<S>,
/// The end of the range, exclusive.
pub end: Page<S>,
}
impl<S: PageSize> PageRange<S> {
/// Returns whether this range contains no pages.
#[inline]
pub fn is_empty(&self) -> bool {
self.start >= self.end
}
}
impl<S: PageSize> Iterator for PageRange<S> {
type Item = Page<S>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.start < self.end {
let page = self.start;
self.start += 1;
Some(page)
} else {
None
}
}
}
impl PageRange<Size2MiB> {
/// Converts the range of 2MiB pages to a range of 4KiB pages.
#[inline]
pub fn as_4kib_page_range(self) -> PageRange<Size4KiB> {
PageRange {
start: Page::containing_address(self.start.start_address()),
end: Page::containing_address(self.end.start_address()),
}
}
}
impl<S: PageSize> fmt::Debug for PageRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
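// Illustrative sketch (hypothetical example, not part of the upstream file):
// `Page::range` produces an exclusive range that iterates page by page.
#[allow(dead_code)]
fn example_page_range() {
    let start: Page<Size4KiB> = Page::containing_address(VirtAddr::new(0x1000));
    let end = start + 4; // exclusive upper bound, four pages in total
    assert_eq!(Page::range(start, end).count(), 4);
}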
/// A range of pages with inclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct PageRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: Page<S>,
/// The end of the range, inclusive.
pub end: Page<S>,
}
impl<S: PageSize> PageRangeInclusive<S> {
/// Returns whether this range contains no pages.
#[inline]
pub fn is_empty(&self) -> bool {
self.start > self.end
}
}
impl<S: PageSize> Iterator for PageRangeInclusive<S> {
type Item = Page<S>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.start <= self.end {
let page = self.start;
// If the end of the inclusive range is the maximum page possible for size S,
// incrementing start until it is greater than the end will cause an integer overflow.
// So instead, in that case we decrement end rather than incrementing start.
let max_page_addr = VirtAddr::new(u64::MAX) - (S::SIZE - 1);
if self.start.start_address() < max_page_addr {
self.start += 1;
} else {
self.end -= 1;
}
Some(page)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PageRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// The given address was not sufficiently aligned.
#[derive(Debug)]
pub struct AddressNotAligned;
impl fmt::Display for AddressNotAligned {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "the given address was not sufficiently aligned")
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn test_page_ranges() {
let page_size = Size4KiB::SIZE;
let number = 1000;
let start_addr = VirtAddr::new(0xdead_beaf);
let start: Page = Page::containing_address(start_addr);
let end = start + number;
let mut range = Page::range(start, end);
for i in 0..number {
assert_eq!(
range.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range.next(), None);
let mut range_inclusive = Page::range_inclusive(start, end);
for i in 0..=number {
assert_eq!(
range_inclusive.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range_inclusive.next(), None);
}
#[test]
pub fn test_page_range_inclusive_overflow() {
let page_size = Size4KiB::SIZE;
let number = 1000;
let start_addr = VirtAddr::new(u64::MAX).align_down(page_size) - number * page_size;
let start: Page = Page::containing_address(start_addr);
let end = start + number;
let mut range_inclusive = Page::range_inclusive(start, end);
for i in 0..=number {
assert_eq!(
range_inclusive.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range_inclusive.next(), None);
}
}

View File

@ -0,0 +1,460 @@
//! Abstractions for page tables and page table entries.
use core::fmt;
#[cfg(feature = "step_trait")]
use core::iter::Step;
use core::ops::{Index, IndexMut};
use super::{PageSize, PhysFrame, Size4KiB};
use crate::addr::PhysAddr;
use bitflags::bitflags;
/// The error returned by the `PageTableEntry::frame` method.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FrameError {
/// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
FrameNotPresent,
/// The entry does have the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
/// as return type, so a huge frame can't be returned.
HugeFrame,
}
/// A 64-bit page table entry.
#[derive(Clone)]
#[repr(transparent)]
pub struct PageTableEntry {
entry: u64,
}
impl PageTableEntry {
/// Creates an unused page table entry.
#[inline]
pub const fn new() -> Self {
PageTableEntry { entry: 0 }
}
/// Returns whether this entry is zero.
#[inline]
pub const fn is_unused(&self) -> bool {
self.entry == 0
}
/// Sets this entry to zero.
#[inline]
pub fn set_unused(&mut self) {
self.entry = 0;
}
/// Returns the flags of this entry.
#[inline]
pub const fn flags(&self) -> PageTableFlags {
PageTableFlags::from_bits_truncate(self.entry)
}
/// Returns the physical address mapped by this entry; it might be zero.
#[inline]
pub fn addr(&self) -> PhysAddr {
PhysAddr::new(self.entry & 0x000f_ffff_ffff_f000)
}
/// Returns the physical frame mapped by this entry.
///
/// Returns the following errors:
///
/// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
/// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
/// `addr` function must be used).
#[inline]
pub fn frame(&self) -> Result<PhysFrame, FrameError> {
if !self.flags().contains(PageTableFlags::PRESENT) {
Err(FrameError::FrameNotPresent)
} else if self.flags().contains(PageTableFlags::HUGE_PAGE) {
Err(FrameError::HugeFrame)
} else {
Ok(PhysFrame::containing_address(self.addr()))
}
}
/// Map the entry to the specified physical address with the specified flags.
#[inline]
pub fn set_addr(&mut self, addr: PhysAddr, flags: PageTableFlags) {
assert!(addr.is_aligned(Size4KiB::SIZE));
self.entry = (addr.as_u64()) | flags.bits();
}
/// Map the entry to the specified physical frame with the specified flags.
#[inline]
pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
assert!(!flags.contains(PageTableFlags::HUGE_PAGE));
self.set_addr(frame.start_address(), flags)
}
/// Sets the flags of this entry.
#[inline]
pub fn set_flags(&mut self, flags: PageTableFlags) {
self.entry = self.addr().as_u64() | flags.bits();
}
}
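// Illustrative sketch (hypothetical example, not part of the upstream file):
// pointing an entry at a 4KiB frame and reading the mapping back.
#[allow(dead_code)]
fn example_entry_roundtrip() {
    let mut entry = PageTableEntry::new();
    let frame: PhysFrame = PhysFrame::containing_address(PhysAddr::new(0x5000));
    entry.set_frame(frame, PageTableFlags::PRESENT | PageTableFlags::WRITABLE);
    assert_eq!(entry.frame(), Ok(frame));
    assert!(entry.flags().contains(PageTableFlags::WRITABLE));
}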
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("addr", &self.addr());
f.field("flags", &self.flags());
f.finish()
}
}
bitflags! {
/// Possible flags for a page table entry.
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
pub struct PageTableFlags: u64 {
/// Specifies whether the mapped frame or page table is loaded in memory.
const PRESENT = 1;
/// Controls whether writes to the mapped frames are allowed.
///
/// If this bit is unset in a level 1 page table entry, the mapped frame is read-only.
/// If this bit is unset in a higher level page table entry the complete range of mapped
/// pages is read-only.
const WRITABLE = 1 << 1;
/// Controls whether accesses from userspace (i.e. ring 3) are permitted.
const USER_ACCESSIBLE = 1 << 2;
/// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
/// policy is used.
const WRITE_THROUGH = 1 << 3;
/// Disables caching for the pointed entry, if it is cacheable.
const NO_CACHE = 1 << 4;
/// Set by the CPU when the mapped frame or page table is accessed.
const ACCESSED = 1 << 5;
/// Set by the CPU on a write to the mapped frame.
const DIRTY = 1 << 6;
/// Specifies that the entry maps a huge frame instead of a page table. Only allowed in
/// P2 or P3 tables.
const HUGE_PAGE = 1 << 7;
/// Indicates that the mapping is present in all address spaces, so it isn't flushed from
/// the TLB on an address space switch.
const GLOBAL = 1 << 8;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_9 = 1 << 9;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_10 = 1 << 10;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_11 = 1 << 11;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_52 = 1 << 52;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_53 = 1 << 53;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_54 = 1 << 54;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_55 = 1 << 55;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_56 = 1 << 56;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_57 = 1 << 57;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_58 = 1 << 58;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_59 = 1 << 59;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_60 = 1 << 60;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_61 = 1 << 61;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_62 = 1 << 62;
/// Forbid code execution from the mapped frames.
///
/// Can be only used when the no-execute page protection feature is enabled in the EFER
/// register.
const NO_EXECUTE = 1 << 63;
}
}
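// Illustrative sketch (hypothetical example, not part of the upstream file):
// `from_bits_truncate` keeps only the defined flag bits, so the physical
// address bits of a raw entry value are dropped.
#[allow(dead_code)]
fn example_flags_from_raw() {
    let raw: u64 = 0x0000_0000_0005_1003; // address bits plus PRESENT | WRITABLE
    let flags = PageTableFlags::from_bits_truncate(raw);
    assert_eq!(flags, PageTableFlags::PRESENT | PageTableFlags::WRITABLE);
}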
/// The number of entries in a page table.
const ENTRY_COUNT: usize = 512;
/// Represents a page table.
///
/// Always page-sized.
///
/// This struct implements the `Index` and `IndexMut` traits, so the entries can be accessed
/// through index operations. For example, `page_table[15]` returns the 16th page table entry.
///
/// Note that while this type implements [`Clone`], the users must be careful not to introduce
/// mutable aliasing by using the cloned page tables.
#[repr(align(4096))]
#[repr(C)]
#[derive(Clone)]
pub struct PageTable {
entries: [PageTableEntry; ENTRY_COUNT],
}
impl PageTable {
/// Creates an empty page table.
#[inline]
pub const fn new() -> Self {
const EMPTY: PageTableEntry = PageTableEntry::new();
PageTable {
entries: [EMPTY; ENTRY_COUNT],
}
}
/// Clears all entries.
#[inline]
pub fn zero(&mut self) {
for entry in self.iter_mut() {
entry.set_unused();
}
}
/// Returns an iterator over the entries of the page table.
#[inline]
pub fn iter(&self) -> impl Iterator<Item = &PageTableEntry> {
(0..512).map(move |i| &self.entries[i])
}
/// Returns an iterator that allows modifying the entries of the page table.
#[inline]
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
// Note that we intentionally don't just return `self.entries.iter()`:
// Some users may choose to create a reference to a page table at
// `0xffff_ffff_ffff_f000`. This causes problems because calculating
// the end pointer of the page tables causes an overflow. Therefore
// creating page tables at that address is unsound and must be avoided.
// Unfortunately creating such page tables is quite common when
// recursive page tables are used, so we try to avoid calculating the
// end pointer if possible. `core::slice::Iter` calculates the end
// pointer to determine when it should stop yielding elements. Because
// we want to avoid calculating the end pointer, we don't use
// `core::slice::Iter`, we implement our own iterator that doesn't
// calculate the end pointer. This doesn't make creating page tables at
// that address sound, but it avoids some easy to trigger
// miscompilations.
let ptr = self.entries.as_mut_ptr();
(0..512).map(move |i| unsafe { &mut *ptr.add(i) })
}
/// Checks if the page table is empty (all entries are zero).
#[inline]
pub fn is_empty(&self) -> bool {
self.iter().all(|entry| entry.is_unused())
}
}
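// Illustrative sketch (hypothetical example, not part of the upstream file): a
// freshly created table is empty, and entries can be addressed either by
// `usize` or by `PageTableIndex`.
#[allow(dead_code)]
fn example_page_table_indexing() {
    let mut table = PageTable::new();
    assert!(table.is_empty());
    table[PageTableIndex::new(3)].set_flags(PageTableFlags::PRESENT);
    assert!(!table[3usize].is_unused());
    table.zero();
    assert!(table.is_empty());
}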
impl Index<usize> for PageTable {
type Output = PageTableEntry;
#[inline]
fn index(&self, index: usize) -> &Self::Output {
&self.entries[index]
}
}
impl IndexMut<usize> for PageTable {
#[inline]
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.entries[index]
}
}
impl Index<PageTableIndex> for PageTable {
type Output = PageTableEntry;
#[inline]
fn index(&self, index: PageTableIndex) -> &Self::Output {
&self.entries[usize::from(index)]
}
}
impl IndexMut<PageTableIndex> for PageTable {
#[inline]
fn index_mut(&mut self, index: PageTableIndex) -> &mut Self::Output {
&mut self.entries[usize::from(index)]
}
}
impl Default for PageTable {
fn default() -> Self {
Self::new()
}
}
impl fmt::Debug for PageTable {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.entries[..].fmt(f)
}
}
/// A 9-bit index into a page table.
///
/// Can be used to select one of the 512 entries of a page table.
///
/// Guaranteed to only ever contain 0..512.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PageTableIndex(u16);
impl PageTableIndex {
/// Creates a new index from the given `u16`. Panics if the given value is >=512.
#[inline]
pub const fn new(index: u16) -> Self {
assert!((index as usize) < ENTRY_COUNT);
Self(index)
}
/// Creates a new index from the given `u16`. Throws away bits if the value is >=512.
#[inline]
pub const fn new_truncate(index: u16) -> Self {
Self(index % ENTRY_COUNT as u16)
}
#[inline]
pub(crate) const fn into_u64(self) -> u64 {
self.0 as u64
}
}
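// Illustrative sketch (hypothetical example, not part of the upstream file):
// `new` panics on out-of-range values, while `new_truncate` wraps them.
#[allow(dead_code)]
fn example_page_table_index() {
    assert_eq!(u16::from(PageTableIndex::new(511)), 511);
    assert_eq!(u16::from(PageTableIndex::new_truncate(512)), 0);
}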
impl From<PageTableIndex> for u16 {
#[inline]
fn from(index: PageTableIndex) -> Self {
index.0
}
}
impl From<PageTableIndex> for u32 {
#[inline]
fn from(index: PageTableIndex) -> Self {
u32::from(index.0)
}
}
impl From<PageTableIndex> for u64 {
#[inline]
fn from(index: PageTableIndex) -> Self {
index.into_u64()
}
}
impl From<PageTableIndex> for usize {
#[inline]
fn from(index: PageTableIndex) -> Self {
usize::from(index.0)
}
}
#[cfg(feature = "step_trait")]
impl Step for PageTableIndex {
#[inline]
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
end.0.checked_sub(start.0).map(usize::from)
}
#[inline]
fn forward_checked(start: Self, count: usize) -> Option<Self> {
let idx = usize::from(start).checked_add(count)?;
(idx < ENTRY_COUNT).then(|| Self::new(idx as u16))
}
#[inline]
fn backward_checked(start: Self, count: usize) -> Option<Self> {
let idx = usize::from(start).checked_sub(count)?;
Some(Self::new(idx as u16))
}
}
/// A 12-bit offset into a 4KiB Page.
///
/// This type is returned by the `VirtAddr::page_offset` method.
///
/// Guaranteed to only ever contain 0..4096.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PageOffset(u16);
impl PageOffset {
/// Creates a new offset from the given `u16`. Panics if the passed value is >=4096.
#[inline]
pub fn new(offset: u16) -> Self {
assert!(offset < (1 << 12));
Self(offset)
}
/// Creates a new offset from the given `u16`. Throws away bits if the value is >=4096.
#[inline]
pub const fn new_truncate(offset: u16) -> Self {
Self(offset % (1 << 12))
}
}
impl From<PageOffset> for u16 {
#[inline]
fn from(offset: PageOffset) -> Self {
offset.0
}
}
impl From<PageOffset> for u32 {
#[inline]
fn from(offset: PageOffset) -> Self {
u32::from(offset.0)
}
}
impl From<PageOffset> for u64 {
#[inline]
fn from(offset: PageOffset) -> Self {
u64::from(offset.0)
}
}
impl From<PageOffset> for usize {
#[inline]
fn from(offset: PageOffset) -> Self {
usize::from(offset.0)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
/// A value between 1 and 4.
pub enum PageTableLevel {
/// Represents the level for a page table.
One = 1,
/// Represents the level for a page directory.
Two,
/// Represents the level for a page-directory pointer.
Three,
/// Represents the level for a page-map level-4.
Four,
}
impl PageTableLevel {
/// Returns the next lower level, or `None` for level 1.
pub const fn next_lower_level(self) -> Option<Self> {
match self {
PageTableLevel::Four => Some(PageTableLevel::Three),
PageTableLevel::Three => Some(PageTableLevel::Two),
PageTableLevel::Two => Some(PageTableLevel::One),
PageTableLevel::One => None,
}
}
/// Returns the next higher level, or `None` for level 4.
pub const fn next_higher_level(self) -> Option<Self> {
match self {
PageTableLevel::Four => None,
PageTableLevel::Three => Some(PageTableLevel::Four),
PageTableLevel::Two => Some(PageTableLevel::Three),
PageTableLevel::One => Some(PageTableLevel::Two),
}
}
/// Returns the alignment for the address space described by a table of this level.
pub const fn table_address_space_alignment(self) -> u64 {
1u64 << (self as u8 * 9 + 12)
}
/// Returns the alignment for the address space described by an entry in a table of this level.
pub const fn entry_address_space_alignment(self) -> u64 {
1u64 << (((self as u8 - 1) * 9) + 12)
}
}
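// Illustrative sketch (hypothetical example, not part of the upstream file):
// each level adds 9 address bits on top of the 12-bit page offset, so a
// level-2 entry maps 2MiB and a whole level-2 table spans 1GiB.
#[allow(dead_code)]
fn example_level_alignment() {
    let l2 = PageTableLevel::Two;
    assert_eq!(l2.entry_address_space_alignment(), 2 * 1024 * 1024);
    assert_eq!(l2.table_address_space_alignment(), 1024 * 1024 * 1024);
}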

View File

@ -0,0 +1,29 @@
//! Traits for accessing I/O ports.
/// A helper trait that implements the read port operation.
///
/// On x86, I/O ports operate on either `u8` (via `inb`/`outb`), `u16` (via `inw`/`outw`),
/// or `u32` (via `inl`/`outl`). Therefore this trait is implemented for exactly these types.
pub trait PortRead {
/// Reads a `Self` value from the given port.
///
/// ## Safety
///
/// This function is unsafe because the I/O port could have side effects that violate memory
/// safety.
unsafe fn read_from_port(port: u16) -> Self;
}
/// A helper trait that implements the write port operation.
///
/// On x86, I/O ports operate on either `u8` (via `inb`/`outb`), `u16` (via `inw`/`outw`),
/// or `u32` (via `inl`/`outl`). Therefore this trait is implemented for exactly these types.
pub trait PortWrite {
/// Writes a `Self` value to the given port.
///
/// ## Safety
///
/// This function is unsafe because the I/O port could have side effects that violate memory
/// safety.
unsafe fn write_to_port(port: u16, value: Self);
}
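// Illustrative sketch (hypothetical helper, not part of the upstream file): a
// generic read-modify-write built on top of the two traits, usable for any of
// the implementing types (`u8`, `u16`, `u32`).
#[allow(dead_code)]
unsafe fn example_read_modify_write<T: PortRead + PortWrite>(port: u16, f: impl FnOnce(T) -> T) {
    // Safety is inherited from the caller: the port must be safe to access.
    let value = unsafe { T::read_from_port(port) };
    unsafe { T::write_to_port(port, f(value)) };
}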

View File

@ -0,0 +1,56 @@
//! Provides a type for the task state segment structure.
use crate::VirtAddr;
use core::mem::size_of;
/// In 64-bit mode the TSS holds information that is not
/// directly related to the task-switch mechanism,
/// but is used for finding the kernel-level stack
/// if interrupts arrive while in kernel mode.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed(4))]
pub struct TaskStateSegment {
reserved_1: u32,
/// The full 64-bit canonical forms of the stack pointers (RSP) for privilege levels 0-2.
pub privilege_stack_table: [VirtAddr; 3],
reserved_2: u64,
/// The full 64-bit canonical forms of the interrupt stack table (IST) pointers.
pub interrupt_stack_table: [VirtAddr; 7],
reserved_3: u64,
reserved_4: u16,
/// The 16-bit offset to the I/O permission bit map from the 64-bit TSS base.
pub iomap_base: u16,
}
impl TaskStateSegment {
/// Creates a new TSS with zeroed privilege and interrupt stack table and an
/// empty I/O-Permission Bitmap.
///
/// As we always set the TSS segment limit to
/// `size_of::<TaskStateSegment>() - 1`, this means that `iomap_base` is
/// initialized to `size_of::<TaskStateSegment>()`.
#[inline]
pub const fn new() -> TaskStateSegment {
TaskStateSegment {
privilege_stack_table: [VirtAddr::zero(); 3],
interrupt_stack_table: [VirtAddr::zero(); 7],
iomap_base: size_of::<TaskStateSegment>() as u16,
reserved_1: 0,
reserved_2: 0,
reserved_3: 0,
reserved_4: 0,
}
}
}
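// Illustrative sketch (hypothetical example, not part of the upstream file): a
// typical setup stores the top of a dedicated stack in one of the interrupt
// stack table slots, e.g. for the double-fault handler.
#[allow(dead_code)]
fn example_tss_setup(double_fault_stack_top: VirtAddr) -> TaskStateSegment {
    let mut tss = TaskStateSegment::new();
    tss.interrupt_stack_table[0] = double_fault_stack_top;
    tss
}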
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn check_tss_size() {
// Per the SDM, the minimum size of a TSS is 0x68 bytes, giving a
// minimum limit of 0x67.
assert_eq!(size_of::<TaskStateSegment>(), 0x68);
}
}