Restrict usage of panicking code and require justification for panics

This commit is contained in:
pjht 2024-07-14 10:45:40 -05:00
parent 40a3a2c3b4
commit 49e71b1555
Signed by: pjht
GPG Key ID: 7B5F6AFBEC7EE78E
13 changed files with 842 additions and 311 deletions

37
Cargo.lock generated
View File

@ -26,6 +26,12 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "az"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973"
[[package]]
name = "bit_field"
version = "0.10.2"
@ -59,6 +65,12 @@ dependencies = [
"spin",
]
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cfg-if"
version = "1.0.0"
@ -113,6 +125,15 @@ dependencies = [
"allocator-api2",
]
[[package]]
name = "humansize"
version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6cb51c9a029ddc91b07a787f1d86b53ccfa49b0e86688c946ebe8d3555685dd7"
dependencies = [
"libm",
]
[[package]]
name = "intrusive-collections"
version = "0.9.6"
@ -126,16 +147,20 @@ dependencies = [
name = "kernel"
version = "0.1.0"
dependencies = [
"az",
"bootloader_api",
"buddy_system_allocator",
"cast",
"crossbeam-queue",
"derive-try-from-primitive",
"elf",
"hashbrown",
"humansize",
"intrusive-collections",
"linked_list_allocator",
"pic8259",
"replace_with",
"saturating_cast",
"slab",
"spin",
"static_assertions",
@ -146,6 +171,12 @@ dependencies = [
"x86_64",
]
[[package]]
name = "libm"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
[[package]]
name = "linked_list_allocator"
version = "0.10.5"
@ -249,6 +280,12 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
[[package]]
name = "saturating_cast"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fc4972f129a0ea378b69fa7c186d63255606e362ad00795f00b869dea5265eb"
[[package]]
name = "scopeguard"
version = "1.2.0"

View File

@ -24,3 +24,7 @@ elf = { version = "0.7.4", default-features = false }
x86_64 = "0.15.1"
buddy_system_allocator = "0.9.1"
derive-try-from-primitive = { version = "1.0.0", default-features = false }
saturating_cast = "0.1.0"
humansize = "2.1.3"
cast = "0.3.0"
az = "1.2.1"

View File

@ -16,6 +16,13 @@ impl BootInfoHolder {
impl Deref for BootInfoHolder {
    type Target = &'static BootInfo;

    /// Returns the boot info reference.
    ///
    /// # Panics
    /// Panics if the boot info has not yet been initialized (it is set during
    /// `_start`, so any earlier access is a kernel bug).
    fn deref(&self) -> &Self::Target {
        #[expect(
            clippy::expect_used,
            reason = "
            Bootinfo gets initialized during _start, so any use before initialization is a bug.
            Error propagation would result in having to write a justified unwrap every time bootinfo is used, so just panic.
            "
        )]
        self.0.get().expect("Boot info used before initialization!")
    }
}

View File

@ -10,13 +10,10 @@ use x86_64::{
use spin::Lazy;
#[allow(unused)]
struct Selectors {
code_sel: SegmentSelector,
data_sel: SegmentSelector,
user_data_sel: SegmentSelector,
user_code_sel: SegmentSelector,
tss_sel: SegmentSelector,
code: SegmentSelector,
data: SegmentSelector,
tss: SegmentSelector,
}
struct GDTAndSelectors {
@ -27,14 +24,13 @@ struct GDTAndSelectors {
static mut TSS: TaskStateSegment = TaskStateSegment::new();
static GDT: Lazy<GDTAndSelectors> = Lazy::new(|| {
let mut gdt = GlobalDescriptorTable::new();
let selectors = Selectors {
code_sel: gdt.append(Descriptor::kernel_code_segment()),
data_sel: gdt.append(Descriptor::kernel_data_segment()),
// SAFETY: The TSS is a static and thus a pointer to it will always be valid
tss_sel: gdt.append(unsafe { Descriptor::tss_segment_unchecked(addr_of!(TSS)) }),
user_data_sel: gdt.append(Descriptor::user_data_segment()),
user_code_sel: gdt.append(Descriptor::user_code_segment()),
};
let code_sel = gdt.append(Descriptor::kernel_code_segment());
let data_sel = gdt.append(Descriptor::kernel_data_segment());
// SAFETY: The TSS is a static and thus a pointer to it will always be valid
let tss_sel = gdt.append(unsafe { Descriptor::tss_segment_unchecked(addr_of!(TSS)) });
gdt.append(Descriptor::user_data_segment());
gdt.append(Descriptor::user_code_segment());
let selectors = Selectors { code: code_sel, data: data_sel, tss: tss_sel };
GDTAndSelectors { gdt, selectors }
});
@ -43,13 +39,12 @@ pub fn init() {
// SAFETY: The selectors are always valid due to coming
// from the currently loaded GDT
unsafe {
CS::set_reg(GDT.selectors.code_sel);
SS::set_reg(GDT.selectors.data_sel);
load_tss(GDT.selectors.tss_sel);
CS::set_reg(GDT.selectors.code);
SS::set_reg(GDT.selectors.data);
load_tss(GDT.selectors.tss);
};
}
#[allow(unused)]
pub fn set_tss_stack(addr: VirtAddr) {
// SAFETY: This is safe as there is no other way to write to
// the TSS except via this function, and the CPU only reads it

View File

@ -7,9 +7,12 @@ use crate::{
TASKING,
};
use alloc::{boxed::Box, vec::Vec};
use core::{arch::asm, ptr::addr_of, slice, str};
use az::WrappingCast;
use cast::{u64, usize};
use core::{arch::asm, ptr, slice, str};
use hashbrown::HashMap;
use pic8259::ChainedPics;
use saturating_cast::SaturatingCast;
use spin::{Lazy, Mutex, RwLock};
use tap::Tap;
use x86_64::{
@ -86,34 +89,50 @@ extern "x86-interrupt" fn page_fault_handler(
stack_frame: InterruptStackFrame,
error_code: PageFaultErrorCode,
) {
#[warn(clippy::expect_used, reason = "FIXME")]
let faulting_addr =
Cr2::read().expect("Cannot handle page faults caused by non-canonical addresses");
#[warn(clippy::panic, reason = "FIXME: This should just abort the current process")]
if error_code.contains(PageFaultErrorCode::PROTECTION_VIOLATION) {
panic!(
"Got Page Fault {error_code:#?} at {:#x}\nEntry flags: {:#?}\n{stack_frame:#?}",
Cr2::read().unwrap(),
match ACTIVE_SPACE.lock().translate(Cr2::read().unwrap()) {
faulting_addr,
match ACTIVE_SPACE.lock().translate(faulting_addr) {
TranslateResult::Mapped { flags, .. } => flags,
_ => {
_ =>
{
#![allow(
clippy::panic,
reason = "A protection violation only happens on mapped addresses. If we get here, something has gone VERY wrong."
)]
panic!();
}
},
);
} else {
panic!("Got Page Fault {error_code:#?} at {:#x}\n{stack_frame:#?}", Cr2::read().unwrap(),);
panic!("Got Page Fault {error_code:#?} at {:#x}\n{stack_frame:#?}", faulting_addr);
}
}
#[expect(clippy::needless_pass_by_value, reason = "Signature dictated by external crate")]
fn general_handler(stack_frame: InterruptStackFrame, index: u8, _error_code: Option<u64>) {
println!("Other interrupt {index}\n{stack_frame:#?}");
}
#[expect(clippy::needless_pass_by_value, reason = "Signature dictated by external crate")]
fn exc_handler(stack_frame: InterruptStackFrame, index: u8, error_code: Option<u64>) {
#[warn(clippy::panic, reason = "FIXME: This should just abort the current process")]
#[expect(
clippy::indexing_slicing,
reason = "This function is only called for interrupt numbers 0-32, which all are valid indexes"
)]
if let Some(error_code) = error_code {
panic!(
"Got exception {} with error code {error_code}\n{stack_frame:#?}",
INTERRUPT_NAMES[index as usize]
INTERRUPT_NAMES[usize(index)]
);
} else {
panic!("Got exception {}\n{stack_frame:#?}", INTERRUPT_NAMES[index as usize]);
panic!("Got exception {}\n{stack_frame:#?}", INTERRUPT_NAMES[usize(index)]);
};
}
@ -127,10 +146,19 @@ impl Drop for EoiGuard {
}
}
#[expect(clippy::needless_pass_by_value, reason = "Signature dictated by external crate")]
fn irq_handler(_stack_frame: InterruptStackFrame, index: u8, _error_code: Option<u64>) {
#[expect(
clippy::arithmetic_side_effects,
reason = "This function is only called for irqs, which are always above the base index."
)]
let irq_num = index - IRQ_BASE;
let eoi_guard = EoiGuard(index);
if let Some(handler) = IRQ_HANDLERS.read()[irq_num as usize] {
#[expect(
clippy::indexing_slicing,
reason = "This function is only called for 16 irqs, which are all valid indexes"
)]
if let Some(handler) = IRQ_HANDLERS.read()[usize(irq_num)] {
handler(irq_num, eoi_guard);
};
}
@ -206,14 +234,13 @@ fn get_buffer(id: u64) -> Option<Box<[u8], &'static ASpaceMutex>> {
TASKING
.lock()
.data_buffers_mut()
.try_remove(id as usize)
.try_remove(usize(id))
.map(|buf| unsafe { Box::from_raw_in(buf, &*KERNEL_SPACE) })
}
static REGISTERD_PIDS: Lazy<RwLock<HashMap<u64, u64>>> = Lazy::new(|| RwLock::new(HashMap::new()));
#[no_mangle]
#[allow(clippy::unit_arg)]
extern "C" fn syscall_handler() {
let regs = unsafe { SYSCALL_REGS };
let mut retval = 0;
@ -221,92 +248,142 @@ extern "C" fn syscall_handler() {
let mut retval3 = regs.rdx;
match regs.rax {
0 => {
retval = if let Some(chr) = char::from_u32(regs.rcx as u32) {
let rval = if let Some(chr) = char::from_u32(regs.rcx.wrapping_cast()) {
print!("{}", chr);
0
} else {
1
}
};
retval = rval;
}
1 => TASKING.lock().exit(),
2 => {
retval = if regs.rcx == 0 {
ACTIVE_SPACE
.lock()
.map_free(regs.rdx as usize, PageTableFlags::from_bits_truncate(regs.rsi))
.map_or(0, |x| x as u64)
} else if let Some(space) =
TASKING.lock().address_spaces_mut().get_mut((regs.rcx - 1) as usize)
.map_free(usize(regs.rdx), PageTableFlags::from_bits_truncate(regs.rsi))
.map_or(0, |x| u64(x.expose_provenance()))
} else if let Some(space) = #[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
TASKING
.lock()
.address_spaces_mut()
.get_mut(usize(regs.rcx - 1))
{
space
.map_free(regs.rdx as usize, PageTableFlags::from_bits_truncate(regs.rsi))
.map_or(0, |x| x as u64)
.map_free(usize(regs.rdx), PageTableFlags::from_bits_truncate(regs.rsi))
.map_or(0, |x| u64(x.expose_provenance()))
} else {
0
}
}
3 => {
let initrd = unsafe {
#[warn(clippy::expect_used, reason = "FIXME")]
let ramdisk_start =
BOOTINFO.ramdisk_addr.into_option().expect("ramdisk to be present");
BOOTINFO.ramdisk_addr.into_option().expect("initrd not present");
let ramdisk_len = BOOTINFO.ramdisk_len;
slice::from_raw_parts(ramdisk_start as *const u8, ramdisk_len as usize)
slice::from_raw_parts(
ptr::with_exposed_provenance::<u8>(usize(ramdisk_start)),
usize(ramdisk_len),
)
};
let initrd = Box::leak(
Vec::with_capacity_in(initrd.len(), &*ACTIVE_SPACE)
.tap_mut(|v| v.extend_from_slice(initrd))
.into_boxed_slice(),
);
retval = addr_of!(initrd[0]) as u64;
retval2 = initrd.len() as u64;
retval = u64(initrd.as_ptr().expose_provenance());
retval2 = u64(initrd.len());
}
4 => {
retval = (TASKING.lock().address_spaces_mut().insert(AddressSpace::new().unwrap()) + 1)
as u64;
#[warn(clippy::expect_used, reason = "FIXME")]
#[expect(
clippy::arithmetic_side_effects,
reason = "usize::MAX will never be returned as an index, and so incrementing can never overflow."
)]
let address_space = u64(TASKING
.lock()
.address_spaces_mut()
.insert(AddressSpace::new().expect("Failed to create address space"))
+ 1);
retval = address_space;
}
5 => {
TASKING.lock().address_spaces_mut().remove((regs.rcx - 1) as usize);
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
TASKING.lock().address_spaces_mut().remove(usize(regs.rcx - 1));
}
6 => {
let page = Page::from_start_address(VirtAddr::new(regs.rdx)).unwrap();
let num_pages = regs.rsi as usize;
6 => 'call6: {
let Ok(page) = Page::from_start_address(VirtAddr::new(regs.rdx)) else {
retval = 1;
break 'call6;
};
let num_pages = usize(regs.rsi);
let flags = PageTableFlags::from_bits_truncate(regs.rdi);
retval = if regs.rcx == 0 {
let failed = if regs.rcx == 0 {
ACTIVE_SPACE.lock().map_assert_unused(page, num_pages, flags)
} else {
TASKING
.lock()
.address_spaces_mut()
.get_mut((regs.rcx - 1) as usize)
.unwrap()
.map_assert_unused(page, num_pages, flags)
}
.is_err() as u64;
}
7 => {
if let Some(buffer) = get_buffer(regs.rdx) {
let len = regs.rdi;
assert!(len <= buffer.len() as u64);
let mut tasking = TASKING.lock();
let space = tasking.address_spaces_mut().get_mut((regs.rcx - 1) as usize).unwrap();
space.run(|| unsafe { (regs.rsi as *mut u8).copy_from(&buffer[0], len as usize) });
let Some(address_space) =
#[warn(clippy::arithmetic_side_effects, reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0.")]
tasking.address_spaces_mut().get_mut(usize(regs.rcx - 1))
else {
retval = 1;
break 'call6;
};
address_space.map_assert_unused(page, num_pages, flags)
}
.is_err();
retval = failed.into();
}
7 => 'call7: {
if let Some(buffer) = get_buffer(regs.rdx) {
let len = usize(regs.rdi);
assert!(len <= buffer.len());
let mut tasking = TASKING.lock();
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
let Some(space) = tasking.address_spaces_mut().get_mut(usize(regs.rcx - 1)) else {
retval = 1;
break 'call7;
};
space.run(|| unsafe {
(ptr::with_exposed_provenance_mut::<u8>(usize(regs.rsi)))
.copy_from(buffer.as_ptr(), len);
});
retval = 0;
} else {
retval = 1;
}
}
8 => {
let space = TASKING.lock().address_spaces_mut().remove((regs.rdx - 1) as usize);
let res = TASKING.lock().new_process(regs.rcx as *const _, space);
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
let space = TASKING.lock().address_spaces_mut().remove(usize(regs.rdx - 1));
let res =
TASKING.lock().new_process(ptr::with_exposed_provenance(usize(regs.rcx)), space);
if let Ok(pid) = res {
retval = 0;
retval2 = pid as u64;
retval2 = u64(pid);
} else {
retval = 1;
}
}
9 => {
REGISTERD_PIDS.write().insert(regs.rcx, TASKING.lock().current_pid().unwrap() as u64);
#[expect(
clippy::unwrap_used,
reason = "Syscalls cannot be called during early boot, the only time when there is no current PID"
)]
REGISTERD_PIDS.write().insert(regs.rcx, u64(TASKING.lock().current_pid().unwrap()));
}
10 => {
let id = REGISTERD_PIDS.read().get(&regs.rcx).copied();
@ -318,34 +395,73 @@ extern "C" fn syscall_handler() {
}
}
11 => {
let pid = regs.rcx as usize;
let pid = usize(regs.rcx);
if let Some(buffer) = get_buffer(regs.rdx) {
assert!(regs.rsi as usize <= buffer.len());
let len = usize(regs.rsi);
assert!(len <= buffer.len());
let mut tasking = TASKING.lock();
if let Some(_queue) = tasking.message_queue_mut(pid) {
let len = regs.rsi as usize;
let trunc_len = usize::min(len, 4096) as u32;
if let Ok(_queue) = tasking.message_queue_mut(pid) {
#[expect(
clippy::unwrap_used,
reason = "The min call guarantees that the value is in the range of a u32 before the cast"
)]
let trunc_len: u32 = usize::min(len, 4096).try_into().unwrap();
#[expect(
clippy::arithmetic_side_effects,
reason = "Can't underflow, as x % 4 < 4 no matter the x"
)]
let padding = if (trunc_len % 4) != 0 { 4 - (trunc_len % 4) } else { 0 };
#[expect(
clippy::arithmetic_side_effects,
reason = "Can't overflow, as padding is no more than 4 and trunc_len is no more than 4096."
)]
let padded_len = trunc_len + padding;
#[expect(
clippy::arithmetic_side_effects,
reason = "Can't overflow, as padded_len is no more than 4096 and 4096+24 < u32::MAX"
)]
let total_len = padded_len + 8 + (4 * 4);
SECOND_PORT.write_u32s(&[
0x3, // SPB type
padded_len + 8 + (4 * 4), // Total block length
(len as u32) + 8, // Packet length
0x3, // SPB type
total_len, // Total block length
len.saturating_cast::<u32>().saturating_add(8), // Packet length
]);
SECOND_PORT.write_bytes(&pid.to_ne_bytes());
SECOND_PORT.write_bytes(&buffer[0..trunc_len as usize]);
#[expect(
clippy::indexing_slicing,
reason = "The truncated length is always <= the buffer's length"
)]
SECOND_PORT.write_bytes(&buffer[0..usize(trunc_len)]);
for _ in 0..padding {
SECOND_PORT.write_bytes(&[0]);
}
SECOND_PORT.write_u32s(&[
padded_len + 8 + (4 * 4), // Total block length
total_len, // Total block length
]);
let buffer = Box::into_raw(buffer);
let new_buffer_key = tasking.proc_data_buffers_mut(pid).insert(buffer);
#[expect(
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
let data_bufs = tasking.proc_data_buffers_mut(pid).unwrap();
let new_buffer_key = data_bufs.insert(buffer);
#[expect(
clippy::unwrap_used,
reason = "The option was already checked at the start of the if-let"
)]
let queue = tasking.message_queue_mut(pid).unwrap();
queue.push((new_buffer_key, len));
if tasking.proc_sleeping(pid) == Some(SleepReason::WaitingForIPC) {
tasking.wake(pid);
#[expect(
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
let sleep_status = tasking.proc_sleeping(pid).unwrap();
if sleep_status == Some(SleepReason::WaitingForIPC) {
#[expect(
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
tasking.wake(pid).unwrap();
}
retval = 0;
} else {
@ -358,53 +474,80 @@ extern "C" fn syscall_handler() {
12 => {
let mut tasking = TASKING.lock();
if let Some(msg) = tasking.current_message_queue_mut().pop() {
retval2 = msg.1 as u64;
retval = *tasking.data_buffers_mut().get(msg.0).unwrap() as *const u8 as u64;
retval3 = msg.0 as u64;
#[expect(
clippy::unwrap_used,
reason = "The message queue only contains valid buffer IDs"
)]
let buffer_addr =
u64((*tasking.data_buffers_mut().get(msg.0).unwrap()).expose_provenance());
retval2 = u64(msg.1);
retval = buffer_addr;
retval3 = u64(msg.0);
} else {
retval = 0;
}
}
13 => {
retval = TASKING.lock().current_pid().unwrap() as u64;
#[expect(
clippy::unwrap_used,
reason = "Syscalls cannot be called during early boot, the only time when there is no current PID"
)]
let pid = u64(TASKING.lock().current_pid().unwrap());
retval = pid;
}
14 => {
let page = Page::from_start_address(VirtAddr::new(regs.rdx)).unwrap();
let num_pages = regs.rsi as usize;
14 => 'call14: {
let Ok(page) = Page::from_start_address(VirtAddr::new(regs.rdx)) else {
retval = 1;
break 'call14;
};
let num_pages = usize(regs.rsi);
let flags = PageTableFlags::from_bits_truncate(regs.rdi);
retval = if regs.rcx == 0 {
let failed = if regs.rcx == 0 {
ACTIVE_SPACE.lock().map_only_unused(page, num_pages, flags)
} else {
TASKING
.lock()
.address_spaces_mut()
.get_mut((regs.rcx - 1) as usize)
.unwrap()
.map_only_unused(page, num_pages, flags)
let mut tasking = TASKING.lock();
let Some(address_space) =
#[warn(clippy::arithmetic_side_effects, reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0.")]
tasking.address_spaces_mut().get_mut(usize(regs.rcx - 1))
else {
retval = 1;
break 'call14;
};
address_space.map_only_unused(page, num_pages, flags)
}
.is_err() as u64;
.is_err();
retval = failed.into();
}
15 => {
get_buffer(regs.rcx);
}
16 => {
let size = regs.rcx as usize;
let rounded_size = size + (4096 - (size % 4096));
let size = usize(regs.rcx);
let rounded_size = size.next_multiple_of(4096);
KERNEL_SPACE.lock().alloc_force_user = true;
let mut buffer = Vec::with_capacity_in(rounded_size, &*KERNEL_SPACE);
buffer.resize(rounded_size, 0);
let buffer = buffer.into_boxed_slice();
let buffer = Box::into_raw(buffer);
KERNEL_SPACE.lock().alloc_force_user = false;
retval = TASKING.lock().data_buffers_mut().insert(buffer) as u64;
retval2 = buffer as *mut u8 as u64;
retval3 = rounded_size as u64;
retval = u64(TASKING.lock().data_buffers_mut().insert(buffer));
retval2 = u64(buffer.cast::<u8>().expose_provenance());
retval3 = u64(rounded_size);
}
17 => {
let mut tasking = TASKING.lock();
let space = tasking.address_spaces_mut().get_mut((regs.rcx - 1) as usize).unwrap();
#[warn(clippy::expect_used, reason = "FIXME")]
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
let space = tasking
.address_spaces_mut()
.get_mut(usize(regs.rcx - 1))
.expect("Invalid address space");
let slice_start: *mut u8 = ptr::with_exposed_provenance_mut(usize(regs.rdx));
space.run(|| unsafe {
slice::from_raw_parts_mut(regs.rdx as *mut u8, regs.rsi as usize).fill(0)
slice::from_raw_parts_mut(slice_start, usize(regs.rsi)).fill(0);
});
retval = 0;
}
@ -439,6 +582,6 @@ extern "C" fn syscall_handler() {
}
pub fn register_handler(irq: u8, handler: IrqHandler) -> Result<(), InvalidIrq> {
*(IRQ_HANDLERS.write().get_mut(irq as usize).ok_or(InvalidIrq)?) = Some(handler);
*(IRQ_HANDLERS.write().get_mut(usize(irq)).ok_or(InvalidIrq)?) = Some(handler);
Ok(())
}

View File

@ -2,45 +2,30 @@ use crate::{println, virtual_memory::KERNEL_SPACE};
use core::{
alloc::{GlobalAlloc, Layout},
ptr::NonNull,
ptr::{self, NonNull},
sync::atomic::{AtomicUsize, Ordering},
};
use humansize::{SizeFormatter, BINARY};
use linked_list_allocator::hole::HoleList;
use spin::Mutex;
use x86_64::structures::paging::PageTableFlags;
pub struct Heap {
heap: Mutex<HoleList>,
bytes_alloced: AtomicUsize,
bytes_freed: AtomicUsize,
hole_list: Mutex<HoleList>,
bytes_used: AtomicUsize,
pmem_size: AtomicUsize,
}
fn format_byte_count(count: usize) -> (f64, &'static str) {
let mut count = count as f64;
let mut prefix = 0;
while count >= 1024.0 {
count /= 1024.0;
prefix += 1;
}
let prefix = ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei"][prefix];
(count, prefix)
}
impl Heap {
pub fn print_stats(&self) {
let (fmtd_alloced, alloced_pfx) =
format_byte_count(self.bytes_alloced.load(Ordering::Relaxed));
let (fmtd_freed, freed_pfx) = format_byte_count(self.bytes_freed.load(Ordering::Relaxed));
let (fmtd_total, total_pfx) = format_byte_count(
self.bytes_alloced.load(Ordering::Relaxed) - self.bytes_freed.load(Ordering::Relaxed),
);
let (fmtd_pmem, pmem_pfx) = format_byte_count(self.pmem_size.load(Ordering::Relaxed));
println!(
"[HEAP] {:2} {}B allocated, {:2} {}B freed ({:2} {}B total)",
fmtd_alloced, alloced_pfx, fmtd_freed, freed_pfx, fmtd_total, total_pfx
"[HEAP] {} currently allocated",
SizeFormatter::new(self.bytes_used.load(Ordering::Relaxed), BINARY)
);
println!(
"[HEAP] {} physical memory used for heap",
SizeFormatter::new(self.pmem_size.load(Ordering::Relaxed), BINARY)
);
println!("[HEAP] {:2} {}B physical memory used for heap", fmtd_pmem, pmem_pfx);
}
}
@ -49,43 +34,66 @@ unsafe impl Sync for Heap {}
unsafe impl GlobalAlloc for Heap {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut locked_self = self.heap.lock();
let mut locked_self = self.hole_list.lock();
let (ptr, true_layout) = locked_self
.allocate_first_fit(layout)
.map(|(allocation, true_layout)| (allocation.as_ptr(), true_layout))
.unwrap_or_else(|_| {
.map_or_else(|()| {
drop(locked_self);
let num_pages = layout.size().div_ceil(4096) * 2;
self.pmem_size.fetch_add(num_pages * 4096, Ordering::Relaxed);
let ptr = KERNEL_SPACE.lock().map_free(num_pages, PageTableFlags::empty()).unwrap();
let layout = Layout::from_size_align(num_pages * 4096, 4096).unwrap();
unsafe { self.heap.lock().deallocate(NonNull::new_unchecked(ptr), layout) };
self.heap
let bytes_added = layout.size().next_multiple_of(4096).saturating_mul(2) & !(0xFFF);
let num_pages = bytes_added / 4096;
self.pmem_size.fetch_add(bytes_added, Ordering::Relaxed);
let Ok(ptr) = KERNEL_SPACE.lock().map_free(num_pages, PageTableFlags::empty()) else {
return (ptr::null_mut(), layout);
};
#[expect(clippy::unwrap_used, reason = "
from_size_align requires align to be a nonzero power of two, which it is.
Also, size must be less than isize when rounded up to a multiple of align.
Since size is already a multiple of align, no overflow will occur.
")]
let layout = Layout::from_size_align(bytes_added, 4096).unwrap();
unsafe { self.hole_list.lock().deallocate(NonNull::new_unchecked(ptr), layout) };
#[expect(clippy::expect_used, reason = "
The deallocate call should have given enough space to complete the allocation.
Thus, if this allocation fails it is a bug in the allocator, which should be treated as fatal.
")]
self.hole_list
.lock()
.allocate_first_fit(layout)
.map(|(allocation, true_layout)| (allocation.as_ptr(), true_layout))
.unwrap()
});
assert!((ptr as usize & (layout.align() - 1)) == 0);
self.bytes_alloced.fetch_add(true_layout.size(), Ordering::Relaxed);
.expect("Failed to allocate after adding memory to the heap")
}, |(allocation, true_layout)| (allocation.as_ptr(), true_layout));
if ptr.is_null() {
return ptr::null_mut();
}
#[expect(
clippy::arithmetic_side_effects,
reason = "align cannot be 0, so the subtraction cannot underflow"
)]
{
assert!((ptr.expose_provenance() & (layout.align() - 1)) == 0);
}
self.bytes_used.fetch_add(true_layout.size(), Ordering::Relaxed);
ptr
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
self.bytes_freed.fetch_add(layout.size(), Ordering::Relaxed);
unsafe { self.heap.lock().deallocate(NonNull::new_unchecked(ptr), layout) };
self.bytes_used.fetch_sub(layout.size(), Ordering::Relaxed);
unsafe { self.hole_list.lock().deallocate(NonNull::new_unchecked(ptr), layout) };
}
}
#[global_allocator]
pub static HEAP: Heap = Heap {
heap: Mutex::new(HoleList::empty()),
bytes_alloced: AtomicUsize::new(0),
bytes_freed: AtomicUsize::new(0),
hole_list: Mutex::new(HoleList::empty()),
bytes_used: AtomicUsize::new(0),
pmem_size: AtomicUsize::new(0),
};
#[alloc_error_handler]
#[expect(
clippy::panic,
reason = "A panic is our only real choice here as this function must diverge"
)]
fn alloc_error_handler(layout: Layout) -> ! {
panic!("allocation error: {:?}", layout)
}

View File

@ -4,9 +4,71 @@
#![feature(alloc_error_handler)]
#![feature(allocator_api)]
#![feature(naked_functions)]
#![feature(int_roundings)]
#![feature(type_alias_impl_trait)]
#![deny(unsafe_op_in_unsafe_fn)]
#![feature(stmt_expr_attributes)]
#![feature(exposed_provenance)]
// #![feature(strict_provenance)]
#![deny(
unsafe_op_in_unsafe_fn,
reason = "Unsafe blocks make it clear what parts of the code is unsafe and should not be skipped in unsafe functions, which are only unsafe to call."
)]
#![deny(
unfulfilled_lint_expectations,
reason = "If the expected lint goes away, this most likely means the expect attribute should be removed"
)]
// #![deny(fuzzy_provenance_casts)]
#![allow(clippy::duplicated_attributes, reason = "Identical reasons trigger this falsely")]
#![deny(
clippy::unwrap_used,
reason = "Unwraps must be used only when they will provably not panic, and the justification must be provided"
)]
#![deny(
clippy::expect_used,
reason = "Expects will cause a kernel panic. Kernel panics must be justified, as the kernel crashing is irrecoverable and brings the whole system down."
)]
#![deny(
clippy::panic,
reason = "Kernel panics must be justified, as the kernel crashing is irrecoverable and brings the whole system down."
)]
#![deny(
clippy::expect_used,
reason = "Expects will cause a kernel panic. Kernel panics must be justified, as the kernel crashing is irrecoverable and brings the whole system down."
)]
#![deny(
clippy::allow_attributes,
reason = "Expect attributes warn when the expected lint is not present, which is preferable"
)]
#![deny(clippy::allow_attributes_without_reason, reason = "Allowing lints needs a justification")]
#![deny(clippy::arithmetic_side_effects)]
#![deny(clippy::indexing_slicing)]
#![warn(clippy::unimplemented)]
#![warn(clippy::todo)]
#![warn(clippy::pedantic)]
#![allow(clippy::cast_lossless, reason = "Covered by as_conversions")]
#![allow(clippy::cast_possible_truncation, reason = "Covered by as_conversions")]
#![allow(clippy::cast_possible_wrap, reason = "Covered by as_conversions")]
#![allow(clippy::cast_precision_loss, reason = "Covered by as_conversions")]
#![allow(clippy::cast_sign_loss, reason = "Covered by as_conversions")]
#![allow(
clippy::match_same_arms,
reason = "Repeated match arms usually exist for readability or when the ordering is important"
)]
#![allow(clippy::missing_panics_doc, reason = "Don't care")]
#![allow(clippy::missing_errors_doc, reason = "Don't care")]
#![allow(clippy::similar_names, reason = "Don't care")]
#![allow(clippy::too_many_lines, reason = "Don't care")]
#![warn(clippy::nursery)]
#![allow(clippy::suspicious_operation_groupings, reason = "Too easy for false positives")]
#![allow(
clippy::option_if_let_else,
reason = "if-let is for imperative code, map_or for functional. Not the same unlike what this lint says."
)]
#![allow(clippy::non_send_fields_in_send_ty, reason = "Too easy for false positives")]
#![allow(clippy::missing_const_for_fn, reason = "Most triggers don't actually make sense as const")]
#![allow(
clippy::while_float,
reason = "Lint checks for a construct you'd have to be really stupid to write and has easy false positives"
)]
#![deny(clippy::as_conversions)]
extern crate alloc;
@ -23,9 +85,10 @@ mod start;
mod tasking;
mod virtual_memory;
use core::{slice, usize};
use core::{ptr, slice};
use bootinfo::BOOTINFO;
use cast::usize;
use elf::{
abi::{
PT_DYNAMIC, PT_GNU_EH_FRAME, PT_GNU_RELRO, PT_GNU_STACK, PT_LOAD, PT_NULL, PT_PHDR,
@ -47,6 +110,10 @@ use crate::virtual_memory::AddressSpace;
// pub static INITRD: &[u8] = include_bytes!("../initrd.tar");
#[expect(
clippy::expect_used,
reason = "Nothing to do here but panic on errors, this is the top level"
)]
pub fn main() {
let mut rflags_data = rflags::read();
rflags_data |= RFlags::IOPL_HIGH | RFlags::IOPL_LOW;
@ -57,19 +124,23 @@ pub fn main() {
interrupts::init();
pit::init(100);
let initrd = unsafe {
let ramdisk_start = BOOTINFO.ramdisk_addr.into_option().expect("ramdisk to be present");
let ramdisk_start = BOOTINFO.ramdisk_addr.into_option().expect("initrd not present");
let ramdisk_len = BOOTINFO.ramdisk_len;
slice::from_raw_parts(ramdisk_start as *const u8, ramdisk_len as usize)
slice::from_raw_parts(
ptr::with_exposed_provenance(usize(ramdisk_start)),
usize(ramdisk_len),
)
};
let initrd = TarArchiveRef::new(initrd).unwrap();
let initrd = TarArchiveRef::new(initrd).expect("initrd not valid TAR archive");
let init_data = initrd
.entries()
.find(|x| x.filename().as_str().unwrap() == "bin/init")
.find(|x| x.filename().as_str() == Ok("bin/init"))
.expect("Could not find init in initrd")
.data();
let init = ElfBytes::<AnyEndian>::minimal_parse(&init_data).unwrap();
let mut init_addr_space = AddressSpace::new().unwrap();
for mut pheader in init.segments().unwrap().iter() {
let init = ElfBytes::<AnyEndian>::minimal_parse(init_data).expect("init not valid ELF file");
let mut init_addr_space = AddressSpace::new().expect("failed to create address space for init");
let pheaders = init.segments().expect("init has no program headers (not an executable?)");
for mut pheader in pheaders {
match pheader.p_type {
PT_NULL => (),
PT_LOAD => {
@ -77,33 +148,65 @@ pub fn main() {
if pheader.p_memsz < 0x1000 {
continue;
}
pheader.p_offset += 0x1000 - pheader.p_vaddr;
pheader.p_memsz -= 0x1000 - pheader.p_vaddr;
pheader.p_filesz -= 0x1000 - pheader.p_vaddr;
#[expect(
clippy::arithmetic_side_effects,
reason = "p_vaddr has been checked to be below 0x1000, thus this cannot underflow"
)]
{
pheader.p_offset += 0x1000 - pheader.p_vaddr;
pheader.p_memsz -= 0x1000 - pheader.p_vaddr;
pheader.p_filesz -= 0x1000 - pheader.p_vaddr;
}
pheader.p_vaddr = 0x1000;
}
let start_page = Page::containing_address(VirtAddr::new(pheader.p_vaddr));
let num_pages = (pheader.p_memsz.div_ceil(4096)
+ (pheader.p_vaddr & 0xFFF).div_ceil(4096))
as usize;
assert!(
(start_page.start_address().as_u64() + num_pages as u64 * 4096)
>= (pheader.p_vaddr + pheader.p_memsz)
);
#[allow(clippy::cast_possible_truncation)]
let num_pages = if pheader.p_vaddr.trailing_zeros() >= 12 {
usize(pheader.p_memsz.div_ceil(4096))
} else {
#[expect(
clippy::arithmetic_side_effects,
reason = "The RHS is always < 4096, thus this cannot underflow"
)]
let page_part_sz = 4096 - (pheader.p_vaddr & 0xFFF);
if pheader.p_memsz < page_part_sz {
1
} else {
#[expect(
clippy::arithmetic_side_effects,
reason = "Sub: pheader.p_memsz >= page_part_sz, thus this cannot underflow. Add: usize::MAX.div_ceil(4096) < usize::MAX, so this cannot overflow."
)]
let res = 1 + usize((pheader.p_memsz - page_part_sz).div_ceil(4096));
res
}
};
init_addr_space
.map_only_unused(start_page, num_pages, PageTableFlags::USER_ACCESSIBLE)
.expect("Unable to map region");
init_addr_space.run(|| unsafe {
let dst = slice::from_raw_parts_mut(
pheader.p_vaddr as *mut u8,
pheader.p_memsz as usize,
ptr::with_exposed_provenance_mut(usize(pheader.p_vaddr)),
usize(pheader.p_memsz),
);
dst[0..pheader.p_filesz as usize].copy_from_slice(
&init_data[(pheader.p_offset as usize)
..((pheader.p_offset + pheader.p_filesz) as usize)],
);
dst[(pheader.p_filesz as usize)..(pheader.p_memsz as usize)].fill(0)
dst.get_mut(0..usize(pheader.p_filesz))
.expect("Pheader filesize greater tham memsize")
.copy_from_slice(
init_data
.get(
usize(pheader.p_offset)
..usize(
pheader
.p_offset
.checked_add(pheader.p_filesz)
.expect("End of segment in file wraps around"),
),
)
.expect(
"Program header references data beyond the end of the file",
),
);
dst.get_mut(usize(pheader.p_filesz)..usize(pheader.p_memsz))
.expect("Pheader filesize greater than memsize")
.fill(0);
});
}
PT_GNU_RELRO => (),
@ -114,24 +217,47 @@ pub fn main() {
_ => println!("Warning: Unimplemented ELF program header type {:#x}", pheader.p_type),
}
}
for section in init.section_headers().unwrap().iter() {
if section.sh_type == SHT_REL {
for rel in init.section_data_as_rels(&section).unwrap() {
match rel.r_type {
_ => unimplemented!("ELF relocation type {}", rel.r_type),
if let Some(section_headers) = init.section_headers() {
for section in section_headers.iter() {
if section.sh_type == SHT_REL {
#[expect(
clippy::never_loop,
reason = "This loop exists as a template to add relocations when they are implemented"
)]
#[expect(
clippy::unwrap_used,
reason = "section_data_as_rels requires the section type to be SHT_REL, which we already checked"
)]
for rel in init.section_data_as_rels(&section).unwrap() {
#[expect(
clippy::match_single_binding,
reason = "This match exists as a template to add relocations when they are implemented"
)]
match rel.r_type {
_ => unimplemented!("ELF relocation type {}", rel.r_type),
}
}
}
}
if section.sh_type == SHT_RELA {
for rela in init.section_data_as_relas(&section).unwrap() {
match rela.r_type {
R_X86_64_RELATIVE => {
init_addr_space.run(|| unsafe {
let ptr = rela.r_offset as *mut u64;
ptr.write(rela.r_addend as u64);
});
if section.sh_type == SHT_RELA {
#[expect(
clippy::unwrap_used,
reason = "section_data_as_relas requires the section type to be SHT_RELA, which we already checked"
)]
for rela in init.section_data_as_relas(&section).unwrap() {
match rela.r_type {
R_X86_64_RELATIVE => {
init_addr_space.run(|| unsafe {
let ptr =
ptr::with_exposed_provenance_mut::<u64>(usize(rela.r_offset));
ptr.write(
rela.r_addend
.try_into()
.expect("Invalid addend for relocation"),
);
});
}
_ => unimplemented!("ELF relocation type {}", rela.r_type),
}
_ => unimplemented!("ELF relocation type {}", rela.r_type),
}
}
}
@ -139,12 +265,12 @@ pub fn main() {
// Before starting init, write the pcapng section header + interface description to the second serial port
SECOND_PORT.write_u32s(&[
0x0A0D0D0A, // SHB type
0x0A0D_0D0A, // SHB type
7 * 4, // Total block length
0x1A2B3C4D, // Byte order magic
0x1A2B_3C4D, // Byte order magic
0x0000_0001, // Version (1.0)
0xFFFFFFFF, // Length upper (-1) across both
0xFFFFFFFF, // Length lower
0xFFFF_FFFF, // Length upper (-1) across both
0xFFFF_FFFF, // Length lower
7 * 4, // Total block length
0x1, // IDB type
5 * 4, // Total block length
@ -153,8 +279,12 @@ pub fn main() {
5 * 4, // Total block length
]);
#[expect(
clippy::expect_used,
reason = "Boot cannot happen if the init process cannot be created"
)]
TASKING
.lock()
.new_process(init.ehdr.e_entry as _, init_addr_space)
.new_process(ptr::with_exposed_provenance(usize(init.ehdr.e_entry)), init_addr_space)
.expect("Failed to create init process");
}

View File

@ -4,8 +4,10 @@ use crate::{
virtual_memory::{AsVirt, PHYS_OFFSET},
};
use bootloader_api::info::MemoryRegionKind;
use cast::{u64, usize};
use core::{alloc::Layout, ptr::NonNull};
use derive_try_from_primitive::TryFromPrimitive;
use humansize::{SizeFormatter, BINARY};
use linked_list_allocator::hole::HoleList;
use spin::{Lazy, Mutex};
use x86_64::{
@ -27,42 +29,24 @@ enum EfiMemoryTypes {
Unusable,
ACPIReclaim,
ACPIMemoryNVS,
MMIO,
MMIOPortSpace,
Mmio,
MmioPortSpace,
PalCode,
Persistent,
Invalid = u32::MAX,
}
pub struct PhysicalMemory {
alloc: HoleList,
frames_allocated: usize,
frames_freed: usize,
bytes_used: usize,
}
unsafe impl Send for PhysicalMemory {}
unsafe impl Sync for PhysicalMemory {}
/// Scales a raw byte count down by powers of 1024 and pairs it with the
/// matching binary (IEC) unit prefix, e.g. `2048` -> `(2.0, "Ki")`.
///
/// The returned string is the prefix only (caller appends `"B"`).
fn format_byte_count(count: usize) -> (f64, &'static str) {
    const PREFIXES: [&str; 7] = ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei"];
    let mut scaled = count as f64;
    let mut idx = 0;
    // usize::MAX < 1024^7, so idx can never run past the end of PREFIXES.
    while scaled >= 1024.0 {
        scaled /= 1024.0;
        idx += 1;
    }
    (scaled, PREFIXES[idx])
}
impl PhysicalMemory {
pub fn print_stats(&self) {
let (fmtd_alloced, alloced_pfx) = format_byte_count(self.frames_allocated * 4096);
let (fmtd_freed, freed_pfx) = format_byte_count(self.frames_freed * 4096);
let (fmtd_total, total_pfx) =
format_byte_count((self.frames_allocated - self.frames_freed) * 4096);
println!(
"[PMM] {:2} {}B allocated, {:2} {}B freed ({:2} {}B total)",
fmtd_alloced, alloced_pfx, fmtd_freed, freed_pfx, fmtd_total, total_pfx
);
println!("[PMM] {} currently allocated", SizeFormatter::new(self.bytes_used, BINARY));
}
}
@ -70,10 +54,12 @@ const FRAME_LAYOUT: Layout = unsafe { Layout::from_size_align_unchecked(4096, 40
unsafe impl FrameAllocator<Size4KiB> for PhysicalMemory {
fn allocate_frame(&mut self) -> Option<PhysFrame> {
self.frames_allocated += 1;
self.bytes_used = self.bytes_used.saturating_add(4096);
self.alloc.allocate_first_fit(FRAME_LAYOUT).ok().map(|(ptr, _)| {
#[expect(clippy::unwrap_used, reason = "PhysFrame requires its argument to be 4k aligned, which is guaranteed by the allocator")]
#[expect(clippy::arithmetic_side_effects, reason = "All addresses passed to the allocator are > PHYS_OFFSET, so this cannot underflow")]
PhysFrame::from_start_address(PhysAddr::new(
(ptr.as_ptr() as u64) - PHYS_OFFSET.as_u64(),
(u64(ptr.as_ptr().expose_provenance())) - PHYS_OFFSET.as_u64(),
))
.unwrap()
})
@ -82,8 +68,14 @@ unsafe impl FrameAllocator<Size4KiB> for PhysicalMemory {
impl FrameDeallocator<Size4KiB> for PhysicalMemory {
unsafe fn deallocate_frame(&mut self, frame: PhysFrame) {
self.frames_freed += 1;
unsafe { self.alloc.deallocate(NonNull::new(frame.as_virt_ptr()).unwrap(), FRAME_LAYOUT) };
self.bytes_used = self.bytes_used.saturating_sub(4096);
#[expect(
clippy::unwrap_used,
reason = "The virtual mapping for the frame will never be null as the physical mapping offset is in kernel space"
)]
unsafe {
self.alloc.deallocate(NonNull::new(frame.as_virt_ptr()).unwrap(), FRAME_LAYOUT)
};
}
}
@ -93,60 +85,55 @@ pub static PHYSICAL_MEMORY: Lazy<Mutex<PhysicalMemory>> = Lazy::new(|| {
let mut total_mem = 0;
let mut usable_mem = 0;
let mut alloc = HoleList::empty();
loop {
let mut region = if let Some(region) = region_iter.next() {
region.clone()
} else {
break;
};
loop {
if let Some(next_region) = region_iter.peek() {
if (next_region.kind == region.kind) && (next_region.start == region.end) {
region.end = next_region.end;
region_iter.next();
} else {
break;
}
while let Some(&(mut region)) = region_iter.next() {
while let Some(next_region) = region_iter.peek() {
if (next_region.kind == region.kind) && (next_region.start == region.end) {
region.end = next_region.end;
region_iter.next();
} else {
break;
}
}
let (fmtd_size, pfx) = format_byte_count((region.end - region.start) as usize);
let fmtd_size = SizeFormatter::new(region.end - region.start, BINARY);
if let MemoryRegionKind::UnknownUefi(efi_type) = region.kind {
println!(
"[PMM] Efi{:?}: {:#x} - {:#x} ({:2} {}B)",
EfiMemoryTypes::try_from(efi_type).unwrap(),
"[PMM] Efi{:?}: {:#x} - {:#x} ({})",
EfiMemoryTypes::try_from(efi_type).unwrap_or(EfiMemoryTypes::Invalid),
region.start,
region.end,
fmtd_size,
pfx
);
} else {
println!(
"[PMM] {:?}: {:#x} - {:#x} ({:2} {}B)",
region.kind, region.start, region.end, fmtd_size, pfx
"[PMM] {:?}: {:#x} - {:#x} ({})",
region.kind, region.start, region.end, fmtd_size
);
}
total_mem += region.end - region.start;
if region.kind == MemoryRegionKind::Usable {
region.end = region.end & !(0xFFF);
region.end &= !(0xFFF);
if region.start & 0xFFF != 0 {
region.start = (region.start & !(0xFFF)) + 0x1000;
}
usable_mem += region.end - region.start;
unsafe {
alloc.deallocate(
#[expect(clippy::unwrap_used, reason = "The virtual mapping for the frame will never be null as the physical mapping offset is in kernel space")]
NonNull::new(PhysAddr::new(region.start).as_virt_ptr()).unwrap(),
Layout::from_size_align((region.end - region.start) as usize, 4096).unwrap(),
#[expect(clippy::unwrap_used, reason = "
from_size_align requires align to be a nonzero power of two, which it is.
Also, size must be less than isize when rounded up to a multiple of align.
Since size is already a multiple of align due to start and end being page aligned, no overflow will occur.
")]
Layout::from_size_align(usize(region.end - region.start), 4096).unwrap(),
);
}
}
}
let (fmtd_usable, usable_pfx) = format_byte_count(usable_mem as usize);
let (fmtd_total, total_pfx) = format_byte_count(total_mem as usize);
println!(
"[PMM] Initialized, found {:.2} {}B of usable memory, {:2} {}B total",
fmtd_usable, usable_pfx, fmtd_total, total_pfx
"[PMM] Initialized, found {} of usable memory, {} total",
SizeFormatter::new(usable_mem, BINARY),
SizeFormatter::new(total_mem, BINARY),
);
Mutex::new(PhysicalMemory { alloc, frames_allocated: 0, frames_freed: 0 })
Mutex::new(PhysicalMemory { alloc, bytes_used: 0 })
});

View File

@ -11,7 +11,13 @@ static CMD: Mutex<PortWriteOnly<u8>> = Mutex::new(PortWriteOnly::new(0x43));
const MAX_FREQ: u32 = 1_193_180;
pub fn init(mut freq: u32) {
assert_ne!(freq, 0);
#[expect(
clippy::unwrap_used,
reason = "register_handler requires the interrupt number to be less than 16, which it is."
)]
interrupts::register_handler(0, handler).unwrap();
#[expect(clippy::arithmetic_side_effects, reason = "freq has been checked to not be 0")]
let mut div = MAX_FREQ / freq;
if div > 65535 {
println!("[PIT] Frequency of {}Hz too slow, min freq is 18 Hz", freq);
@ -22,6 +28,12 @@ pub fn init(mut freq: u32) {
div = 1;
freq = MAX_FREQ;
}
#[expect(
clippy::unwrap_used,
reason = "div has been checked to be in u16 range above, so this cannot panic"
)]
let div_bytes = u16::try_from(div).unwrap().to_le_bytes();
println!("[PIT] Setting PIT to {}Hz with divisor of {}", freq, div);
// Command breakdown (MSB to LSB):
// 00 - Channel 0
@ -30,8 +42,8 @@ pub fn init(mut freq: u32) {
// frequency
// 0 - binary mode, always used
unsafe { CMD.lock().write(0b0011_0110_u8) };
unsafe { DATA.lock().write((div & 0xFF) as u8) };
unsafe { DATA.lock().write(((div >> 8) & 0xFF) as u8) };
unsafe { DATA.lock().write(div_bytes[0]) };
unsafe { DATA.lock().write(div_bytes[1]) };
}
fn handler(_irq: u8, eoi_guard: EoiGuard) {

View File

@ -78,5 +78,9 @@ macro_rules! dbg {
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
(&*FIRST_PORT).write_fmt(args).unwrap();
#[expect(
clippy::expect_used,
reason = "Kernel prints tend to be fairly important, so if printing fails a panic is in order."
)]
(&*FIRST_PORT).write_fmt(args).expect("Failed to print to the serial port");
}

View File

@ -10,8 +10,6 @@ pub static BOOTLOADER_CONFIG: BootloaderConfig = {
entry_point!(start, config = &BOOTLOADER_CONFIG);
#[allow(clippy::missing_panics_doc)]
#[allow(clippy::missing_errors_doc)]
fn start(bootinfo: &'static mut BootInfo) -> ! {
BOOTINFO.set(bootinfo);
main();

View File

@ -1,3 +1,5 @@
//#![allow(clippy::indexing_slicing, reason = "temp")]
use crate::{
gdt, println, qemu_exit,
virtual_memory::{ASpaceMutex, AddressSpace, PagingError, KERNEL_SPACE},
@ -100,6 +102,9 @@ struct Process {
unsafe impl Send for Process {}
/// Error returned by `Tasking` accessors when the supplied PID does not
/// name a live process in the process slab.
#[derive(Copy, Clone, Debug)]
pub struct InvalidPid;
pub static TASKING: Lazy<Mutex<Tasking>> = Lazy::new(|| {
Mutex::new(Tasking {
processes: Slab::new(),
@ -124,19 +129,34 @@ impl Tasking {
mut address_space: AddressSpace,
) -> Result<usize, PagingError> {
let mut kernel_stack = Vec::new_in(&*KERNEL_SPACE);
kernel_stack.resize(0x1_0000, 0);
kernel_stack.resize(0x1_0000 - 0x4, 0);
#[expect(clippy::as_conversions, reason = "Needed to get address of function")]
{
kernel_stack.push(task_force_unlock as usize);
kernel_stack.push(task_init as usize);
}
kernel_stack.push(0xFFF_FF80_0000 + (16 * 4096));
kernel_stack.push(entry_point.expose_provenance());
let mut kernel_stack = kernel_stack.into_boxed_slice();
kernel_stack[0xFFFF] = entry_point as usize;
address_space.map_assert_unused(
#[expect(
clippy::unwrap_used,
reason = "from_start_address requires the address to be page aligned, which it is."
)]
Page::from_start_address(VirtAddr::new(0xFFF_FF80_0000)).unwrap(),
16,
PageTableFlags::USER_ACCESSIBLE,
)?;
kernel_stack[0xFFFE] = 0xFFF_FF80_0000 + (16 * 4096);
kernel_stack[0xFFFD] = task_init as usize;
kernel_stack[0xFFFC] = task_force_unlock as usize;
let pid = self.processes.insert(Process {
#[expect(
clippy::indexing_slicing,
reason = "Stack length is 0x1_0000, this cannot panic"
)]
kernel_esp: &mut kernel_stack[0xFFF6],
#[expect(
clippy::indexing_slicing,
reason = "Stack length is 0x1_0000, this cannot panic"
)]
kernel_esp_top: VirtAddr::from_ptr(addr_of!(kernel_stack[0xFFFF]).wrapping_add(1)),
kernel_stack,
address_space: Some(address_space),
@ -151,26 +171,40 @@ impl Tasking {
pub fn task_yield(&mut self) {
self.freeable_kstacks.clear();
let current_process = match self.current_process {
Some(x) => x,
None => return,
let Some(current_process) = self.current_process else {
return;
};
if let Some(next_process_pid) = self.ready_to_run.pop_front() {
#[expect(
clippy::expect_used,
reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
)]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let current_address_space = self.processes[next_process_pid]
.address_space
.take()
.expect("Non-current process has active page table")
.activate();
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
self.processes[current_process].address_space = Some(current_address_space);
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let next_process = &self.processes[next_process_pid];
gdt::set_tss_stack(next_process.kernel_esp_top);
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
if self.processes[current_process].sleeping.is_none() {
self.ready_to_run.push_back(current_process);
}
let kernel_esp = next_process.kernel_esp;
let previous_process = self.current_process.replace(next_process_pid).unwrap();
let previous_process = current_process;
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
self.current_process = Some(next_process_pid);
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
switch_to_asm(&mut (self.processes[previous_process].kernel_esp), kernel_esp);
} else if self.processes[current_process].sleeping.is_some() {
} else if {
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let res = self.processes[current_process].sleeping.is_some();
res
} {
println!("All processes sleeping, exiting QEMU");
qemu_exit::exit_qemu();
}
@ -185,7 +219,12 @@ impl Tasking {
if let Some(current_process) = self.current_process {
self.freeable_kstacks.push(self.processes.remove(current_process).kernel_stack);
}
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let next_process = &mut self.processes[next_process_pid];
#[expect(
clippy::expect_used,
reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
)]
next_process
.address_space
.take()
@ -203,38 +242,53 @@ impl Tasking {
}
/// Returns a mutable handle to the address-space slab of the currently
/// running process.
///
/// Panics if no process is current (`current_process` unwrap) or if the
/// stored PID is stale (slab indexing) — both flagged below as FIXMEs.
pub fn address_spaces_mut(&mut self) -> &mut Slab<AddressSpace> {
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
&mut self.processes[self.current_process.unwrap()].address_spaces
}
/// Returns a mutable handle to the data-buffer slab of the currently
/// running process.
///
/// Panics if no process is current (`current_process` unwrap) or if the
/// stored PID is stale (slab indexing) — both flagged below as FIXMEs.
pub fn data_buffers_mut(&mut self) -> &mut Slab<*mut [u8]> {
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
&mut self.processes[self.current_process.unwrap()].data_buffers
}
pub fn proc_data_buffers_mut(&mut self, pid: usize) -> &mut Slab<*mut [u8]> {
&mut self.processes[pid].data_buffers
/// Returns a mutable handle to the data-buffer slab of process `pid`.
///
/// Unlike the current-process accessor, this is non-panicking: an unknown
/// PID yields `Err(InvalidPid)`.
pub fn proc_data_buffers_mut(
    &mut self,
    pid: usize,
) -> Result<&mut Slab<*mut [u8]>, InvalidPid> {
    match self.processes.get_mut(pid) {
        Some(process) => Ok(&mut process.data_buffers),
        None => Err(InvalidPid),
    }
}
/// Returns a mutable handle to the message queue of the currently running
/// process.
///
/// Panics if no process is current (`current_process` unwrap) or if the
/// stored PID is stale (slab indexing) — both flagged below as FIXMEs.
pub fn current_message_queue_mut(&mut self) -> &mut SegQueue<(usize, usize)> {
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
&mut self.processes[self.current_process.unwrap()].message_queue
}
pub fn message_queue_mut(&mut self, pid: usize) -> Option<&mut SegQueue<(usize, usize)>> {
Some(&mut self.processes.get_mut(pid)?.message_queue)
/// Returns a mutable handle to the message queue of process `pid`.
///
/// Non-panicking: an unknown PID yields `Err(InvalidPid)`.
pub fn message_queue_mut(
    &mut self,
    pid: usize,
) -> Result<&mut SegQueue<(usize, usize)>, InvalidPid> {
    match self.processes.get_mut(pid) {
        Some(process) => Ok(&mut process.message_queue),
        None => Err(InvalidPid),
    }
}
pub fn proc_sleeping(&mut self, pid: usize) -> Option<SleepReason> {
self.processes[pid].sleeping
/// Reports whether process `pid` is sleeping, and if so why.
///
/// Returns `Ok(None)` for a runnable process and `Err(InvalidPid)` for an
/// unknown PID.
pub fn proc_sleeping(&self, pid: usize) -> Result<Option<SleepReason>, InvalidPid> {
    let process = self.processes.get(pid).ok_or(InvalidPid)?;
    Ok(process.sleeping)
}
/// Puts the currently running process to sleep for `reason` and yields to
/// the scheduler; the process stays off the ready queue until woken.
///
/// Panics if no process is current (`current_process` unwrap) — flagged
/// below as a FIXME.
pub fn sleep(&mut self, reason: SleepReason) {
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
self.processes[self.current_process.unwrap()].sleeping = Some(reason);
self.task_yield();
}
pub fn wake(&mut self, pid: usize) {
if self.processes[pid].sleeping.is_some() {
self.processes[pid].sleeping = None;
/// Wakes process `pid` if it is sleeping, putting it back on the ready
/// queue. Waking an already-runnable process is a no-op.
///
/// Returns `Err(InvalidPid)` if `pid` does not name a live process.
pub fn wake(&mut self, pid: usize) -> Result<(), InvalidPid> {
    let process = self.processes.get_mut(pid).ok_or(InvalidPid)?;
    // `take` clears the sleep reason and reports whether one was set,
    // collapsing the check-then-clear into a single slab lookup.
    if process.sleeping.take().is_some() {
        self.ready_to_run.push_back(pid);
    }
    Ok(())
}
}

View File

@ -1,8 +1,12 @@
mod holes;
use crate::{bootinfo::BOOTINFO, physical_memory::PHYSICAL_MEMORY};
use alloc::alloc::{AllocError, Allocator, Layout};
use core::{fmt, ops::Deref, ptr::NonNull, slice};
use cast::{u64, usize};
use core::{
fmt,
ops::Deref,
ptr::{self, NonNull},
slice,
};
use replace_with::replace_with_or_abort;
use spin::{Lazy, Mutex};
use x86_64::{
@ -30,7 +34,7 @@ impl fmt::Debug for AddressSpace {
f.debug_struct("AddressSpace")
.field("is_kernel", &self.is_kernel)
.field("alloc_force_user", &self.alloc_force_user)
.field("level_4_table", &(self.mapper.level_4_table() as *const PageTable))
.field("level_4_table", &ptr::from_ref(&self.mapper.level_4_table()))
.finish()
}
}
@ -44,7 +48,10 @@ pub enum PagingError {
PageAlreadyMapped,
PagesAlreadyMapped,
PageNotMapped,
InvalidFrameAddress(#[allow(dead_code)] PhysAddr),
InvalidFrameAddress(
#[expect(dead_code, reason = "This is useful in debug output when the error is unwrapped")]
PhysAddr,
),
}
impl From<MapToError<Size4KiB>> for PagingError {
@ -101,12 +108,26 @@ impl AsVirt for PhysFrame {
impl AsVirt for PhysAddr {
/// Converts this physical address into a virtual pointer through the
/// kernel's direct physical-memory map (`virtual = PHYS_OFFSET + physical`).
fn as_virt_ptr<T>(&self) -> *mut T {
#[expect(
clippy::arithmetic_side_effects,
reason = "While a sufficiently large amount of physical memory could cause this to wrap, this is so unlikely that paying the cost of checking is not worth it."
)]
(*PHYS_OFFSET + self.as_u64()).as_mut_ptr()
}
}
pub static PHYS_OFFSET: Lazy<VirtAddr> =
Lazy::new(|| VirtAddr::new(BOOTINFO.physical_memory_offset.into_option().unwrap()));
#[expect(
clippy::expect_used,
reason = "Without the physical memory mapping, the kernel cannot function."
)]
pub static PHYS_OFFSET: Lazy<VirtAddr> = Lazy::new(|| {
VirtAddr::new(
BOOTINFO
.physical_memory_offset
.into_option()
.expect("Bootloader failed to provide physical memory mapping"),
)
});
pub struct ASpaceMutex(Mutex<AddressSpace>);
@ -140,6 +161,10 @@ pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
// the reference cannot point to uninitialized data.
table[i].set_addr(
unsafe {
#[expect(
clippy::expect_used,
reason = "If we fail to allocate the kernel page table, it's fatal."
)]
let (new_child, new_child_phys) =
alloc_pt().expect("Could not allocate new kernel entry");
new_child.write(PageTable::new());
@ -153,15 +178,27 @@ pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
}
let mut kernel_space = AddressSpace::new_with_addr(table);
kernel_space.is_kernel = true;
let l4_virt = VirtAddr::from_ptr(kernel_space.mapper.level_4_table() as *const PageTable);
let l4_virt = VirtAddr::from_ptr(ptr::from_ref(kernel_space.mapper.level_4_table()));
#[expect(
clippy::unwrap_used,
reason = "This can only fail if the address is invalid. Since the address comes from a reference, that can't happen"
)]
let l4_phys = kernel_space.mapper.translate_addr(l4_virt).unwrap();
unsafe { Cr3::write(PhysFrame::containing_address(l4_phys), Cr3::read().1) };
ASpaceMutex::new(kernel_space)
});
pub static ACTIVE_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
#[expect(
clippy::expect_used,
reason = "If we fail to allocate the first active table, it's fatal."
)]
let new_space = AddressSpace::new().expect("Could not allocate new user table");
let l4_virt = VirtAddr::from_ptr(new_space.mapper.level_4_table() as *const PageTable);
let l4_virt = VirtAddr::from_ptr(ptr::from_ref(new_space.mapper.level_4_table()));
#[expect(
clippy::unwrap_used,
reason = "This can only fail if the address is invalid. Since the address comes from a reference, that can't happen"
)]
let l4_phys = new_space.mapper.translate_addr(l4_virt).unwrap();
unsafe { Cr3::write(PhysFrame::containing_address(l4_phys), Cr3::read().1) };
ASpaceMutex::new(new_space)
@ -203,17 +240,45 @@ impl AddressSpace {
// ownership of a value in a mutex unless you own the mutex, and no function owns a static.
assert!(!self.is_kernel);
Lazy::force(&ACTIVE_SPACE);
let l4_virt = VirtAddr::from_ptr(self.mapper.level_4_table() as *const PageTable);
let l4_virt = VirtAddr::from_ptr(ptr::from_ref(self.mapper.level_4_table()));
#[expect(
clippy::unwrap_used,
reason = "This can only fail if the address is invalid. Since the address comes from a reference, that can't happen"
)]
let l4_phys = self.mapper.translate_addr(l4_virt).unwrap();
unsafe { Cr3::write(PhysFrame::containing_address(l4_phys), Cr3::read().1) };
core::mem::replace(&mut *ACTIVE_SPACE.lock(), self)
}
fn check_request_valid(&self, start: Page, length: usize) -> Result<(), PagingError> {
if (self.is_kernel && start < KERNEL_PAGE_RANGE.start)
if (self.is_kernel
&& (start < KERNEL_PAGE_RANGE.start
|| ({
#[expect(
clippy::arithmetic_side_effects,
reason = "The end of the kernel range is the top of the address space. Thus no subtraction from it can underflow."
)]
let res = length >= usize(KERNEL_PAGE_RANGE.end - start);
res
})))
|| (!self.is_kernel
&& (start >= KERNEL_PAGE_RANGE.start)
&& ((start + (length as u64)) >= KERNEL_PAGE_RANGE.start))
&& ((start >= USER_PAGE_RANGE.end)
|| ({
#[expect(
clippy::arithmetic_side_effects,
reason = "The previous check guarenteed the request's start is not greater than the range's end"
)]
let res = length >= usize(USER_PAGE_RANGE.end - start);
res
})
|| {
#[expect(
clippy::arithmetic_side_effects,
reason = "The previous check guarenteed this wouldn't overflow"
)]
let res = (start + u64(length)) >= USER_PAGE_RANGE.end;
res
}))
{
Err(PagingError::RequestInvalid)
} else {
@ -222,7 +287,12 @@ impl AddressSpace {
}
fn check_request_unmapped(&self, start: Page, length: usize) -> Result<(), PagingError> {
for page in Page::range(start, start + length as u64) {
self.check_request_valid(start, length)?;
#[expect(
clippy::arithmetic_side_effects,
reason = "check_request_valid guarentees this won't overflow"
)]
for page in Page::range(start, start + u64(length)) {
if self.translate_addr(page.start_address()).is_some() {
return Err(PagingError::PagesAlreadyMapped);
}
@ -262,14 +332,13 @@ impl AddressSpace {
/// - Aliasing of `&mut` references, i.e. two `&mut` references that point to
/// the same physical address. This is undefined behavior in Rust.
/// - This can be ensured by by making sure all frames in the provided range
/// are not mapped anywhere else.
/// are not mapped anywhere else.
/// - Creating uninitalized or invalid values: Rust requires that all values
/// have a correct memory layout. For example, a `bool` must be either a 0
/// or a 1 in memory, but not a 3 or 4. An exception is the `MaybeUninit`
/// wrapper type, which abstracts over possibly uninitialized memory.
/// - This is only a problem when re-mapping pages to different physical
/// frames. Mapping pages that are not in use yet is fine.
#[allow(dead_code)]
unsafe fn map_to(
&mut self,
page: Page,
@ -277,10 +346,40 @@ impl AddressSpace {
num_pages: usize,
flags: PageTableFlags,
) -> Result<*mut u8, PagingError> {
// SAFETY: from_start_address_unchecked requires its argument to be 4k aligned, which it is
const MAX_PHYS_FRAME: PhysFrame = unsafe {
PhysFrame::from_start_address_unchecked(PhysAddr::new(0x000F_FFFF_FFFF_F000))
};
self.check_request_valid(page, num_pages)?;
for (page, frame) in (PageRange { start: page, end: page + num_pages as u64 })
.zip(PhysFrameRange { start: phys_frame, end: phys_frame + num_pages as u64 })
{
#[expect(
clippy::arithmetic_side_effects,
reason = "This is the maximum physical frame, so there is no way the subtraction can underflow"
)]
if (MAX_PHYS_FRAME - phys_frame) / 4096 < u64(num_pages) {
return Err(PagingError::RequestInvalid);
}
for (page, frame) in (PageRange {
start: page,
end: {
#[expect(
clippy::arithmetic_side_effects,
reason = "check_request_valid guarentees this won't overflow"
)]
let res = page + u64(num_pages);
res
},
})
.zip(PhysFrameRange {
start: phys_frame,
end: {
#[expect(
clippy::arithmetic_side_effects,
reason = "The check above guarentees this won't overflow"
)]
let res = phys_frame + u64(num_pages);
res
},
}) {
unsafe {
self.mapper
.map_to_with_table_flags(
@ -325,10 +424,14 @@ impl AddressSpace {
flags: PageTableFlags,
) -> Result<*mut u8, PagingError> {
self.check_request_valid(page, num_pages)?;
for page in (PageRange { start: page, end: page + num_pages as u64 }) {
#[expect(
clippy::arithmetic_side_effects,
reason = "check_request_valid guarentees this won't overflow"
)]
for page in (PageRange { start: page, end: page + u64(num_pages) }) {
unsafe {
let mut phys_mem = PHYSICAL_MEMORY.lock();
let frame = phys_mem.allocate_frame().unwrap();
let frame = phys_mem.allocate_frame().ok_or(PagingError::FrameAllocationFailed)?;
self.mapper
.map_to_with_table_flags(
page,
@ -377,7 +480,6 @@ impl AddressSpace {
/// aliasing of `&mut` references, i.e. two `&mut` references that point to the same
/// physical address. This is undefined behavior in Rust. Aliasing can be prevented
/// by making sure all frames in the provided range are not mapped anywhere else.
#[allow(dead_code)]
pub unsafe fn map_free_to(
&mut self,
phys_frame: PhysFrame,
@ -402,7 +504,6 @@ impl AddressSpace {
/// Same behavior as `map`, but asserts that the requested virtual page range is unmapped, and
/// thus is safe.
#[allow(unused)]
pub fn map_assert_unused(
&mut self,
page: Page,
@ -415,7 +516,6 @@ impl AddressSpace {
/// Same behavior as `map`, but only maps unmapped pages, and
/// thus is safe.
#[allow(unused)]
pub fn map_only_unused(
&mut self,
page: Page,
@ -423,16 +523,18 @@ impl AddressSpace {
flags: PageTableFlags,
) -> Result<*mut u8, PagingError> {
self.check_request_valid(page, num_pages)?;
if self.alloc_force_user {
panic!();
}
for page in (PageRange { start: page, end: page + num_pages as u64 }) {
assert!(!self.alloc_force_user);
#[expect(
clippy::arithmetic_side_effects,
reason = "check_request_valid guarentees this won't overflow"
)]
for page in (PageRange { start: page, end: page + u64(num_pages) }) {
if self.translate_addr(page.start_address()).is_some() {
continue;
}
unsafe {
let mut phys_mem = PHYSICAL_MEMORY.lock();
let frame = phys_mem.allocate_frame().unwrap();
let frame = phys_mem.allocate_frame().ok_or(PagingError::FrameAllocationFailed)?;
self.mapper
.map_to_with_table_flags(
page,
@ -451,13 +553,26 @@ impl AddressSpace {
/// Finds a range of free pages and returns the starting page
fn find_free_pages(&self, num_pages: usize) -> Result<Page, PagingError> {
if num_pages == 0 {
return Err(PagingError::PageAllocationFailed);
}
let mut remaining_pages = num_pages;
let range = if self.is_kernel { KERNEL_PAGE_RANGE } else { USER_PAGE_RANGE };
for page in range {
if self.translate_addr(page.start_address()).is_none() {
remaining_pages -= 1;
#[expect(
clippy::arithmetic_side_effects,
reason = "remaining_pages can never be 0 here, thus this can't underflow"
)]
{
remaining_pages -= 1;
}
if remaining_pages == 0 {
return Ok(page + 1 - (num_pages as u64));
#[expect(
clippy::arithmetic_side_effects,
reason = "page is at minimum num_pages - 1, thus this can't underflow"
)]
return Ok(page + 1 - u64(num_pages));
}
} else {
remaining_pages = num_pages;
@ -484,10 +599,19 @@ fn drop_table(table: &PageTable, level: u8) {
{
// SAFETY: The present flag is set on the entry, which means the child frame must
// contain a valid page table, so making a reference to it must be ok.
// Unwrap: from_start_address requires its input to be 4KiB aligned (have none of its lower 12 bits
// set). The addr method only returns a 4KiB aligned address if the HUGE_PAGE flag is not set.
// In addition, the returned address is only valid if the PRESENT flag is set.
// The if statements has ensured that if we get here, HUGE_FLAG is not set and PRESENT is set.
#[expect(
clippy::unwrap_used,
reason = "
from_start_address requires it's input to be 4k aligned.
The addr method only returns a 4k aligned address if the HUGE_PAGE flag is not set.
In addition, the returned address is only valid if the PRESENT flag is set.
The if statements has ensured that if we get here, HUGE_FLAG is not set and PRESENT is set.
"
)]
#[expect(
clippy::arithmetic_side_effects,
reason = "level is at minimum 3 here, thus this can't underflow"
)]
drop_table(
unsafe { PhysFrame::from_start_address(entry.addr()).unwrap().as_virt_ref() },
level - 1,
@ -495,7 +619,15 @@ fn drop_table(table: &PageTable, level: u8) {
}
}
}
#[expect(
clippy::unwrap_used,
reason = "This can only fail if the address is invalid. Since the address comes from a reference, that can't happen"
)]
let phys_addr = ACTIVE_SPACE.lock().translate_addr(VirtAddr::from_ptr(table)).unwrap();
#[expect(
clippy::unwrap_used,
reason = "PhysFrame requires its argument to be 4k aligned, which is guaranteed since page tables must be 4k aligned"
)]
unsafe {
PHYSICAL_MEMORY.lock().deallocate_frame(PhysFrame::from_start_address(phys_addr).unwrap());
};
@ -523,14 +655,34 @@ unsafe impl Allocator for ASpaceMutex {
// if space.alloc_force_user {
// dbg!(start);
// }
Ok(unsafe { slice::from_raw_parts_mut(start.cast::<u8>(), size) }.into())
let res = Ok(unsafe { slice::from_raw_parts_mut(start.cast::<u8>(), size) }.into());
res
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
let start_page = Page::from_start_address(VirtAddr::new(ptr.as_ptr() as u64)).unwrap();
for page in Page::range(start_page, start_page + (layout.size().div_ceil(4096) - 1) as u64)
{
#[expect(
clippy::unwrap_used,
reason = "
from_start_address requires it's input to be 4k aligned.
Calling deallocate requires that the pointer was returned by the same allocator.
The allocator only returns 4k aligned pointers, so from_start_address cannot panic.
"
)]
let start_page =
Page::from_start_address(VirtAddr::new(u64(ptr.as_ptr().expose_provenance()))).unwrap();
#[expect(
clippy::arithmetic_side_effects,
reason = "div_ceil always returns at least one, and the call safety requirements means that start_page+<layout size in pages> cannot overflow."
)]
for page in Page::range(start_page, start_page + u64(layout.size().div_ceil(4096) - 1)) {
unsafe {
#[expect(
clippy::unwrap_used,
reason = "
Unmap only fails if a subpage of a huge page is freed, or if the mapping is invalid.
The kernel doesn't use huge pages and the mapping must be valid to be returned from allocate, so unmap cannot fail.
"
)]
let (frame, flush) = self.0.lock().mapper.unmap(page).unwrap();
PHYSICAL_MEMORY.lock().deallocate_frame(frame);
flush.flush();