Work (better mem tracking, mpfrt/mpcntphys syscalls, shared initrd, tasking internal locks)

This commit is contained in:
pjht 2024-08-12 13:19:06 -05:00
parent 5558fa427b
commit 899fde8218
Signed by: pjht
GPG Key ID: 7B5F6AFBEC7EE78E
9 changed files with 649 additions and 305 deletions

View File

@ -1,10 +1,15 @@
use crate::{
bootinfo::BOOTINFO,
print, println,
serial::SECOND_PORT,
tasking::SleepReason,
virtual_memory::{ASpaceMutex, AddressSpace, ACTIVE_SPACE, KERNEL_SPACE},
TASKING,
};
use alloc::{boxed::Box, vec::Vec};
use az::WrappingCast;
use cast::{u64, usize};
use core::{arch::asm, ffi::CStr, ptr, slice};
use hashbrown::HashMap;
use pic8259::ChainedPics;
use saturating_cast::SaturatingCast;
@ -15,46 +20,12 @@ use x86_64::{
set_general_handler,
structures::{
idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode},
paging::{mapper::TranslateResult, Page, PageTableFlags, PhysFrame, Translate},
},
PhysAddr, PrivilegeLevel, VirtAddr,
};
const IRQ_BASE: u8 = 32;
static IDT: Lazy<InterruptDescriptorTable> = Lazy::new(|| {
let mut idt = InterruptDescriptorTable::new();
@ -87,10 +58,14 @@ extern "x86-interrupt" fn page_fault_handler(
#[warn(clippy::expect_used, reason = "FIXME")]
let faulting_addr =
Cr2::read().expect("Cannot handle page faults caused by non-canonical addresses");
if let Some(current_pid) = TASKING.current_pid() {
print!("PID {current_pid} ");
} else {
print!("Kernel init ");
}
if error_code.contains(PageFaultErrorCode::PROTECTION_VIOLATION) {
println!(
"page faulted {error_code:#?} at {:#x}\nEntry flags: {:#?}\n{stack_frame:#?}",
faulting_addr,
match ACTIVE_SPACE.lock().translate(faulting_addr) {
TranslateResult::Mapped { flags, .. } => flags,
@ -105,8 +80,9 @@ extern "x86-interrupt" fn page_fault_handler(
},
);
} else {
panic!("Got Page Fault {error_code:#?} at {:#x}\n{stack_frame:#?}", faulting_addr);
println!("page faulted {error_code:#?} at {:#x}\n{stack_frame:#?}", faulting_addr);
}
TASKING.exit();
}
#[expect(clippy::needless_pass_by_value, reason = "Signature dictated by external crate")]
@ -115,20 +91,14 @@ fn general_handler(stack_frame: InterruptStackFrame, index: u8, _error_code: Opt
}
#[expect(clippy::needless_pass_by_value, reason = "Signature dictated by external crate")]
fn exc_handler(_stack_frame: InterruptStackFrame, _index: u8, _error_code: Option<u64>) {
if let Some(current_pid) = TASKING.current_pid() {
print!("PID {current_pid} ");
} else {
panic!("Got exception {}\n{stack_frame:#?}", INTERRUPT_NAMES[usize(index)]);
};
print!("Kernel init ");
}
println!("had exception, exiting");
TASKING.exit();
}
pub struct EoiGuard(u8);
@ -226,15 +196,34 @@ extern "x86-interrupt" fn syscall_handler_header(stack_frame: InterruptStackFram
}
fn get_buffer(id: u64) -> Option<Box<[u8], &'static ASpaceMutex>> {
TASKING.data_buffers_mut(|x| {
x.try_remove(usize(id)).map(|buf| unsafe { Box::from_raw_in(buf, &*KERNEL_SPACE) })
})
}
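// The closure-based accessor above replaces the old guard-returning style
// (TASKING.lock().data_buffers_mut().try_remove(...)). A minimal sketch of
// the pattern, with simplified, hypothetical types:
//
//     pub fn data_buffers_mut<F: FnOnce(&mut Slab<*mut [u8]>) -> T, T>(&self, func: F) -> T {
//         let mut guard = self.data_buffers.lock(); // lock taken here...
//         func(&mut guard)                          // ...and released when func returns
//     }
//
// Handing back T instead of a lock guard means no caller can hold one of
// TASKING's internal locks across a yield, which is what makes the
// ok_to_yield() check in the timer handler (later in this commit) reliable.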
static REGISTERD_PIDS: Lazy<RwLock<HashMap<u64, u64>>> = Lazy::new(|| RwLock::new(HashMap::new()));
static INITRD_BUF: Lazy<&'static [u8]> = Lazy::new(|| {
let initrd = unsafe {
#[warn(clippy::expect_used, reason = "FIXME")]
let ramdisk_start = BOOTINFO.ramdisk_addr.into_option().expect("initrd not present");
let ramdisk_len = BOOTINFO.ramdisk_len;
slice::from_raw_parts(
ptr::with_exposed_provenance::<u8>(usize(ramdisk_start)),
usize(ramdisk_len),
)
};
TASKING.record_heap_alloc(initrd.len().next_multiple_of(4096));
KERNEL_SPACE.lock().alloc_force_user = true;
let initrd = Box::leak(
Vec::with_capacity_in(initrd.len(), &*KERNEL_SPACE)
.tap_mut(|v| v.extend_from_slice(initrd))
.into_boxed_slice(),
);
KERNEL_SPACE.lock().alloc_force_user = false;
initrd
});
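// Note on the change: syscall 3 used to copy the initrd into the calling
// process's address space on every call. INITRD_BUF instead relies on
// spin::Lazy, whose initializer runs exactly once on first dereference, so
// one user-accessible kernel-space copy is made (alloc_force_user makes the
// backing pages USER_ACCESSIBLE) and every later call returns the same
// pointer and length; see arm 3 of the syscall handler below.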
#[no_mangle]
extern "C" fn syscall_handler() {
let regs = unsafe { SYSCALL_REGS };
@ -255,47 +244,35 @@ extern "C" fn syscall_handler() {
};
retval = rval;
}
1 => TASKING.exit(),
2 => {
retval = if regs.rcx == 0 {
ACTIVE_SPACE
.lock()
.map_free(usize(regs.rdx), PageTableFlags::from_bits_truncate(regs.rsi))
.map_or(0, |x| u64(x.expose_provenance()))
} else {
TASKING.address_spaces_mut(|x| {
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
space
.map_free(usize(regs.rdx), PageTableFlags::from_bits_truncate(regs.rsi))
.map_or(0, |x| u64(x.expose_provenance()))
} else {
0
}
})
};
if retval != 0 {
TASKING.record_alloc(usize(regs.rdx) * 4096);
}
}
3 => {
retval = u64(INITRD_BUF.as_ptr().expose_provenance());
retval2 = u64(INITRD_BUF.len());
}
4 => {
#[warn(clippy::expect_used, reason = "FIXME")]
@ -303,11 +280,9 @@ extern "C" fn syscall_handler() {
clippy::arithmetic_side_effects,
reason = "usize::MAX will never be returned as an index, and so incrementing can never overflow."
)]
let address_space = u64(TASKING.address_spaces_mut(|x| {
x.insert(AddressSpace::new().expect("Failed to create address space")) + 1
}));
retval = address_space;
}
5 => {
@ -315,7 +290,7 @@ extern "C" fn syscall_handler() {
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
TASKING.address_spaces_mut(|x| x.remove(usize(regs.rcx - 1)));
}
6 => 'call6: {
let Ok(page) = Page::from_start_address(VirtAddr::new(regs.rdx)) else {
@ -325,34 +300,35 @@ extern "C" fn syscall_handler() {
let num_pages = usize(regs.rsi);
let flags = PageTableFlags::from_bits_truncate(regs.rdi);
let failed = if regs.rcx == 0 {
ACTIVE_SPACE.lock().map_assert_unused(page, num_pages, flags).is_err()
} else {
TASKING.address_spaces_mut(|x| {
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
space.map_assert_unused(page, num_pages, flags).is_err()
} else {
true
}
})
};
retval = failed.into();
if !failed {
TASKING.record_alloc(num_pages * 4096);
}
}
7 => {
if let Some(buffer) = get_buffer(regs.rdx) {
let len = usize(regs.rdi);
assert!(len <= buffer.len());
TASKING.address_spaces_mut(|x| {
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
space.run(|| unsafe {
(ptr::with_exposed_provenance_mut::<u8>(usize(regs.rsi)))
.copy_from(buffer.as_ptr(), len);
@ -361,19 +337,25 @@ extern "C" fn syscall_handler() {
} else {
retval = 1;
}
});
TASKING.record_buf_dealloc(buffer.len());
} else {
retval = 1;
}
}
8 => {
let args = unsafe {
let argc = usize(regs.rsi);
let argv: &[&[u8]] =
slice::from_raw_parts(ptr::with_exposed_provenance(usize(regs.rdi)), argc);
argv.iter().map(|arg| CStr::from_bytes_with_nul_unchecked(arg)).collect::<Vec<_>>()
};
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
let space = TASKING.address_spaces_mut(|x| x.remove(usize(regs.rdx - 1)));
let res = TASKING.new_process(
ptr::with_exposed_provenance(usize(regs.rcx)),
space,
args.as_slice(),
@ -390,7 +372,7 @@ extern "C" fn syscall_handler() {
clippy::unwrap_used,
reason = "Syscalls cannot be called during early boot, the only time when there is no current PID"
)]
REGISTERD_PIDS.write().insert(regs.rcx, u64(TASKING.current_pid().unwrap()));
}
10 => {
let id = REGISTERD_PIDS.read().get(&regs.rcx).copied();
@ -406,8 +388,7 @@ extern "C" fn syscall_handler() {
if let Some(buffer) = get_buffer(regs.rdx) {
let len = usize(regs.rsi);
assert!(len <= buffer.len());
if TASKING.message_queue_mut(pid, |_| ()).is_ok() {
#[expect(
clippy::unwrap_used,
reason = "The min call guarantees that the value is in the range of a u32 before the cast"
@ -450,43 +431,43 @@ extern "C" fn syscall_handler() {
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
let new_buffer_key =
TASKING.proc_data_buffers_mut(pid, |x| x.insert(buffer)).unwrap();
#[expect(
clippy::unwrap_used,
reason = "The option was already checked at the start of the if-let"
)]
TASKING.message_queue_mut(pid, |x| x.push((new_buffer_key, len))).unwrap();
#[expect(
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
let sleep_status = TASKING.proc_sleeping(pid).unwrap();
if sleep_status == Some(SleepReason::WaitingForIPC) {
#[expect(
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
TASKING.wake(pid).unwrap();
}
retval = 0;
} else {
println!("ipc_send: Bad PID ({})", pid);
retval = 1;
}
} else {
println!("ipc_send: Bad buffer ({})", regs.rdx);
retval = 1;
}
}
12 => {
if let Some(msg) = TASKING.current_message_queue_mut(|x| x.pop()) {
#[expect(
clippy::unwrap_used,
reason = "The message queue only contains valid buffer IDs"
)]
let buffer_addr =
u64(TASKING.data_buffers_mut(|x| *x.get(msg.0).unwrap()).expose_provenance());
retval2 = u64(msg.1);
retval = buffer_addr;
retval3 = u64(msg.0);
@ -499,7 +480,7 @@ extern "C" fn syscall_handler() {
clippy::unwrap_used,
reason = "Syscalls cannot be called during early boot, the only time when there is no current PID"
)]
let pid = u64(TASKING.current_pid().unwrap());
retval = pid;
}
14 => 'call14: {
@ -510,66 +491,161 @@ extern "C" fn syscall_handler() {
let num_pages = usize(regs.rsi);
let flags = PageTableFlags::from_bits_truncate(regs.rdi);
let failed = if regs.rcx == 0 {
ACTIVE_SPACE.lock().map_only_unused(page, num_pages, flags).is_err()
} else {
TASKING.address_spaces_mut(|x| {
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
space.map_only_unused(page, num_pages, flags).is_err()
} else {
true
}
})
};
retval = failed.into();
}
15 => {
if let Some(buf) = get_buffer(regs.rcx) {
TASKING.record_buf_dealloc(buf.len());
}
}
16 => {
let size = usize(regs.rcx);
let rounded_size = size.next_multiple_of(4096);
KERNEL_SPACE.lock().alloc_force_user = true;
let mut buffer = Vec::with_capacity_in(rounded_size, &*KERNEL_SPACE);
TASKING.record_buf_alloc(rounded_size);
buffer.resize(rounded_size, 0);
let buffer = buffer.into_boxed_slice();
let buffer = Box::into_raw(buffer);
KERNEL_SPACE.lock().alloc_force_user = false;
retval = u64(TASKING.data_buffers_mut(|x| x.insert(buffer)));
retval2 = u64(buffer.cast::<u8>().expose_provenance());
retval3 = u64(rounded_size);
}
17 => {
#[warn(clippy::expect_used, reason = "FIXME")]
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
TASKING.address_spaces_mut(|x| {
let space = x.get_mut(usize(regs.rcx - 1)).expect("Invalid address space");
let slice_start: *mut u8 = ptr::with_exposed_provenance_mut(usize(regs.rdx));
space.run(|| unsafe {
slice::from_raw_parts_mut(slice_start, usize(regs.rsi)).fill(0);
});
});
retval = 0;
}
18 => {
if TASKING.current_message_queue_mut(|x| x.is_empty()) {
TASKING.sleep(SleepReason::WaitingForIPC);
}
}
19 => {
let args = TASKING.arguments();
retval = u64(args.0.expose_provenance());
retval2 = u64(args.1);
}
20 => {
retval = if regs.rcx == 0 {
unsafe {
ACTIVE_SPACE
.lock()
.map_free_to(
PhysFrame::from_start_address(PhysAddr::new(regs.rdx)).unwrap(),
usize(regs.rsi),
PageTableFlags::from_bits_truncate(regs.rdi),
)
.map_or(0, |x| u64(x.expose_provenance()))
}
} else {
TASKING.address_spaces_mut(|x| {
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
unsafe {
space
.map_free_to(
PhysFrame::from_start_address(PhysAddr::new(regs.rdx)).unwrap(),
usize(regs.rsi),
PageTableFlags::from_bits_truncate(regs.rdi),
)
.map_or(0, |x| u64(x.expose_provenance()))
}
} else {
0
}
})
}
}
21 => {
retval = if regs.rcx == 0 {
let (start_virt, start_phys) = ACTIVE_SPACE
.lock()
.map_free_cont_phys(
usize(regs.rdx),
PageTableFlags::from_bits_truncate(regs.rsi),
)
.map_or((0, 0), |(ptr, start_phys)| (u64(ptr.expose_provenance()), start_phys));
retval2 = start_phys;
start_virt
} else {
TASKING.address_spaces_mut(|x| {
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
let (start_virt, start_phys) = space
.map_free_cont_phys(
usize(regs.rdx),
PageTableFlags::from_bits_truncate(regs.rsi),
)
.map_or((0, 0), |(ptr, start_phys)| {
(u64(ptr.expose_provenance()), start_phys)
});
retval2 = start_phys;
start_virt
} else {
0
}
})
};
if retval != 0 {
TASKING.record_alloc(usize(regs.rdx) * 4096);
}
}
22 => 'call22: {
let Ok(page) = Page::from_start_address(VirtAddr::new(regs.rdx)) else {
retval = 1;
break 'call22;
};
retval = if regs.rcx == 0 {
TASKING.record_dealloc(usize(regs.rsi * 4096));
u64::from(ACTIVE_SPACE.lock().unmap(page, usize(regs.rsi)).is_err())
} else {
TASKING.address_spaces_mut(|x| {
#[warn(
clippy::arithmetic_side_effects,
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
TASKING.record_dealloc(usize(regs.rsi * 4096));
u64::from(space.unmap(page, usize(regs.rsi)).is_err())
} else {
1
}
})
}
}
_ => (),
};
unsafe { SYSCALL_REGS = regs };
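For reference, the three syscalls this commit adds to the handler above; the "mpfrt/mpcntphys" in the commit message presumably abbreviates map_free_to and map_free_cont_phys. Register conventions, as read from the handler (rcx == 0 targets the current address space, otherwise rcx - 1 indexes the calling process's address-space slab):

20  map_free_to:        rdx = physical start, rsi = page count, rdi = flags
                        returns retval = virtual start, 0 on failure
21  map_free_cont_phys: rdx = page count, rsi = flags
                        returns retval = virtual start (0 on failure), retval2 = physical start
22  unmap:              rdx = virtual start, rsi = page count
                        returns retval = 0 on success, 1 on failure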

View File

@ -1,4 +1,4 @@
use crate::{println, tasking::TASKING, virtual_memory::KERNEL_SPACE};
use core::{
alloc::{GlobalAlloc, Layout},
@ -45,6 +45,7 @@ unsafe impl GlobalAlloc for Heap {
let Ok(ptr) = KERNEL_SPACE.lock().map_free(num_pages, PageTableFlags::empty()) else {
return (ptr::null_mut(), layout);
};
TASKING.record_heap_alloc(num_pages * 4096);
#[expect(clippy::unwrap_used, reason = "
from_size_align requires align to be a nonzero power of two, which it is.
Also, size must be less than isize when rounded up to a multiple of align.

View File

@ -285,7 +285,6 @@ pub fn main() {
reason = "Argument does not contain a null byte, thus this cannot panic"
)]
TASKING
.new_process(
ptr::with_exposed_provenance(usize(init.ehdr.e_entry)),
init_addr_space,

View File

@ -15,15 +15,11 @@ use core::panic::PanicInfo;
#[panic_handler]
fn panic(info: &PanicInfo<'_>) -> ! {
print!("Kernel panic in ");
if let Some(pid) = TASKING.current_pid() {
print!("PID {}", pid);
} else {
print!("kernel init");
}
println!(": {info}");
#[cfg(debug_assertions)]
print_backtrace();

View File

@ -11,7 +11,9 @@ use humansize::{SizeFormatter, BINARY};
use linked_list_allocator::hole::HoleList;
use spin::{Lazy, Mutex};
use x86_64::{
structures::paging::{
frame::PhysFrameRange, FrameAllocator, FrameDeallocator, PhysFrame, Size4KiB,
},
PhysAddr,
};
@ -39,6 +41,7 @@ enum EfiMemoryTypes {
pub struct PhysicalMemory {
alloc: HoleList,
bytes_used: usize,
peak_used: usize,
}
unsafe impl Send for PhysicalMemory {}
@ -46,7 +49,16 @@ unsafe impl Sync for PhysicalMemory {}
impl PhysicalMemory {
pub fn print_stats(&self) {
println!("[PMM] {} currently allocated", SizeFormatter::new(self.bytes_used, BINARY));
println!(
"[PMM] {} currently allocated ({})",
SizeFormatter::new(self.bytes_used, BINARY),
self.bytes_used / 4096,
);
println!(
"[PMM] {} allocated at peak ({})",
SizeFormatter::new(self.peak_used, BINARY),
self.peak_used / 4096,
);
}
}
@ -55,6 +67,9 @@ const FRAME_LAYOUT: Layout = unsafe { Layout::from_size_align_unchecked(4096, 40
unsafe impl FrameAllocator<Size4KiB> for PhysicalMemory {
fn allocate_frame(&mut self) -> Option<PhysFrame> {
self.bytes_used = self.bytes_used.saturating_add(4096);
if self.bytes_used > self.peak_used {
self.peak_used = self.bytes_used;
}
self.alloc.allocate_first_fit(FRAME_LAYOUT).ok().map(|(ptr, _)| {
#[expect(clippy::unwrap_used, reason = "PhysFrame requires its argument to be 4k aligned, which is guaranteed by the allocator")]
#[expect(clippy::arithmetic_side_effects, reason = "All addresses passed to the allocator are > PHYS_OFFSET, so this cannot underflow")]
@ -79,6 +94,38 @@ impl FrameDeallocator<Size4KiB> for PhysicalMemory {
}
}
impl PhysicalMemory {
pub fn allocate_frame_range(&mut self, len: usize) -> Option<PhysFrameRange> {
self.bytes_used = self.bytes_used.saturating_add(4096 * len);
if self.bytes_used > self.peak_used {
self.peak_used = self.bytes_used;
}
if len >= 0x10_0000_0000_0000 {
return None;
}
#[expect(clippy::arithmetic_side_effects, reason = "The check above makes sure the multiplication cannot overflow.")]
#[expect(clippy::unwrap_used, reason = "4096 is a nonzero power of two, and len is already a multiple of 4096 so rounding cannot overflow.")]
self.alloc.allocate_first_fit(Layout::from_size_align(4096 * len, 4096).unwrap()).ok().map(|(ptr, _)| {
#[expect(clippy::unwrap_used, reason = "PhysFrame requires its argument to be 4k aligned, which is guaranteed by the allocator")]
#[expect(clippy::arithmetic_side_effects, reason = "All addresses passed to the allocator are > PHYS_OFFSET, so this cannot underflow")]
let start = PhysFrame::from_start_address(PhysAddr::new(
(u64(ptr.as_ptr().expose_provenance())) - PHYS_OFFSET.as_u64(),
))
.unwrap();
#[expect(clippy::unwrap_used, reason = "PhysFrame requires its argument to be 4k aligned, which is guaranteed by the allocator")]
#[expect(clippy::arithmetic_side_effects, reason = "All addresses passed to the allocator are > PHYS_OFFSET, so this cannot underflow")]
let end = PhysFrame::from_start_address(PhysAddr::new(
(u64(ptr.as_ptr().expose_provenance()) + (u64(len) * 4096)) - PHYS_OFFSET.as_u64(),
))
.unwrap();
PhysFrameRange {
start,
end
}
})
}
}
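// A hypothetical usage sketch (not part of this commit): grab four
// physically contiguous 4 KiB frames, e.g. to back a DMA buffer. The
// returned PhysFrameRange is exactly what the new map_cont_phys in the
// virtual-memory code below consumes, one frame per mapped page.
#[allow(dead_code)]
fn example_contiguous_frames() -> Option<PhysAddr> {
    // Physical base of the resulting 16 KiB region.
    Some(PHYSICAL_MEMORY.lock().allocate_frame_range(4)?.start.start_address())
}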
pub static PHYSICAL_MEMORY: Lazy<Mutex<PhysicalMemory>> = Lazy::new(|| {
println!("[PMM] Bootloader reports the following regions:");
let mut region_iter = BOOTINFO.memory_regions.iter().peekable();
@ -135,5 +182,5 @@ pub static PHYSICAL_MEMORY: Lazy<Mutex<PhysicalMemory>> = Lazy::new(|| {
SizeFormatter::new(usable_mem, BINARY),
SizeFormatter::new(total_mem, BINARY),
);
Mutex::new(PhysicalMemory { alloc, bytes_used: 0, peak_used: 0 })
});

View File

@ -48,7 +48,7 @@ pub fn init(mut freq: u32) {
fn handler(_irq: u8, eoi_guard: EoiGuard) {
drop(eoi_guard);
if TASKING.ok_to_yield() {
TASKING.task_yield();
}
}
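// Rationale: the timer interrupt can fire while the interrupted code holds
// one of TASKING's internal locks. The old code guarded against this with
// try_lock() on the single big TASKING mutex; with per-field locks, the new
// ok_to_yield() checks that none of them are held before task_yield() tries
// to take them, so the handler skips this tick rather than deadlocking
// against its own interrupted context.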

View File

@ -13,5 +13,5 @@ entry_point!(start, config = &BOOTLOADER_CONFIG);
fn start(bootinfo: &'static mut BootInfo) -> ! {
BOOTINFO.set(bootinfo);
main();
TASKING.exit();
}

View File

@ -1,12 +1,20 @@
use crate::{
gdt,
println, qemu_exit,
virtual_memory::{ASpaceMutex, AddressSpace, PagingError, KERNEL_SPACE},
};
use alloc::{borrow::ToOwned, boxed::Box, collections::VecDeque, ffi::CString, vec::Vec};
use core::{
alloc::Layout,
arch::asm,
ffi::CStr,
ptr::{addr_of, addr_of_mut},
sync::atomic::{AtomicUsize, Ordering},
};
use crossbeam_queue::SegQueue;
use humansize::{SizeFormatter, BINARY};
use slab::Slab;
use spin::{Lazy, Mutex, RwLock};
use x86_64::{
instructions::interrupts,
structures::paging::{Page, PageTableFlags},
@ -75,9 +83,6 @@ extern "C" fn task_init() {
}
extern "C" fn task_force_unlock() {
interrupts::enable();
}
@ -88,48 +93,63 @@ pub enum SleepReason {
#[derive(Debug)]
struct Process {
kernel_stack: Box<[usize], &'static ASpaceMutex>,
kernel_esp: *mut usize,
kernel_esp_top: VirtAddr,
address_space: Option<AddressSpace>,
arguments: (*const *const u8, usize),
address_spaces: Mutex<Slab<AddressSpace>>,
data_buffers: Mutex<Slab<*mut [u8]>>,
message_queue: Mutex<SegQueue<(usize, usize)>>,
sleeping: RwLock<Option<SleepReason>>,
bytes_allocated: AtomicUsize,
}
unsafe impl Send for Process {}
unsafe impl Sync for Process {}
#[derive(Copy, Clone, Debug)]
pub struct InvalidPid;
pub static TASKING: Lazy<Tasking> = Lazy::new(|| Tasking {
processes: RwLock::new(Slab::new()),
ready_to_run: Mutex::new(VecDeque::new()),
current_pid: RwLock::new(None),
freeable_kstacks: Mutex::new(Vec::new()),
alloc_to_account: AtomicUsize::new(0),
dealloc_to_account: AtomicUsize::new(0),
kinit_allocated: AtomicUsize::new(0),
buf_allocated: AtomicUsize::new(0),
kstacks_allocated: AtomicUsize::new(0),
heap_allocated: AtomicUsize::new(0),
});
#[derive(Debug)]
pub struct Tasking {
processes: RwLock<Slab<Process>>,
ready_to_run: Mutex<VecDeque<usize>>,
current_pid: RwLock<Option<usize>>,
freeable_kstacks: Mutex<Vec<Box<[usize], &'static ASpaceMutex>>>,
alloc_to_account: AtomicUsize,
dealloc_to_account: AtomicUsize,
kinit_allocated: AtomicUsize,
buf_allocated: AtomicUsize,
kstacks_allocated: AtomicUsize,
heap_allocated: AtomicUsize,
}
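/// Kernel stack size in usize elements, not bytes: stacks are allocated as
/// Box<[usize], &ASpaceMutex>, so this is 0x1_0000 bytes / 8 bytes per
/// element = 8192 elements, i.e. 64 KiB per task (hence the KSTACK_SIZE * 8
/// byte counts in the accounting below).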
pub const KSTACK_SIZE: usize = 0x1_0000 / 8;
impl Tasking {
pub fn new_process(
&self,
entry_point: *const extern "C" fn() -> !,
mut address_space: AddressSpace,
arguments: &[&CStr],
) -> Result<usize, PagingError> {
let mut kernel_stack = Vec::with_capacity_in(KSTACK_SIZE, &*KERNEL_SPACE);
kernel_stack.resize(KSTACK_SIZE - 0x4, 0);
#[expect(clippy::as_conversions, reason = "Needed to get address of function")]
{
kernel_stack.push(task_force_unlock as usize);
@ -138,6 +158,7 @@ impl Tasking {
kernel_stack.push(0xFFF_FF80_0000 + (16 * 4096));
kernel_stack.push(entry_point.expose_provenance());
let mut kernel_stack = kernel_stack.into_boxed_slice();
self.kstacks_allocated.fetch_add(KSTACK_SIZE * 8, Ordering::Relaxed);
address_space.map_assert_unused(
#[expect(
clippy::unwrap_used,
@ -147,6 +168,7 @@ impl Tasking {
16,
PageTableFlags::USER_ACCESSIBLE,
)?;
self.kstacks_allocated.fetch_add(16 * 4096, Ordering::Relaxed);
let arguments = arguments.iter().map(|arg| (*arg).to_owned()).collect::<Vec<CString>>();
#[expect(
clippy::unwrap_used,
@ -173,6 +195,7 @@ impl Tasking {
};
let user_arg_mem =
address_space.map_free(args_layout.size() / 4096, PageTableFlags::USER_ACCESSIBLE)?;
self.kstacks_allocated.fetch_add(args_layout.size(), Ordering::Relaxed);
address_space.run(|| unsafe {
let mut ptr_ptr: *mut *const u8 = user_arg_mem.cast();
for (&offset, argument) in arg_offsets.iter().zip(arguments.iter()) {
@ -183,81 +206,132 @@ impl Tasking {
ptr_ptr = ptr_ptr.add(1);
}
});
let pid = self.processes.write().insert(Process {
#[expect(
clippy::indexing_slicing,
reason = "Stack length is 0x1_0000, this cannot panic"
)]
kernel_esp: &mut kernel_stack[KSTACK_SIZE - 10],
#[expect(
clippy::indexing_slicing,
reason = "Stack length is 0x1_0000, this cannot panic"
)]
kernel_esp_top: VirtAddr::from_ptr(
addr_of!(kernel_stack[KSTACK_SIZE - 1]).wrapping_add(1),
),
kernel_stack,
address_space: Some(address_space),
address_spaces: Mutex::new(Slab::new()),
data_buffers: Mutex::new(Slab::new()),
message_queue: Mutex::new(SegQueue::new()),
sleeping: RwLock::new(None),
arguments: (user_arg_mem.cast(), arguments.len()),
bytes_allocated: AtomicUsize::new(0),
});
self.ready_to_run.lock().push_back(pid);
Ok(pid)
}
pub fn ok_to_yield(&self) -> bool {
!(self.freeable_kstacks.is_locked()
|| (self.current_pid.reader_count() > 0)
|| (self.current_pid.writer_count() > 0)
|| self.ready_to_run.is_locked()
|| (self.processes.reader_count() > 0)
|| (self.processes.writer_count() > 0)
|| (self.current_pid.writer_count() > 0))
}
pub fn task_yield(&self) {
self.kstacks_allocated
.fetch_sub(self.freeable_kstacks.lock().len() * KSTACK_SIZE * 8, Ordering::Relaxed);
self.freeable_kstacks.lock().clear();
let Some(current_pid) = *self.current_pid.read() else {
return;
};
let next_process_pid = self.ready_to_run.lock().pop_front();
if let Some(next_process_pid) = next_process_pid {
#[expect(
clippy::expect_used,
reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
)]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let current_address_space = self.processes.write()[next_process_pid]
.address_space
.take()
.expect("Non-current process has active page table")
.activate();
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
self.processes.write()[current_pid].address_space = Some(current_address_space);
let processes = self.processes.read();
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let current_process = &processes[current_pid];
current_process
.bytes_allocated
.fetch_add(self.alloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
current_process
.bytes_allocated
.fetch_sub(self.dealloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
self.alloc_to_account.store(0, Ordering::Relaxed);
self.dealloc_to_account.store(0, Ordering::Relaxed);
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let next_process = &processes[next_process_pid];
gdt::set_tss_stack(next_process.kernel_esp_top);
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
if current_process.sleeping.read().is_none() {
self.ready_to_run.lock().push_back(current_pid);
}
let kernel_esp = next_process.kernel_esp;
let previous_process = current_pid;
*self.current_pid.write() = Some(next_process_pid);
core::mem::drop(processes);
let mut processes = self.processes.write();
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let curr_stack = addr_of_mut!(processes[previous_process].kernel_esp);
core::mem::drop(processes);
switch_to_asm(curr_stack, kernel_esp);
} else if {
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let res = self.processes.read()[current_pid].sleeping.read().is_some();
res
} {
println!("All processes sleeping, exiting QEMU");
self.print_stats();
qemu_exit::exit_qemu();
}
}
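// The guard choreography above is the subtle part of task_yield: the
// processes read guard is dropped before the write guard is taken, a raw
// pointer to the outgoing task's kernel_esp is captured via addr_of_mut!,
// and that write guard is dropped again *before* switch_to_asm. Nothing may
// be held across the switch, because execution resumes inside another task
// that will itself take TASKING's locks.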
pub fn current_pid(&self) -> Option<usize> {
*self.current_pid.read()
}
pub fn exit(&self) -> ! {
let next_process_pid = self.ready_to_run.lock().pop_front();
if let Some(next_process_pid) = next_process_pid {
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let mut processes = self.processes.write();
if let Some(current_pid) = *self.current_pid.read() {
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let current_process = &processes[current_pid];
current_process
.bytes_allocated
.fetch_add(self.alloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
current_process
.bytes_allocated
.fetch_sub(self.dealloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
self.alloc_to_account.store(0, Ordering::Relaxed);
self.dealloc_to_account.store(0, Ordering::Relaxed);
println!(
"[TASKING] PID {current_pid} exiting, used {} ({}), this is being leaked.",
SizeFormatter::new(
current_process.bytes_allocated.load(Ordering::Relaxed),
BINARY
),
current_process.bytes_allocated.load(Ordering::Relaxed) / 4096
);
self.freeable_kstacks.lock().push(processes.remove(current_pid).kernel_stack);
}
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let next_process = &mut processes[next_process_pid];
#[expect(
clippy::expect_used,
reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
@ -269,69 +343,158 @@ impl Tasking {
.activate();
gdt::set_tss_stack(next_process.kernel_esp_top);
let kernel_esp = next_process.kernel_esp;
*self.current_pid.write() = Some(next_process_pid);
core::mem::drop(processes);
switch_to_asm_exit(kernel_esp);
unreachable!()
} else {
println!("Last non-sleeping process exited, exiting QEMU");
self.print_stats();
qemu_exit::exit_qemu();
}
}
pub fn address_spaces_mut<F: FnOnce(&mut Slab<AddressSpace>) -> T, T>(&self, func: F) -> T {
let processes = self.processes.read();
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let mut aspaces = processes[self.current_pid.read().unwrap()].address_spaces.lock();
func(&mut aspaces)
}
pub fn data_buffers_mut<F: FnOnce(&mut Slab<*mut [u8]>) -> T, T>(&self, func: F) -> T {
let processes = self.processes.read();
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let mut dbufs = processes[self.current_pid.read().unwrap()].data_buffers.lock();
func(&mut dbufs)
}
pub fn proc_data_buffers_mut<F: FnOnce(&mut Slab<*mut [u8]>) -> T, T>(
&self,
pid: usize,
func: F,
) -> Result<T, InvalidPid> {
let processes = self.processes.read();
let mut dbufs = processes.get(pid).ok_or(InvalidPid)?.data_buffers.lock();
Ok(func(&mut dbufs))
}
pub fn current_message_queue_mut<F: FnOnce(&mut SegQueue<(usize, usize)>) -> T, T>(
&self,
func: F,
) -> T {
let processes = self.processes.read();
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let mut queue = processes[self.current_pid.read().unwrap()].message_queue.lock();
func(&mut queue)
}
pub fn message_queue_mut<F: FnOnce(&mut SegQueue<(usize, usize)>) -> T, T>(
&self,
pid: usize,
func: F,
) -> Result<T, InvalidPid> {
let processes = self.processes.read();
let mut queue = processes.get(pid).ok_or(InvalidPid)?.message_queue.lock();
Ok(func(&mut queue))
}
pub fn proc_sleeping(&self, pid: usize) -> Result<Option<SleepReason>, InvalidPid> {
Ok(*(self.processes.read().get(pid).ok_or(InvalidPid)?.sleeping.read()))
}
pub fn sleep(&self, reason: SleepReason) {
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
*self.processes.read()[self.current_pid.read().unwrap()].sleeping.write() = Some(reason);
self.task_yield();
}
pub fn wake(&self, pid: usize) -> Result<(), InvalidPid> {
*self.processes.read().get(pid).ok_or(InvalidPid)?.sleeping.write() = None;
self.ready_to_run.lock().push_back(pid);
Ok(())
}
pub fn arguments(&self) -> (*const *const u8, usize) {
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
self.processes.read()[self.current_pid.read().unwrap()].arguments
}
pub fn record_alloc(&self, size: usize) {
if let Some(pid) = *self.current_pid.read() {
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
if self.processes.writer_count() == 0 {
self.processes.read()[pid].bytes_allocated.fetch_add(size, Ordering::Relaxed);
} else {
self.alloc_to_account.fetch_add(size, Ordering::Relaxed);
}
} else {
self.kinit_allocated.fetch_add(size, Ordering::Relaxed);
}
}
pub fn record_dealloc(&self, size: usize) {
if let Some(pid) = *self.current_pid.read() {
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
if self.processes.writer_count() == 0 {
self.processes.read()[pid].bytes_allocated.fetch_sub(size, Ordering::Relaxed);
} else {
self.dealloc_to_account.fetch_add(size, Ordering::Relaxed);
}
} else {
self.kinit_allocated.fetch_sub(size, Ordering::Relaxed);
}
}
pub fn record_buf_alloc(&self, size: usize) {
self.buf_allocated.fetch_add(size, Ordering::Relaxed);
}
pub fn record_buf_dealloc(&self, size: usize) {
self.buf_allocated.fetch_sub(size, Ordering::Relaxed);
}
pub fn record_heap_alloc(&self, size: usize) {
self.heap_allocated.fetch_add(size, Ordering::Relaxed);
}
pub fn record_heap_dealloc(&self, size: usize) {
self.heap_allocated.fetch_sub(size, Ordering::Relaxed);
}
pub fn print_stats(&self) {
let mut total = self.kinit_allocated.load(Ordering::Relaxed)
+ self.buf_allocated.load(Ordering::Relaxed)
+ self.kstacks_allocated.load(Ordering::Relaxed)
+ self.heap_allocated.load(Ordering::Relaxed);
println!(
"[TASKING] Kernel init used {}",
SizeFormatter::new(self.kinit_allocated.load(Ordering::Relaxed), BINARY)
);
println!(
"[TASKING] Kernel buffers used {}",
SizeFormatter::new(self.buf_allocated.load(Ordering::Relaxed), BINARY)
);
println!(
"[TASKING] Kernel stacks used {}",
SizeFormatter::new(self.kstacks_allocated.load(Ordering::Relaxed), BINARY)
);
println!(
"[TASKING] Kernel heap used {}",
SizeFormatter::new(self.heap_allocated.load(Ordering::Relaxed), BINARY)
);
for (i, process) in self.processes.read().iter() {
total += process.bytes_allocated.load(Ordering::Relaxed);
println!(
"[TASKING] PID {} used {}",
i,
SizeFormatter::new(process.bytes_allocated.load(Ordering::Relaxed), BINARY)
);
}
println!("[TASKING] Total used {} ({})", SizeFormatter::new(total, BINARY), total / 4096);
}
}
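A note on the deferred-accounting pair alloc_to_account / dealloc_to_account: record_alloc and record_dealloc may run while the processes table is write-locked (for instance, a page-table or stack allocation triggered from code that already holds the write lock), and taking a read lock there would deadlock. The delta is instead parked in an atomic and folded into the outgoing process's bytes_allocated at the next task_yield or exit. A minimal model of the idea, with simplified, hypothetical names:

use core::sync::atomic::{AtomicUsize, Ordering};

struct DeferredCounter {
    pending: AtomicUsize,   // stands in for alloc_to_account
    committed: AtomicUsize, // stands in for processes[pid].bytes_allocated
}

impl DeferredCounter {
    // Called from allocation paths; table_busy stands in for
    // processes.writer_count() != 0.
    fn record(&self, size: usize, table_busy: bool) {
        if table_busy {
            self.pending.fetch_add(size, Ordering::Relaxed);
        } else {
            self.committed.fetch_add(size, Ordering::Relaxed);
        }
    }
    // Called at the next context switch, once the table is readable again.
    fn fold(&self) {
        let delta = self.pending.swap(0, Ordering::Relaxed);
        self.committed.fetch_add(delta, Ordering::Relaxed);
    }
}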

View File

@ -1,4 +1,4 @@
use crate::{bootinfo::BOOTINFO, physical_memory::PHYSICAL_MEMORY, tasking::TASKING};
use alloc::alloc::{AllocError, Allocator, Layout};
use cast::{u64, usize};
use core::{
@ -167,6 +167,7 @@ pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
)]
let (new_child, new_child_phys) =
alloc_pt().expect("Could not allocate new kernel entry");
TASKING.record_alloc(4096);
new_child.write(PageTable::new());
new_child_phys
},
@ -452,25 +453,74 @@ impl AddressSpace {
.flush();
}
}
Ok(page.start_address().as_mut_ptr())
}
/// Maps virtual pages to newly allocated contiguous physical memory
/// and returns the starting virtual address and the starting physical address.
/// The newly allocated physical memory contains garbage data, so the mapping will always be
/// writable, as it doesn't make sense to map garbage data read-only.
///
/// ## Safety
///
/// Creating page table mappings is a fundamentally unsafe operation because
/// there are various ways to break memory safety through it. For example,
/// re-mapping an in-use page to a different frame changes and invalidates
/// all values stored in that page, resulting in undefined behavior on the
/// next use.
///
/// The caller must ensure that no undefined behavior or memory safety
/// violations can occur through the new mapping by, among other things, preventing
/// creating uninitialized or invalid values. Rust requires that all values
/// have a correct memory layout. For example, a `bool` must be either a 0
/// or a 1 in memory, but not a 3 or 4. An exception is the `MaybeUninit`
/// wrapper type, which abstracts over possibly uninitialized memory.
/// Note: You only have to worry about this when re-mapping pages to
/// different physical frames. Mapping pages that are not in use yet is fine.
unsafe fn map_cont_phys(
&mut self,
page: Page,
num_pages: usize,
flags: PageTableFlags,
) -> Result<(*mut u8, u64), PagingError> {
self.check_request_valid(page, num_pages)?;
let frame_range = PHYSICAL_MEMORY
.lock()
.allocate_frame_range(num_pages)
.ok_or(PagingError::FrameAllocationFailed)?;
let phys_start = frame_range.start.start_address().as_u64();
#[expect(
clippy::arithmetic_side_effects,
reason = "check_request_valid guarentees this won't overflow"
)]
for (page, frame) in
(PageRange { start: page, end: page + u64(num_pages) }).zip(frame_range)
{
unsafe {
let mut phys_mem = PHYSICAL_MEMORY.lock();
self.mapper
.map_to_with_table_flags(
page,
frame,
flags
| PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| if self.alloc_force_user {
PageTableFlags::USER_ACCESSIBLE
} else {
PageTableFlags::empty()
},
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
&mut *phys_mem,
)?
.flush();
}
}
Ok((page.start_address().as_mut_ptr(), phys_start))
}
/// Maps new virtual pages and returns the starting address
///
/// ## Safety
@ -502,6 +552,20 @@ impl AddressSpace {
unsafe { self.map(self.find_free_pages(num_pages)?, num_pages, flags) }
}
/// Maps new virtual pages to new contiguous physical memory
/// and returns the starting virtual address and the starting physical address.
/// The newly allocated physical memory contains garbage data, so the mapping will always be
/// writable, as it doesn't make sense to map garbage data read-only.
pub fn map_free_cont_phys(
&mut self,
num_pages: usize,
flags: PageTableFlags,
) -> Result<(*mut u8, u64), PagingError> {
// SAFETY: &mut aliasing is prevented by using free physical frames, and uninitialized
// values are prevented by using free virtual pages.
unsafe { self.map_cont_phys(self.find_free_pages(num_pages)?, num_pages, flags) }
}
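// A hypothetical usage sketch (not part of the kernel's API): allocate a
// physically contiguous, writable 16-page buffer, e.g. for a DMA engine.
// The device is handed the physical start; the CPU uses the virtual pointer.
#[allow(dead_code)]
fn example_dma_buffer(&mut self) -> Result<(*mut u8, u64), PagingError> {
    self.map_free_cont_phys(16, PageTableFlags::empty())
}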
/// Same behavior as `map`, but asserts that the requested virtual page range is unmapped, and
/// thus is safe.
pub fn map_assert_unused(
@ -532,6 +596,9 @@ impl AddressSpace {
if self.translate_addr(page.start_address()).is_some() {
continue;
}
if !self.is_kernel {
TASKING.record_alloc(4096);
}
unsafe {
let mut phys_mem = PHYSICAL_MEMORY.lock();
let frame = phys_mem.allocate_frame().ok_or(PagingError::FrameAllocationFailed)?;
@ -551,6 +618,22 @@ impl AddressSpace {
Ok(page.start_address().as_mut_ptr())
}
pub fn unmap(&mut self, page: Page, num_pages: usize) -> Result<(), PagingError> {
self.check_request_valid(page, num_pages)?;
#[expect(
clippy::arithmetic_side_effects,
reason = "check_request_valid guarentees this won't overflow"
)]
for page in (PageRange { start: page, end: page + u64(num_pages) }) {
unsafe {
let (frame, flush) = self.mapper.unmap(page)?;
PHYSICAL_MEMORY.lock().deallocate_frame(frame);
flush.flush();
}
}
Ok(())
}
/// Finds a range of free pages and returns the starting page
fn find_free_pages(&self, num_pages: usize) -> Result<Page, PagingError> {
if num_pages == 0 {
@ -652,11 +735,7 @@ unsafe impl Allocator for ASpaceMutex {
PageTableFlags::empty()
};
let start = space.map_free(size / 4096, flags).map_err(|_| AllocError)?;
Ok(unsafe { slice::from_raw_parts_mut(start.cast::<u8>(), size) }.into())
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
@ -670,12 +749,7 @@ unsafe impl Allocator for ASpaceMutex {
)]
let start_page =
Page::from_start_address(VirtAddr::new(u64(ptr.as_ptr().expose_provenance()))).unwrap();
let length = layout.size().div_ceil(4096);
#[expect(
clippy::unwrap_used,
reason = "
@ -683,18 +757,6 @@ unsafe impl Allocator for ASpaceMutex {
The kernel doesn't use huge pages and the mapping must be valid to be returned from allocate, so unmap cannot fail.
"
)]
self.0.lock().unmap(start_page, length).unwrap();
}
}