Move kernel buffers to the per-process address spaces

This commit is contained in:
pjht 2024-08-17 15:19:24 -05:00
parent f258f4ed71
commit e805a2de0c
Signed by: pjht
GPG Key ID: 7B5F6AFBEC7EE78E
3 changed files with 108 additions and 89 deletions

View File

@ -197,7 +197,7 @@ extern "x86-interrupt" fn syscall_handler_header(stack_frame: InterruptStackFram
/// Removes the data buffer with key `id` from the current process's buffer
/// slab and reconstitutes it as an owned `Box`, returning `None` when no
/// buffer with that id exists.
// NOTE(review): this view is a diff rendering — the next two statements are
// the pre-change (KERNEL_SPACE) and post-change (ACTIVE_SPACE) versions of
// the same line; only the ACTIVE_SPACE line exists after this commit.
fn get_buffer(id: u64) -> Option<Box<[u8], &'static ASpaceMutex>> {
    TASKING.data_buffers_mut(|x| {
        x.try_remove(usize(id)).map(|buf| unsafe { Box::from_raw_in(buf, &*KERNEL_SPACE) })
        x.try_remove(usize(id)).map(|buf| unsafe { Box::from_raw_in(buf, &*ACTIVE_SPACE) })
    })
}
@ -219,7 +219,6 @@ static INITRD_BUF: Lazy<&'static [u8]> = Lazy::new(|| {
.tap_mut(|v| v.extend_from_slice(initrd))
.into_boxed_slice(),
);
KERNEL_SPACE.lock().record_alloc(initrd.len().next_multiple_of(4096));
KERNEL_SPACE.lock().alloc_force_user = false;
initrd
});
@ -314,7 +313,7 @@ extern "C" fn syscall_handler() {
retval = failed.into();
}
7 => {
if let Some(buffer) = get_buffer(regs.rdx) {
if let Some(mut buffer) = get_buffer(regs.rdx) {
let len = usize(regs.rdi);
assert!(len <= buffer.len());
TASKING.address_spaces_mut(|x| {
@ -323,16 +322,29 @@ extern "C" fn syscall_handler() {
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
let buffer_num_pages = buffer.len() / 4096;
let buffer_raw = Box::into_raw(buffer);
let page = ACTIVE_SPACE
.lock()
.move_mappings_free(
Page::from_start_address(VirtAddr::new(u64(
buffer_raw.expose_provenance()
)))
.unwrap(),
buffer_num_pages,
space,
)
.unwrap();
space.run(|| unsafe {
(ptr::with_exposed_provenance_mut::<u8>(usize(regs.rsi)))
.copy_from(buffer.as_ptr(), len);
.copy_from(page.start_address().as_mut_ptr::<u8>(), len);
});
space.unmap_and_free(page, buffer_num_pages).unwrap();
retval = 0;
} else {
retval = 1;
}
});
TASKING.record_curr_buf_dealloc(buffer.len());
} else {
retval = 1;
}
@ -420,19 +432,34 @@ extern "C" fn syscall_handler() {
SECOND_PORT.write_u32s(&[
total_len, // Total block length
]);
TASKING.record_curr_buf_dealloc(buffer.len());
#[expect(
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
TASKING.record_buf_alloc(pid, buffer.len()).unwrap();
let buf_num_pages = buffer.len() / 4096;
let buffer = Box::into_raw(buffer);
let buf_start_page =
Page::from_start_address(VirtAddr::new(u64(buffer.expose_provenance())))
.unwrap();
let dest_buffer = TASKING
.address_space_mut(pid, |aspace| {
// This is None only if the destination is the current process. If so,
// no remapping is necessary, so just return the old buffer.
let Some(aspace) = aspace else {
return buffer;
};
let page = ACTIVE_SPACE
.lock()
.move_mappings_free(buf_start_page, buf_num_pages, aspace)
.unwrap();
ptr::slice_from_raw_parts_mut::<u8>(
page.start_address().as_mut_ptr(),
buffer.len(),
)
})
.unwrap();
#[expect(
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
let new_buffer_key =
TASKING.proc_data_buffers_mut(pid, |x| x.insert(buffer)).unwrap();
TASKING.proc_data_buffers_mut(pid, |x| x.insert(dest_buffer)).unwrap();
#[expect(
clippy::unwrap_used,
reason = "The option was already checked at the start of the if-let"
@ -508,20 +535,15 @@ extern "C" fn syscall_handler() {
retval = failed.into();
}
15 => {
if let Some(buf) = get_buffer(regs.rcx) {
TASKING.record_curr_buf_dealloc(buf.len());
}
get_buffer(regs.rcx);
}
16 => {
let size = usize(regs.rcx);
let rounded_size = size.next_multiple_of(4096);
KERNEL_SPACE.lock().alloc_force_user = true;
let mut buffer = Vec::with_capacity_in(rounded_size, &*KERNEL_SPACE);
TASKING.record_curr_buf_alloc(rounded_size);
let mut buffer = Vec::with_capacity_in(rounded_size, &*ACTIVE_SPACE);
buffer.resize(rounded_size, 0);
let buffer = buffer.into_boxed_slice();
let buffer = Box::into_raw(buffer);
KERNEL_SPACE.lock().alloc_force_user = false;
retval = u64(TASKING.data_buffers_mut(|x| x.insert(buffer)));
retval2 = u64(buffer.cast::<u8>().expose_provenance());
retval3 = u64(rounded_size);
@ -626,7 +648,7 @@ extern "C" fn syscall_handler() {
break 'call22;
};
retval = if regs.rcx == 0 {
u64::from(ACTIVE_SPACE.lock().unmap(page, usize(regs.rsi)).is_err())
u64::from(ACTIVE_SPACE.lock().unmap_and_free(page, usize(regs.rsi)).is_err())
} else {
TASKING.address_spaces_mut(|x| {
#[warn(
@ -634,7 +656,7 @@ extern "C" fn syscall_handler() {
reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
)]
if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
u64::from(space.unmap(page, usize(regs.rsi)).is_err())
u64::from(space.unmap_and_free(page, usize(regs.rsi)).is_err())
} else {
1
}

View File

@ -101,7 +101,6 @@ struct Process {
data_buffers: Mutex<Slab<*mut [u8]>>,
message_queue: Mutex<SegQueue<(usize, usize)>>,
sleeping: RwLock<Option<SleepReason>>,
buf_allocated: AtomicUsize,
}
unsafe impl Send for Process {}
@ -125,7 +124,7 @@ pub struct Tasking {
freeable_kstacks: Mutex<Vec<Box<[usize], &'static ASpaceMutex>>>,
}
pub const KSTACK_SIZE: usize = 0x1_0000 / 8;
pub const KSTACK_SIZE: usize = (16 * 4096) / 8;
impl Tasking {
pub fn new_process(
@ -144,7 +143,6 @@ impl Tasking {
kernel_stack.push(0xFFF_FF80_0000 + (16 * 4096));
kernel_stack.push(entry_point.expose_provenance());
let mut kernel_stack = kernel_stack.into_boxed_slice();
KERNEL_SPACE.lock().record_alloc(KSTACK_SIZE * 8);
address_space.map_assert_unused(
#[expect(
clippy::unwrap_used,
@ -210,7 +208,6 @@ impl Tasking {
message_queue: Mutex::new(SegQueue::new()),
sleeping: RwLock::new(None),
arguments: (user_arg_mem.cast(), arguments.len()),
buf_allocated: AtomicUsize::new(0),
});
self.ready_to_run.lock().push_back(pid);
Ok(pid)
@ -227,7 +224,6 @@ impl Tasking {
}
pub fn task_yield(&self) {
KERNEL_SPACE.lock().record_dealloc(self.freeable_kstacks.lock().len() * KSTACK_SIZE * 8);
self.freeable_kstacks.lock().clear();
let Some(current_pid) = *self.current_pid.read() else {
return;
@ -285,14 +281,6 @@ impl Tasking {
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let mut processes = self.processes.write();
if let Some(current_pid) = *self.current_pid.read() {
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
let current_process = &processes[current_pid];
let buf_bytes_used = current_process.buf_allocated.load(Ordering::Relaxed);
println!(
"[TASKING] PID {current_pid} exiting, used {} ({}) of kernel buffers, this is being leaked.",
SizeFormatter::new(buf_bytes_used, BINARY),
buf_bytes_used / 4096,
);
self.freeable_kstacks.lock().push(processes.remove(current_pid).kernel_stack);
}
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
@ -366,6 +354,16 @@ impl Tasking {
Ok(func(&mut queue))
}
/// Runs `func` with mutable access to the address space stored for process
/// `pid`, forwarding `func`'s return value.
///
/// `func` receives `None` when the process has no stored address space
/// (per the callers' comments, when `pid` is the current process).
/// Returns `Err(InvalidPid)` if no process with that PID exists.
pub fn address_space_mut<F: FnOnce(Option<&mut AddressSpace>) -> T, T>(
    &self,
    pid: usize,
    func: F,
) -> Result<T, InvalidPid> {
    let mut processes = self.processes.write();
    let process = processes.get_mut(pid).ok_or(InvalidPid)?;
    Ok(func(process.address_space.as_mut()))
}
/// Returns the sleep state of process `pid` (`None` when the process is not
/// sleeping), or `Err(InvalidPid)` if no such process exists.
pub fn proc_sleeping(&self, pid: usize) -> Result<Option<SleepReason>, InvalidPid> {
    let processes = self.processes.read();
    let process = processes.get(pid).ok_or(InvalidPid)?;
    let state = *process.sleeping.read();
    Ok(state)
}
@ -389,42 +387,6 @@ impl Tasking {
self.processes.read()[self.current_pid.read().unwrap()].arguments
}
/// Adds `size` bytes to the kernel-buffer usage counter of the *current*
/// process.
///
/// Panics (via the `unwrap` and the slab indexing, both flagged FIXME below)
/// if there is no current PID or it is absent from the process table.
pub fn record_curr_buf_alloc(&self, size: usize) {
    #[warn(clippy::unwrap_used, reason = "FIXME")]
    #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
    self.processes.read()[self.current_pid.read().unwrap()]
        .buf_allocated
        .fetch_add(size, Ordering::Relaxed);
}
/// Subtracts `size` bytes from the kernel-buffer usage counter of the
/// *current* process. Mirror of [`record_curr_buf_alloc`].
///
/// Panics (via the `unwrap` and the slab indexing, both flagged FIXME below)
/// if there is no current PID or it is absent from the process table.
pub fn record_curr_buf_dealloc(&self, size: usize) {
    #[warn(clippy::unwrap_used, reason = "FIXME")]
    #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
    self.processes.read()[self.current_pid.read().unwrap()]
        .buf_allocated
        .fetch_sub(size, Ordering::Relaxed);
}
/// Charges `size` bytes of kernel-buffer usage to process `pid`.
/// Returns `Err(InvalidPid)` if no process with that PID exists.
pub fn record_buf_alloc(&self, pid: usize, size: usize) -> Result<(), InvalidPid> {
    let processes = self.processes.read();
    let process = processes.get(pid).ok_or(InvalidPid)?;
    process.buf_allocated.fetch_add(size, Ordering::Relaxed);
    Ok(())
}
/// Releases `size` bytes of kernel-buffer usage from process `pid`.
/// Mirror of [`record_buf_alloc`]; returns `Err(InvalidPid)` if no process
/// with that PID exists.
pub fn record_buf_dealloc(&self, pid: usize, size: usize) -> Result<(), InvalidPid> {
    let processes = self.processes.read();
    let process = processes.get(pid).ok_or(InvalidPid)?;
    process.buf_allocated.fetch_sub(size, Ordering::Relaxed);
    Ok(())
}
pub fn print_stats(&self) {
let mut total = KERNEL_SPACE.lock().get_bytes_allocated();
println!(
@ -436,15 +398,8 @@ impl Tasking {
|| ACTIVE_SPACE.lock().get_bytes_allocated(),
|space| space.get_bytes_allocated(),
);
let buf_bytes_used = process.buf_allocated.load(Ordering::Relaxed);
total += bytes_used;
total += buf_bytes_used;
println!(
"[TASKING] PID {} used {} + {} of kernel buffers",
i,
SizeFormatter::new(bytes_used, BINARY),
SizeFormatter::new(buf_bytes_used, BINARY)
);
println!("[TASKING] PID {} used {}", i, SizeFormatter::new(bytes_used, BINARY),);
}
println!("[TASKING] Total used {} ({})", SizeFormatter::new(total, BINARY), total / 4096);
}

View File

@ -17,7 +17,7 @@ use x86_64::{
registers::control::Cr3,
structures::paging::{
frame::PhysFrameRange,
mapper::{MapToError, TranslateResult, UnmapError},
mapper::{MapToError, MappedFrame, TranslateResult, UnmapError},
page::PageRange,
FrameAllocator, FrameDeallocator, Mapper, OffsetPageTable, Page, PageTable, PageTableFlags,
PhysFrame, Size4KiB, Translate,
@ -643,6 +643,18 @@ impl AddressSpace {
}
/// Unmaps `num_pages` pages starting at `page`, flushing the TLB entry for
/// each, WITHOUT freeing the backing frames (see `unmap_and_free` for that).
///
/// Returns an error if the range is invalid or any page in it fails to
/// unmap; pages unmapped before a failure stay unmapped.
pub fn unmap(&mut self, page: Page, num_pages: usize) -> Result<(), PagingError> {
    self.check_request_valid(page, num_pages)?;
    #[expect(
        clippy::arithmetic_side_effects,
        reason = "check_request_valid guarantees this won't overflow"
    )]
    let end = page + u64(num_pages);
    for p in (PageRange { start: page, end }) {
        let (_frame, flusher) = self.mapper.unmap(p)?;
        flusher.flush();
    }
    Ok(())
}
pub fn unmap_and_free(&mut self, page: Page, num_pages: usize) -> Result<(), PagingError> {
self.check_request_valid(page, num_pages)?;
#[expect(
clippy::arithmetic_side_effects,
@ -659,6 +671,36 @@ impl AddressSpace {
Ok(())
}
/// Moves the 4 KiB mappings for `num_pages` pages starting at `page` out of
/// this address space into a freshly reserved free range in `dest`, and
/// returns the first page of that destination range.
///
/// Each mapped source page is re-mapped in `dest` with the same frame and
/// flags and then unmapped here — the frame contents are never copied or
/// freed. Source pages that are unmapped, huge, or not frame-aligned
/// (`offset != 0`) are silently skipped, leaving a hole in the destination
/// range.
// NOTE(review): BIT_9 appears to tag pages whose frame allocation is charged
// to the owning space (record_alloc/record_dealloc bookkeeping) — confirm.
pub fn move_mappings_free(
    &mut self,
    page: Page,
    num_pages: usize,
    dest: &mut Self,
) -> Result<Page, PagingError> {
    self.check_request_valid(page, num_pages)?;
    // Reserve a contiguous free range in the destination space up front.
    let dest_range_start = dest.find_free_pages(num_pages)?;
    #[expect(
        clippy::arithmetic_side_effects,
        reason = "check_request_valid guarentees this won't overflow"
    )]
    for (src_page, dest_page) in (PageRange { start: page, end: page + u64(num_pages) })
        .zip(PageRange { start: dest_range_start, end: dest_range_start + u64(num_pages) })
    {
        // Only present, page-aligned 4 KiB mappings are moved; anything else
        // is skipped rather than treated as an error.
        let TranslateResult::Mapped { frame: MappedFrame::Size4KiB(frame), offset: 0, flags } =
            self.mapper.translate(src_page.start_address())
        else {
            continue;
        };
        // Map into dest first, then unmap here, so the frame is never unowned.
        unsafe { dest.map_to(dest_page, frame, 1, flags)? };
        self.unmap(src_page, 1)?;
        if flags.contains(PageTableFlags::BIT_9) {
            // Transfer the 4 KiB accounting charge from source to destination.
            self.record_dealloc(4096);
            dest.record_alloc(4096);
        }
    }
    Ok(dest_range_start)
}
/// Finds a range of free pages and returns the starting page
fn find_free_pages(&self, num_pages: usize) -> Result<Page, PagingError> {
if num_pages == 0 {
@ -734,27 +776,29 @@ fn drop_table(table: &PageTable, level: u8) {
reason = "level is at minimum 2 here, thus this can't underflow"
)]
drop_table(
unsafe { PhysFrame::from_start_address(entry.addr()).unwrap().as_virt_ref() },
unsafe {
PhysFrame::from_start_address(entry.addr()).unwrap().as_virt_ref()
},
level - 1,
);
}
}
},
}
core::cmp::Ordering::Equal => {
for entry in table.iter() {
if entry.flags().contains(PageTableFlags::PRESENT)
&& entry.flags().contains(PageTableFlags::BIT_9)
{
//#[expect(
// clippy::unwrap_used,
// reason = "PhysFrame requires its argument to be 4k aligned, which is guaranteed since page tables must be 4k aligned"
//)]
#[expect(
clippy::unwrap_used,
reason = "This fails if the entry isn't present or it's a huge page. Presence was verified above, and level 1 tables cannot have huge entries."
)]
unsafe {
PHYSICAL_MEMORY.lock().deallocate_frame(entry.frame().unwrap());
};
}
}
},
}
core::cmp::Ordering::Less => unreachable!(),
}
#[expect(
@ -790,7 +834,6 @@ unsafe impl Allocator for ASpaceMutex {
PageTableFlags::empty()
};
let start = space.map_free(size / 4096, flags).map_err(|_| AllocError)?;
space.record_dealloc(size); // don't track allocates with this via the regular methods
Ok(unsafe { slice::from_raw_parts_mut(start.cast::<u8>(), size) }.into())
}
@ -806,7 +849,6 @@ unsafe impl Allocator for ASpaceMutex {
let start_page =
Page::from_start_address(VirtAddr::new(u64(ptr.as_ptr().expose_provenance()))).unwrap();
let length = layout.size().div_ceil(4096);
self.0.lock().record_alloc(length * 4096); //don't track allocates with this via the regular
#[expect(
clippy::unwrap_used,
reason = "
@ -814,6 +856,6 @@ unsafe impl Allocator for ASpaceMutex {
The kernel doesn't use huge pages and the mapping must be valid to be returned from allocate, so unmap cannot fail.
"
)]
self.0.lock().unmap(start_page, length).unwrap();
self.0.lock().unmap_and_free(start_page, length).unwrap();
}
}