Don't preallocate L4 kernel entries; instead, copy them on demand when needed

This commit is contained in:
pjht 2024-08-17 16:35:09 -05:00
parent 9b7e36056a
commit 84e0a4d593
Signed by: pjht
GPG Key ID: 7B5F6AFBEC7EE78E
2 changed files with 37 additions and 34 deletions

View File

@@ -20,7 +20,7 @@ use x86_64::{
set_general_handler,
structures::{
idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode},
paging::{mapper::TranslateResult, Page, PageTableFlags, PhysFrame, Translate},
paging::{mapper::TranslateResult, Page, PageTableFlags, PageTableIndex, PhysFrame, Translate},
},
PhysAddr, PrivilegeLevel, VirtAddr,
};
@@ -58,6 +58,18 @@ extern "x86-interrupt" fn page_fault_handler(
#[warn(clippy::expect_used, reason = "FIXME")]
let faulting_addr =
Cr2::read().expect("Cannot handle page faults caused by non-canonical addresses");
if faulting_addr.p4_index() >= PageTableIndex::new(256)
&& !ACTIVE_SPACE.lock().level_4_table()[faulting_addr.p4_index()]
.flags()
.contains(PageTableFlags::PRESENT)
&& KERNEL_SPACE.lock().level_4_table()[faulting_addr.p4_index()]
.flags()
.contains(PageTableFlags::PRESENT)
{
ACTIVE_SPACE.lock().level_4_table_mut()[faulting_addr.p4_index()] =
KERNEL_SPACE.lock().level_4_table()[faulting_addr.p4_index()].clone();
return;
}
if let Some(current_pid) = TASKING.current_pid() {
print!("PID {current_pid} ");
} else {

View File

@@ -187,36 +187,15 @@ pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
for i in 0..256 {
table[i].set_addr(PhysAddr::new(0), PageTableFlags::empty());
}
let mut num_pts_alloced = 0;
for i in 256..512 {
if table[i].flags().contains(PageTableFlags::PRESENT) {
let new_flags =
table[i].flags() | PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE;
table[i].set_flags(new_flags);
} else {
// SAFETY: We initialize the newly allocated table before we make a reference to it, so
// the reference cannot point to uninitialized data.
table[i].set_addr(
unsafe {
#[expect(
clippy::expect_used,
reason = "If we fail to allocate the kernel page table, it's fatal."
)]
let (new_child, new_child_phys) =
alloc_pt().expect("Could not allocate new kernel entry");
num_pts_alloced += 1;
new_child.write(PageTable::new());
new_child_phys
},
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
);
}
}
let mut kernel_space = AddressSpace::new_with_addr(table);
kernel_space.is_kernel = true;
kernel_space.record_alloc(num_pts_alloced * 4096);
let l4_virt = VirtAddr::from_ptr(ptr::from_ref(kernel_space.mapper.level_4_table()));
#[expect(
clippy::unwrap_used,
@@ -292,6 +271,14 @@ impl AddressSpace {
core::mem::replace(&mut *ACTIVE_SPACE.lock(), self)
}
/// Returns a shared reference to this address space's level-4 page table.
pub fn level_4_table(&self) -> &PageTable {
self.mapper.level_4_table()
}
/// Returns a mutable reference to this address space's level-4 page table.
/// Used by the page-fault handler to copy a missing kernel L4 entry from
/// `KERNEL_SPACE` into the active space on demand.
pub fn level_4_table_mut(&mut self) -> &mut PageTable {
self.mapper.level_4_table_mut()
}
fn check_request_valid(&self, start: Page, length: usize) -> Result<(), PagingError> {
if (self.is_kernel
&& (start < KERNEL_PAGE_RANGE.start
@@ -725,18 +712,22 @@ impl AddressSpace {
};
unsafe {
self.mapper.update_flags(page, (flags | set_flags) & !(clear_flags))?.flush();
self.mapper.set_flags_p3_entry(
page,
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
)?.flush_all();
self.mapper.set_flags_p2_entry(
page,
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
)?.flush_all();
self.mapper
.set_flags_p3_entry(
page,
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
)?
.flush_all();
self.mapper
.set_flags_p2_entry(
page,
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
)?
.flush_all();
}
}
Ok(())