Lazily save and restore FPU state as needed

pjht 2025-02-27 11:25:27 -06:00
parent cc18a3f7f0
commit 9cbe50e9be
Signed by: pjht
GPG Key ID: CA239FC6934E6F3A
3 changed files with 55 additions and 12 deletions
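
How the laziness works: the scheduler no longer saves and restores FPU state eagerly on every switch. Instead it only sets CR0.TS (TASK_SWITCHED). The first FPU/SSE instruction the newly scheduled thread executes then raises a device-not-available (#NM) exception, and the handler performs the real swap in switch_fpu_context. The sketch below is an illustration only, not part of the commit; it shows the two CR0 operations the diff relies on, using the same x86_64 crate API the kernel already uses, with hypothetical helper names:

use x86_64::registers::control::{Cr0, Cr0Flags};

// Arm the lazy switch: done conceptually at every context switch. Setting TS
// makes the next FPU/SSE instruction fault with #NM instead of silently using
// another thread's registers.
fn defer_fpu_switch() {
    // SAFETY: only toggles the TS bit; no other CR0 state is changed.
    unsafe {
        Cr0::update(|cr0| cr0.insert(Cr0Flags::TASK_SWITCHED));
    }
}

// Disarm it from the #NM handler so the faulting instruction can be retried
// once switch_fpu_context has swapped the register state.
fn allow_fpu_use() {
    // SAFETY: only toggles the TS bit.
    unsafe {
        Cr0::update(|cr0| cr0.remove(Cr0Flags::TASK_SWITCHED));
    }
}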

View File

@@ -1,10 +1,5 @@
use crate::{
bootinfo::BOOTINFO,
pit::NUM_INTERRUPTS,
print, println,
tasking::{InvalidPid, IpcMessage, SleepReason},
virtual_memory::{ASpaceMutex, AddressSpace, PagingError, ACTIVE_SPACE, KERNEL_SPACE},
TASKING,
bootinfo::BOOTINFO, dbg, pit::NUM_INTERRUPTS, print, println, tasking::{InvalidPid, IpcMessage, SleepReason}, virtual_memory::{ASpaceMutex, AddressSpace, PagingError, ACTIVE_SPACE, KERNEL_SPACE}, TASKING
};
use alloc::{boxed::Box, vec::Vec};
use az::WrappingCast;
@@ -46,6 +41,7 @@ static IDT: Lazy<InterruptDescriptorTable> = Lazy::new(|| {
set_general_handler!(&mut idt, irq_handler, 32..48);
idt[0x80].set_handler_fn(syscall_handler_header).set_privilege_level(PrivilegeLevel::Ring3);
idt.page_fault.set_handler_fn(page_fault_handler);
idt.device_not_available.set_handler_fn(device_not_available_handler);
idt
});
static PICS: Mutex<ChainedPics> = Mutex::new(unsafe { ChainedPics::new(IRQ_BASE, IRQ_BASE + 8) });
@@ -120,6 +116,12 @@ extern "x86-interrupt" fn page_fault_handler(
TASKING.exit(254);
}
extern "x86-interrupt" fn device_not_available_handler(
_stack_frame: InterruptStackFrame,
) {
TASKING.switch_fpu_context();
}
#[expect(clippy::needless_pass_by_value, reason = "Signature dictated by external crate")]
fn general_handler(stack_frame: InterruptStackFrame, index: u8, _error_code: Option<u64>) {
println!("Other interrupt {index}\n{stack_frame:#?}");

View File

@@ -144,6 +144,9 @@ pub fn main() {
});
asm!("FNINIT");
tasking::store_initial_fpu_state();
Cr0::update(|cr0| {
cr0.insert(Cr0Flags::TASK_SWITCHED);
})
}
gdt::init();
Lazy::force(&PHYSICAL_MEMORY);

View File

@@ -10,8 +10,7 @@ use crossbeam_queue::SegQueue;
use slab::Slab;
use spin::{Lazy, Mutex, Once, RwLock};
use x86_64::{
structures::paging::{Page, PageTableFlags},
VirtAddr,
registers::control::{Cr0, Cr0Flags}, structures::paging::{Page, PageTableFlags}, VirtAddr
};
#[naked]
@@ -260,6 +259,7 @@ pub static TASKING: Lazy<Tasking> = Lazy::new(|| Tasking {
current_tid: RwLock::new(None),
freeable_kstacks: Mutex::new(Vec::new()),
wfi_loop: AtomicBool::new(false),
current_fpu_context: Mutex::new(None),
});
#[derive(Debug)]
@@ -270,6 +270,7 @@ pub struct Tasking {
current_tid: RwLock<Option<usize>>,
freeable_kstacks: Mutex<Vec<Box<[usize], &'static ASpaceMutex>>>,
wfi_loop: AtomicBool,
current_fpu_context: Mutex<Option<(usize, usize)>>,
}
pub const KSTACK_SIZE: usize = (4 * 4096) / 8;
@@ -378,15 +379,18 @@ impl Tasking {
if self.current_thread(|thread| thread.sleeping.read().is_none()) {
self.ready_to_run.lock().push_back((current_pid, self.current_tid().unwrap()));
}
self.current_thread(|thread| thread.fpu_state.lock().save());
let curr_stack = self.current_thread_mut(|thread| addr_of_mut!(thread.kernel_esp));
*self.current_pid.write() = Some(next_process_pid);
*self.current_tid.write() = Some(next_process_tid);
self.current_thread(|thread| thread.fpu_state.lock().load());
let kernel_esp = self.current_thread(|thread| {
gdt::set_tss_stack(thread.kernel_esp_top);
thread.kernel_esp
});
unsafe {
Cr0::update(|cr0| {
cr0.insert(Cr0Flags::TASK_SWITCHED);
});
}
switch_to_asm(curr_stack, kernel_esp);
break;
} else if self.current_thread(|thread| thread.sleeping.read().is_some()) {
@@ -410,6 +414,12 @@ impl Tasking {
pub fn exit(&self, code: u8) -> ! {
if let Some(current_pid) = self.current_pid() {
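// If the exiting process currently owns the FPU, drop the ownership record
// so its state is never saved again after the process is gone.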
let current_fpu_context = *self.current_fpu_context.lock();
if let Some((fpu_pid, _fpu_tid)) = current_fpu_context {
if current_pid == fpu_pid {
*self.current_fpu_context.lock() = None;
}
}
self.ready_to_run.lock().retain(|&(ent_pid, _ent_tid)| ent_pid != current_pid);
if let Some(&proc_man_pid) = REGISTERD_PIDS.read().get(&3) {
let mut varint_buf = unsigned_varint::encode::u64_buffer();
@@ -466,11 +476,15 @@ impl Tasking {
.expect("Non-current process has active page table")
.activate();
});
self.current_thread(|thread| thread.fpu_state.lock().load());
let kernel_esp = self.current_thread_mut(|thread| {
gdt::set_tss_stack(thread.kernel_esp_top);
thread.kernel_esp
});
unsafe {
Cr0::update(|cr0| {
cr0.insert(Cr0Flags::TASK_SWITCHED);
});
}
switch_to_asm_exit(kernel_esp);
unreachable!()
} else {
@@ -485,6 +499,12 @@ impl Tasking {
loop {
#[warn(clippy::unwrap_used, reason = "FIXME(?)")]
let current_pid = self.current_pid().unwrap();
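// Drop the FPU ownership record only if this exact thread (pid and tid) owns the FPU.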
let current_fpu_context = *self.current_fpu_context.lock();
if let Some((fpu_pid, fpu_tid)) = current_fpu_context {
if current_pid == fpu_pid && self.current_tid().unwrap() == fpu_tid {
*self.current_fpu_context.lock() = None;
}
}
let rtr_head = self.ready_to_run.lock().pop_front();
if let Some((next_process_pid, next_process_tid)) = rtr_head {
self.wfi_loop.store(false, Ordering::Relaxed);
@@ -523,11 +543,15 @@ impl Tasking {
}
*self.current_pid.write() = Some(next_process_pid);
*self.current_tid.write() = Some(next_process_tid);
self.current_thread(|thread| thread.fpu_state.lock().load());
let kernel_esp = self.current_thread_mut(|thread| {
gdt::set_tss_stack(thread.kernel_esp_top);
thread.kernel_esp
});
unsafe {
Cr0::update(|cr0| {
cr0.insert(Cr0Flags::TASK_SWITCHED);
});
}
switch_to_asm_exit(kernel_esp);
unreachable!()
} else {
@@ -756,4 +780,18 @@ impl Tasking {
// }
// println!("[TASKING] Total used {} ({})", SizeFormatter::new(total, BINARY), total / 4096);
//}
pub fn switch_fpu_context(&self) {
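// Clear CR0.TS so FPU/SSE instructions stop faulting while the state is swapped.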
unsafe {
Cr0::update(|cr0| {
cr0.remove(Cr0Flags::TASK_SWITCHED);
});
}
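// Save the previous owner's FPU state, if any thread currently owns the FPU.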
let current_fpu_context = *self.current_fpu_context.lock();
if let Some((fpu_pid, fpu_tid)) = current_fpu_context {
self.processes.read()[fpu_pid].threads.read()[fpu_tid].fpu_state.lock().save();
}
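// Load this thread's FPU state and record it as the new owner.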
self.current_thread(|thread| thread.fpu_state.lock().load());
*self.current_fpu_context.lock() = Some((self.current_pid().unwrap(), self.current_tid().unwrap()));
}
}
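
For reference: the diff calls thread.fpu_state.lock().save() / .load() and tasking::store_initial_fpu_state(), but the FpuState type itself is not part of this commit. A minimal sketch of what such a type could look like, assuming the x87/SSE state is kept in a 512-byte, 16-byte-aligned FXSAVE area (hypothetical; the repository's actual implementation may differ):

use core::arch::asm;
use spin::Mutex;

// Hypothetical FpuState: one FXSAVE area per thread. FXSAVE64/FXRSTOR64
// require 512 bytes of memory aligned to 16 bytes.
#[repr(C, align(16))]
pub struct FpuState([u8; 512]);

impl FpuState {
    /// Save the CPU's current FPU/SSE state into this buffer.
    pub fn save(&mut self) {
        unsafe { asm!("fxsave64 [{}]", in(reg) self.0.as_mut_ptr()) };
    }

    /// Restore this buffer's contents into the FPU/SSE registers.
    pub fn load(&self) {
        unsafe { asm!("fxrstor64 [{}]", in(reg) self.0.as_ptr()) };
    }
}

// Hypothetical template captured right after FNINIT in main(); new threads
// would start from a copy of this freshly initialized state.
static INITIAL_FPU_STATE: Mutex<FpuState> = Mutex::new(FpuState([0; 512]));

pub fn store_initial_fpu_state() {
    INITIAL_FPU_STATE.lock().save();
}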