use crate::{
    gdt,
    interrupts::{send_ipc_to, REGISTERD_PIDS},
    println, qemu_exit,
    virtual_memory::{ASpaceMutex, AddressSpace, PagingError, ACTIVE_SPACE, KERNEL_SPACE},
};
use alloc::{
    borrow::ToOwned, boxed::Box, collections::VecDeque, ffi::CString, string::ToString, vec::Vec,
};
use core::{
    alloc::Layout,
    arch::naked_asm,
    ffi::CStr,
    ptr::{addr_of, addr_of_mut},
    sync::atomic::{AtomicBool, Ordering},
};
use crossbeam_queue::SegQueue;
use humansize::{SizeFormatter, BINARY};
use slab::Slab;
use spin::{Lazy, Mutex, RwLock};
use x86_64::{
    structures::paging::{Page, PageTableFlags},
    VirtAddr,
};
use cast::{u64, usize};

/// Saves the current task's callee-saved registers and kernel stack pointer
/// into `*current_stack`, then switches to the kernel stack at `next_stack`
/// and restores the registers that task saved when it was switched away from.
#[naked]
extern "C" fn switch_to_asm(current_stack: *mut *mut usize, next_stack: *mut usize) {
    unsafe {
        naked_asm!(
            "push rbp",
            "push rbx",
            "push r12",
            "push r13",
            "push r14",
            "push r15",
            "mov [rdi], rsp",
            "mov rsp, rsi",
            "pop r15",
            "pop r14",
            "pop r13",
            "pop r12",
            "pop rbx",
            "pop rbp",
            "ret",
        );
    }
}

/// Switches to the kernel stack at `next_stack` without saving the current
/// task's state; used when the current task is exiting and will never resume.
#[naked]
extern "C" fn switch_to_asm_exit(next_stack: *mut usize) {
    unsafe {
        naked_asm!(
            "mov rsp, rdi",
            "pop r15",
            "pop r14",
            "pop r13",
            "pop r12",
            "pop rbx",
            "pop rbp",
            "ret",
        );
    }
}

/// First code run on a new task's kernel stack: pops the user stack pointer
/// and entry point placed there by `new_process`, builds a fake interrupt
/// frame, and `iretq`s into user mode.
#[naked]
extern "C" fn task_init() {
    unsafe {
        naked_asm!(
            "pop rcx",       // Get the user stack pointer
            "pop rbx",       // Get the entry point
            "push 43",       // Push the stack segment selector - same as data
            "push rcx",      // Push the stack pointer
            "pushfq",        // Get the flags into RAX
            "pop rax",
            "or rax, 0x200", // Enable interrupts in the stored copy
            "push rax",      // Push the flags
            "push 51",       // Push the code selector
            "push rbx",      // Push the entry point
            "iretq",         // Return from the fake interrupt and enter user mode
        )
    }
}

/// Why a process is currently not schedulable.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum SleepReason {
    WaitingForIPC,
    NewProcess,
    Exited,
}

/// Per-process state tracked by the scheduler.
#[derive(Debug)]
struct Process {
    address_space: Option<AddressSpace>,
    kernel_stack: Box<[usize], &'static ASpaceMutex>,
    kernel_esp: *mut usize,
    kernel_esp_top: VirtAddr,
    arguments: (*const *const u8, usize),
    address_spaces: Mutex<Slab<AddressSpace>>,
    data_buffers: Mutex<Slab<*mut [u8]>>,
    message_queue: Mutex<SegQueue<(Box<[u8], &'static ASpaceMutex>, usize)>>,
    sleeping: RwLock<Option<SleepReason>>,
}

unsafe impl Send for Process {}
unsafe impl Sync for Process {}

#[derive(Copy, Clone, Debug)]
pub struct InvalidPid;

/// The global scheduler instance.
pub static TASKING: Lazy<Tasking> = Lazy::new(|| Tasking {
    processes: RwLock::new(Slab::new()),
    ready_to_run: Mutex::new(VecDeque::new()),
    current_pid: RwLock::new(None),
    freeable_kstacks: Mutex::new(Vec::new()),
    wfi_loop: AtomicBool::new(false),
});

/// Scheduler state: the process table, run queue, and related bookkeeping.
#[derive(Debug)]
pub struct Tasking {
    processes: RwLock<Slab<Process>>,
    ready_to_run: Mutex<VecDeque<usize>>,
    current_pid: RwLock<Option<usize>>,
    freeable_kstacks: Mutex<Vec<Box<[usize], &'static ASpaceMutex>>>,
    wfi_loop: AtomicBool,
}

/// Length of a kernel stack in `usize` elements (4 pages, i.e. 16 KiB).
pub const KSTACK_SIZE: usize = (4 * 4096) / 8;

impl Tasking {
    pub fn new_process(
        &self,
        entry_point: *const extern "C" fn() -> !,
        mut address_space: AddressSpace,
        arguments: &[&CStr],
    ) -> Result<usize, PagingError> {
        // Lay out the new kernel stack so that the first switch pops six zeroed
        // callee-saved registers, "returns" into `task_init`, and `task_init`
        // then pops the user stack pointer and the entry point.
        let mut kernel_stack = Vec::with_capacity_in(KSTACK_SIZE, &*KERNEL_SPACE);
        kernel_stack.resize(KSTACK_SIZE - 0x3, 0);
        #[expect(clippy::as_conversions, reason = "Needed to get address of function")]
        kernel_stack.push(task_init as usize);
        // Initial user stack pointer: the top of the 16-page user stack mapped below.
        kernel_stack.push(0xFFF_FF80_0000 + (16 * 4096));
        kernel_stack.push(entry_point.expose_provenance());
        let mut kernel_stack = kernel_stack.into_boxed_slice();
        address_space.map_assert_unused(
            #[expect(
                clippy::unwrap_used,
                reason = "from_start_address requires the address to be page aligned, which it is."
            )]
            Page::from_start_address(VirtAddr::new(0xFFF_FF80_0000)).unwrap(),
            16,
            PageTableFlags::USER_ACCESSIBLE,
        )?;
        let arguments = arguments.iter().map(|arg| (*arg).to_owned()).collect::<Vec<CString>>();
        #[expect(
            clippy::unwrap_used,
            reason = "This fails if the byte size of the array exceeds isize::MAX, which with 48-bit virtual addresses cannot happen"
        )]
        let mut args_layout = Layout::array::<*const u8>(arguments.len()).unwrap();
        let mut arg_offsets = Vec::new();
        for argument in &arguments {
            #[expect(
                clippy::unwrap_used,
                reason = "This fails if the total size of the layout exceeds isize::MAX, which with 48-bit virtual addresses cannot happen"
            )]
            let (new_layout, offset) =
                args_layout.extend(Layout::for_value(argument.to_bytes_with_nul())).unwrap();
            args_layout = new_layout;
            arg_offsets.push(offset);
        }
        args_layout = {
            #[expect(
                clippy::unwrap_used,
                reason = "This fails if the aligned size of the layout exceeds isize::MAX, which with 48-bit virtual addresses cannot happen"
            )]
            args_layout.align_to(4096).unwrap().pad_to_align()
        };
        let user_arg_mem = KERNEL_SPACE
            .lock()
            .map_free(args_layout.size() / 4096, PageTableFlags::USER_ACCESSIBLE)?;
        address_space.run(|| unsafe {
            let mut ptr_ptr: *mut *const u8 = user_arg_mem.cast();
            for (&offset, argument) in arg_offsets.iter().zip(arguments.iter()) {
                let arg_ptr = user_arg_mem.add(offset);
                #[expect(
                    clippy::arithmetic_side_effects,
                    reason = "This can never overflow as count_bytes is always one less than the bytes used for the string, which can be at most 2^64-1."
                )]
                arg_ptr.copy_from(argument.as_ptr().cast(), argument.count_bytes() + 1);
                ptr_ptr.write(arg_ptr);
                ptr_ptr = ptr_ptr.add(1);
            }
        });
        let pid = self.processes.write().insert(Process {
            #[expect(
                clippy::indexing_slicing,
                reason = "Stack length is KSTACK_SIZE, so this cannot panic"
            )]
            kernel_esp: &mut kernel_stack[KSTACK_SIZE - 9],
            #[expect(
                clippy::indexing_slicing,
                reason = "Stack length is KSTACK_SIZE, so this cannot panic"
            )]
            kernel_esp_top: VirtAddr::from_ptr(
                addr_of!(kernel_stack[KSTACK_SIZE - 1]).wrapping_add(1),
            ),
            kernel_stack,
            address_space: Some(address_space),
            address_spaces: Mutex::new(Slab::new()),
            data_buffers: Mutex::new(Slab::new()),
            message_queue: Mutex::new(SegQueue::new()),
            sleeping: RwLock::new(Some(SleepReason::NewProcess)),
            arguments: (user_arg_mem.cast(), arguments.len()),
        });
        // Notify the process manager (registered under ID 3), if present, that
        // a new process has been created.
        if let Some(&proc_man_pid) = REGISTERD_PIDS.read().get(&3) {
            let mut len: usize;
            let rounded_size = 32usize.next_multiple_of(4096);
            let mut buffer = Vec::with_capacity_in(rounded_size, &*ACTIVE_SPACE);
            buffer.resize(rounded_size, 0);
            let mut buffer = buffer.into_boxed_slice();
            buffer[0..8].copy_from_slice(&u64::MAX.to_le_bytes());
            buffer[8..10].copy_from_slice(&0u16.to_le_bytes());
            buffer[10] = 0;
            buffer[11..19].copy_from_slice(&0u64.to_le_bytes());
            buffer[19..21].copy_from_slice(&8u16.to_le_bytes());
            buffer[21..23].copy_from_slice(&6u16.to_le_bytes());
            len = 23;
            len += unsigned_varint::encode::u64(
                u64(pid),
                (&mut buffer[len..len + 10]).try_into().unwrap(),
            )
            .len();
            if let Some(current_pid) = *(self.current_pid.read()) {
                buffer[len] = 1;
                len += 1;
                len += unsigned_varint::encode::u64(
                    u64(current_pid),
                    (&mut buffer[len..len + 10]).try_into().unwrap(),
                )
                .len();
            } else {
                buffer[len] = 0;
                len += 1;
            }
            send_ipc_to(usize(proc_man_pid), buffer, len);
        } else {
            println!("[TASKING] No process manager when creating PID {pid}");
        }
        self.ready_to_run.lock().push_back(pid);
        Ok(pid)
    }

    /// Returns whether it is currently safe to call `task_yield`: no scheduler
    /// lock is held, the kernel address space is unlocked, and the scheduler is
    /// not in its wait-for-interrupt loop.
    pub fn ok_to_yield(&self) -> bool {
        !(self.freeable_kstacks.is_locked()
            || (self.current_pid.reader_count() > 0)
            || (self.current_pid.writer_count() > 0)
            || self.ready_to_run.is_locked()
            || (self.processes.reader_count() > 0)
            || (self.processes.writer_count() > 0)
            || KERNEL_SPACE.is_locked()
            || self.wfi_loop.load(Ordering::Relaxed))
    }

    /// Switches to the next ready process, if there is one. If nothing is ready
    /// and the current process is sleeping, halts until an interrupt arrives and
    /// then re-checks.
    pub fn task_yield(&self) {
        loop {
            self.freeable_kstacks.lock().clear();
            let Some(current_pid) = *self.current_pid.read() else {
                self.wfi_loop.store(false, Ordering::Relaxed);
                break;
            };
            let next_process_pid = self.ready_to_run.lock().pop_front();
            if let Some(next_process_pid) = next_process_pid {
                self.wfi_loop.store(false, Ordering::Relaxed);
                if next_process_pid == self.current_pid().unwrap() {
                    println!("Yielding to current process! Returning");
                    break;
                }
                #[expect(
                    clippy::expect_used,
                    reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
                )]
                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
                let current_address_space = self.processes.write()[next_process_pid]
                    .address_space
                    .take()
                    .expect("Non-current process has active page table")
                    .activate();
                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
                self.processes.write()[current_pid].address_space = Some(current_address_space);
                let processes = self.processes.read();
                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
                let current_process = &processes[current_pid];
                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
                let next_process = &processes[next_process_pid];
                gdt::set_tss_stack(next_process.kernel_esp_top);
                if current_process.sleeping.read().is_none() {
                    self.ready_to_run.lock().push_back(current_pid);
                }
                let kernel_esp = next_process.kernel_esp;
                let previous_process = current_pid;
                *self.current_pid.write() = Some(next_process_pid);
                core::mem::drop(processes);
                let mut processes = self.processes.write();
                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
                let curr_stack = addr_of_mut!(processes[previous_process].kernel_esp);
                core::mem::drop(processes);
                switch_to_asm(curr_stack, kernel_esp);
                break;
            } else if {
                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
                let res = self.processes.read()[current_pid].sleeping.read().is_some();
                res
            } {
                //println!("All processes sleeping, exiting QEMU");
                //self.print_stats();
                //qemu_exit::exit_qemu();
                //println!("All processes sleeping, waiting for interrupt");
                self.wfi_loop.store(true, Ordering::Relaxed);
                x86_64::instructions::interrupts::enable_and_hlt();
                x86_64::instructions::interrupts::disable();
            } else {
                self.wfi_loop.store(false, Ordering::Relaxed);
                break;
            }
        }
    }

    pub fn current_pid(&self) -> Option<usize> {
        *self.current_pid.read()
    }

    /// Marks the current process as exited and switches to the next ready
    /// process, never returning. The exited process's kernel stack is reclaimed
    /// later via `clear_exited_pid`.
    pub fn exit(&self) -> ! {
        loop {
            let next_process_pid = self.ready_to_run.lock().pop_front();
            if let Some(next_process_pid) = next_process_pid {
                self.wfi_loop.store(false, Ordering::Relaxed);
                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
                let mut processes = self.processes.write();
                if let Some(current_pid) = *self.current_pid.read() {
                    //self.freeable_kstacks.lock().push(processes.remove(current_pid).kernel_stack);
                    *processes[current_pid].sleeping.write() = Some(SleepReason::Exited);
                }
                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
                let next_process = &mut processes[next_process_pid];
                #[expect(
                    clippy::expect_used,
                    reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
                )]
                next_process
                    .address_space
                    .take()
                    .expect("Non-current process has active page table")
                    .activate();
                gdt::set_tss_stack(next_process.kernel_esp_top);
                let kernel_esp = next_process.kernel_esp;
                *self.current_pid.write() = Some(next_process_pid);
                core::mem::drop(processes);
                switch_to_asm_exit(kernel_esp);
                unreachable!()
            } else {
                //println!("Last non-sleeping process exited, exiting QEMU");
                self.wfi_loop.store(true, Ordering::Relaxed);
                x86_64::instructions::interrupts::enable_and_hlt();
                x86_64::instructions::interrupts::disable();
                //self.exit();
            }
        }
    }

    /// Frees the kernel stack and process-table entry of a process that has
    /// already exited; fails if `pid` is unknown or the process has not exited.
    pub fn clear_exited_pid(&self, pid: usize) -> Result<(), ()> {
        let mut processes = self.processes.write();
        let process = processes.get(pid).ok_or(())?;
        if *process.sleeping.read() != Some(SleepReason::Exited) {
            return Err(());
        }
        self.freeable_kstacks.lock().push(processes.remove(pid).kernel_stack);
        Ok(())
    }

    pub fn address_spaces_mut<F: FnOnce(&mut Slab<AddressSpace>) -> T, T>(&self, func: F) -> T {
        let processes = self.processes.read();
        #[warn(clippy::unwrap_used, reason = "FIXME")]
        #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
        let mut aspaces = processes[self.current_pid.read().unwrap()].address_spaces.lock();
        func(&mut aspaces)
    }

    pub fn data_buffers_mut<F: FnOnce(&mut Slab<*mut [u8]>) -> T, T>(&self, func: F) -> T {
        let processes = self.processes.read();
        #[warn(clippy::unwrap_used, reason = "FIXME")]
        #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
        let mut dbufs = processes[self.current_pid.read().unwrap()].data_buffers.lock();
        func(&mut dbufs)
    }

    pub fn proc_data_buffers_mut<F: FnOnce(&mut Slab<*mut [u8]>) -> T, T>(
        &self,
        pid: usize,
        func: F,
    ) -> Result<T, InvalidPid> {
        let processes = self.processes.read();
        let mut dbufs = processes.get(pid).ok_or(InvalidPid)?.data_buffers.lock();
        Ok(func(&mut dbufs))
    }

    pub fn current_message_queue_mut<
        F: FnOnce(&mut SegQueue<(Box<[u8], &'static ASpaceMutex>, usize)>) -> T,
        T,
    >(
        &self,
        func: F,
    ) -> T {
        let processes = self.processes.read();
        #[warn(clippy::unwrap_used, reason = "FIXME")]
        #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
        let mut queue = processes[self.current_pid.read().unwrap()].message_queue.lock();
        func(&mut queue)
    }

    pub fn message_queue_mut<
        F: FnOnce(&mut SegQueue<(Box<[u8], &'static ASpaceMutex>, usize)>) -> T,
        T,
    >(
        &self,
        pid: usize,
        func: F,
    ) -> Result<T, InvalidPid> {
        let processes = self.processes.read();
        let mut queue = processes.get(pid).ok_or(InvalidPid)?.message_queue.lock();
        Ok(func(&mut queue))
    }

    pub fn address_space_mut<F: FnOnce(Option<&mut AddressSpace>) -> T, T>(
        &self,
        pid: usize,
        func: F,
    ) -> Result<T, InvalidPid> {
        let mut processes = self.processes.write();
        let aspace = processes.get_mut(pid).ok_or(InvalidPid)?.address_space.as_mut();
        Ok(func(aspace))
    }

    pub fn proc_sleeping(&self, pid: usize) -> Result<Option<SleepReason>, InvalidPid> {
        Ok(*(self.processes.read().get(pid).ok_or(InvalidPid)?.sleeping.read()))
    }

    /// Marks the current process as sleeping for `reason` and yields.
    pub fn sleep(&self, reason: SleepReason) {
        #[warn(clippy::unwrap_used, reason = "FIXME")]
        #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
        *self.processes.read()[self.current_pid.read().unwrap()].sleeping.write() = Some(reason);
        self.task_yield();
    }

    /// Wakes `pid` if it is sleeping for `reason`, putting it back on the run
    /// queue (unless it is the current process).
    pub fn wake(&self, pid: usize, reason: SleepReason) -> Result<(), InvalidPid> {
        let processes = self.processes.read();
        let process = processes.get(pid).ok_or(InvalidPid)?;
        let mut sleeping = process.sleeping.write();
        if *sleeping == Some(reason) {
            if Some(pid) != self.current_pid() {
                self.ready_to_run.lock().push_back(pid);
            }
            *sleeping = None;
        }
        Ok(())
    }

    pub fn arguments(&self) -> (*const *const u8, usize) {
        #[warn(clippy::unwrap_used, reason = "FIXME")]
        #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
        self.processes.read()[self.current_pid.read().unwrap()].arguments
    }

    /// Prints per-process and total memory usage statistics.
    pub fn print_stats(&self) {
        let mut total = KERNEL_SPACE.lock().get_bytes_allocated();
        println!(
"[TASKING] Kernel used {}", SizeFormatter::new(KERNEL_SPACE.lock().get_bytes_allocated(), BINARY) ); for (i, process) in self.processes.read().iter() { let bytes_used = process.address_space.as_ref().map_or_else( || ACTIVE_SPACE.lock().get_bytes_allocated(), |space| space.get_bytes_allocated(), ); total += bytes_used; let name = if process.arguments.1 > 0 { unsafe { CStr::from_ptr(process.arguments.0.read().cast()) } .to_string_lossy() .into_owned() } else { "UNKNOWN".to_string() }; println!( "[TASKING] PID {} ({}) used {}", i, name, SizeFormatter::new(bytes_used, BINARY), ); } println!("[TASKING] Total used {} ({})", SizeFormatter::new(total, BINARY), total / 4096); } }