Compare commits

...

2 Commits

3 changed files with 124 additions and 110 deletions

View File

@@ -188,14 +188,11 @@ fn irq_handler(_stack_frame: InterruptStackFrame, index: u8, _error_code: Option
             clippy::unwrap_used,
             reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
         )]
-        let sleep_status = TASKING.proc_sleeping(pid).unwrap();
-        if sleep_status == Some(SleepReason::WaitingForIPC) {
-            #[expect(
-                clippy::unwrap_used,
-                reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
-            )]
-            TASKING.wake(pid).unwrap();
-        }
+        #[expect(
+            clippy::unwrap_used,
+            reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
+        )]
+        TASKING.wake(pid, SleepReason::WaitingForIPC).unwrap();
     } else {
         println!("irq 1 msg: Bad PID ({})", pid);
     }
@@ -549,14 +546,11 @@ extern "C" fn syscall_handler() {
                 clippy::unwrap_used,
                 reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
             )]
-            let sleep_status = TASKING.proc_sleeping(pid).unwrap();
-            if sleep_status == Some(SleepReason::WaitingForIPC) {
-                #[expect(
-                    clippy::unwrap_used,
-                    reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
-                )]
-                TASKING.wake(pid).unwrap();
-            }
+            #[expect(
+                clippy::unwrap_used,
+                reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
+            )]
+            TASKING.wake(pid, SleepReason::WaitingForIPC).unwrap();
             retval = 0;
         } else {
             println!("ipc_send: Bad PID ({})", pid);
@@ -752,6 +746,14 @@ extern "C" fn syscall_handler() {
                 retval = 1;
             }
         }
+        24 => {
+            let pid = usize(regs.rcx);
+            if TASKING.wake(pid, SleepReason::NewProcess).is_err() {
+                retval = 1;
+            } else {
+                retval = 0;
+            }
+        }
         _ => (),
     };
     unsafe { SYSCALL_REGS = regs };
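For reference, a minimal sketch of what the new 24 arm does. The free-function form and the names used here are illustrative only; what comes from the diff is the PID argument in rcx, the reason-gated wake with SleepReason::NewProcess, and the 0 = success / 1 = failure return convention.

    // Stand-ins for the kernel's types; only the shape of the arm is modeled here.
    #[derive(Clone, Copy, PartialEq, Eq)]
    enum SleepReason {
        WaitingForIPC,
        NewProcess,
    }
    struct InvalidPid;

    // `wake` stands in for TASKING.wake(pid, SleepReason::NewProcess).
    fn syscall_wake_new_process(
        pid: usize,
        wake: impl Fn(usize, SleepReason) -> Result<(), InvalidPid>,
    ) -> u64 {
        match wake(pid, SleepReason::NewProcess) {
            // Valid PID: the wake takes effect only if the process was still
            // sleeping as a not-yet-started process; otherwise it is a no-op.
            Ok(()) => 0,
            // Unknown PID.
            Err(InvalidPid) => 1,
        }
    }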

View File

@@ -102,7 +102,7 @@ use physical_memory::PHYSICAL_MEMORY;
 use serial::SECOND_PORT;
 use spin::lazy::Lazy;
 use tar_no_std::TarArchiveRef;
-use tasking::TASKING;
+use tasking::{SleepReason, TASKING};
 use virtual_memory::{ACTIVE_SPACE, KERNEL_SPACE};
 use x86_64::{
     registers::rflags::{self, RFlags},
@@ -290,11 +290,12 @@ pub fn main() {
         clippy::unwrap_used,
         reason = "Argument does not contain a null byte, thus this cannot panic"
     )]
-    TASKING
+    let init_pid = TASKING
         .new_process(
             ptr::with_exposed_provenance(usize(init.ehdr.e_entry)),
             init_addr_space,
             &[&CString::new(b"init").unwrap()],
         )
         .expect("Failed to create init process");
+    TASKING.wake(init_pid, SleepReason::NewProcess).unwrap();
 }

View File

@@ -85,6 +85,7 @@ extern "C" fn task_init() {
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub enum SleepReason {
     WaitingForIPC,
+    NewProcess,
 }
 
 #[derive(Debug)]
@@ -203,10 +204,10 @@ impl Tasking {
             address_spaces: Mutex::new(Slab::new()),
             data_buffers: Mutex::new(Slab::new()),
             message_queue: Mutex::new(SegQueue::new()),
-            sleeping: RwLock::new(None),
+            sleeping: RwLock::new(Some(SleepReason::NewProcess)),
             arguments: (user_arg_mem.cast(), arguments.len()),
         });
-        self.ready_to_run.lock().push_back(pid);
+        //self.ready_to_run.lock().push_back(pid);
         Ok(pid)
     }
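Taken together with the reworked wake further down, this changes the creation contract: new_process now records the new task as sleeping with SleepReason::NewProcess and no longer queues it, so it cannot be scheduled until someone explicitly starts it. A sketch of the resulting creator-side pattern, mirroring the main.rs hunk above (entry, addr_space, and args are placeholder names):

    // Create the process; it starts asleep (SleepReason::NewProcess) and is not
    // on the ready queue yet.
    let pid = TASKING
        .new_process(entry, addr_space, args)
        .expect("Failed to create process");
    // Any further setup can happen here; the process cannot run in between.
    // Starting it is an explicit, separate step (kernel-side here; userspace
    // can use the new syscall 24).
    TASKING.wake(pid, SleepReason::NewProcess).expect("pid was just returned by new_process");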
@@ -222,63 +223,66 @@ impl Tasking {
     }
 
     pub fn task_yield(&self) {
-        self.freeable_kstacks.lock().clear();
-        let Some(current_pid) = *self.current_pid.read() else {
-            self.wfi_loop.store(false, Ordering::Relaxed);
-            return;
-        };
-        let next_process_pid = self.ready_to_run.lock().pop_front();
-        if let Some(next_process_pid) = next_process_pid {
-            self.wfi_loop.store(false, Ordering::Relaxed);
-            if next_process_pid == self.current_pid().unwrap() {
-                println!("Yielding to currect process! Returning");
-                return;
+        loop {
+            self.freeable_kstacks.lock().clear();
+            let Some(current_pid) = *self.current_pid.read() else {
+                self.wfi_loop.store(false, Ordering::Relaxed);
+                break;
+            };
+            let next_process_pid = self.ready_to_run.lock().pop_front();
+            if let Some(next_process_pid) = next_process_pid {
+                self.wfi_loop.store(false, Ordering::Relaxed);
+                if next_process_pid == self.current_pid().unwrap() {
+                    println!("Yielding to currect process! Returning");
+                    break;
+                }
+                #[expect(
+                    clippy::expect_used,
+                    reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
+                )]
+                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
+                let current_address_space = self.processes.write()[next_process_pid]
+                    .address_space
+                    .take()
+                    .expect("Non-current process has active page table")
+                    .activate();
+                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
+                self.processes.write()[current_pid].address_space = Some(current_address_space);
+                let processes = self.processes.read();
+                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
+                let current_process = &processes[current_pid];
+                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
+                let next_process = &processes[next_process_pid];
+                gdt::set_tss_stack(next_process.kernel_esp_top);
+                if current_process.sleeping.read().is_none() {
+                    self.ready_to_run.lock().push_back(current_pid);
+                }
+                let kernel_esp = next_process.kernel_esp;
+                let previous_process = current_pid;
+                *self.current_pid.write() = Some(next_process_pid);
+                core::mem::drop(processes);
+                let mut processes = self.processes.write();
+                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
+                let curr_stack = addr_of_mut!(processes[previous_process].kernel_esp);
+                core::mem::drop(processes);
+                switch_to_asm(curr_stack, kernel_esp);
+                break;
+            } else if {
+                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
+                let res = self.processes.read()[current_pid].sleeping.read().is_some();
+                res
+            } {
+                //println!("All processes sleeping, exiting QEMU");
+                //self.print_stats();
+                //qemu_exit::exit_qemu();
+                //println!("All processes sleeping, waiting for interrupt");
+                self.wfi_loop.store(true, Ordering::Relaxed);
+                x86_64::instructions::interrupts::enable_and_hlt();
+                x86_64::instructions::interrupts::disable();
+            } else {
+                self.wfi_loop.store(false, Ordering::Relaxed);
+                break;
             }
-            #[expect(
-                clippy::expect_used,
-                reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
-            )]
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            let current_address_space = self.processes.write()[next_process_pid]
-                .address_space
-                .take()
-                .expect("Non-current process has active page table")
-                .activate();
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            self.processes.write()[current_pid].address_space = Some(current_address_space);
-            let processes = self.processes.read();
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            let current_process = &processes[current_pid];
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            let next_process = &processes[next_process_pid];
-            gdt::set_tss_stack(next_process.kernel_esp_top);
-            if current_process.sleeping.read().is_none() {
-                self.ready_to_run.lock().push_back(current_pid);
-            }
-            let kernel_esp = next_process.kernel_esp;
-            let previous_process = current_pid;
-            *self.current_pid.write() = Some(next_process_pid);
-            core::mem::drop(processes);
-            let mut processes = self.processes.write();
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            let curr_stack = addr_of_mut!(processes[previous_process].kernel_esp);
-            core::mem::drop(processes);
-            switch_to_asm(curr_stack, kernel_esp);
-        } else if {
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            let res = self.processes.read()[current_pid].sleeping.read().is_some();
-            res
-        } {
-            //println!("All processes sleeping, exiting QEMU");
-            //self.print_stats();
-            //qemu_exit::exit_qemu();
-            //println!("All processes sleeping, waiting for interrupt");
-            self.wfi_loop.store(true, Ordering::Relaxed);
-            x86_64::instructions::interrupts::enable_and_hlt();
-            x86_64::instructions::interrupts::disable();
-            self.task_yield();
-        } else {
-            self.wfi_loop.store(false, Ordering::Relaxed);
         }
     }
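The shape of the reworked task_yield, reduced to a sketch (the closure parameters are placeholders for the ready queue, the current task's sleep flag, the context switch, and the hlt handling): the old version returned early or, when everything was asleep, called itself again after the hlt, which could keep adding kernel stack frames since Rust does not guarantee tail-call elimination; the loop retries in place and break replaces the early returns.

    // Sketch only; mirrors the control flow of the new loop above.
    fn task_yield_sketch(
        mut pop_ready: impl FnMut() -> Option<usize>,
        current_is_sleeping: impl Fn() -> bool,
        switch_to: impl Fn(usize),
        halt_until_interrupt: impl Fn(),
    ) {
        loop {
            if let Some(next) = pop_ready() {
                // A runnable task exists: switch to it and stop retrying.
                switch_to(next);
                break;
            } else if current_is_sleeping() {
                // Nothing is runnable and the current task is asleep: wait for
                // an interrupt that may wake something, then retry. The old
                // code recursed here instead of looping.
                halt_until_interrupt();
            } else {
                // Nothing else is runnable, but the current task may keep running.
                break;
            }
        }
    }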
@@ -287,37 +291,39 @@ impl Tasking {
     }
 
     pub fn exit(&self) -> ! {
-        let next_process_pid = self.ready_to_run.lock().pop_front();
-        if let Some(next_process_pid) = next_process_pid {
-            self.wfi_loop.store(false, Ordering::Relaxed);
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            let mut processes = self.processes.write();
-            if let Some(current_pid) = *self.current_pid.read() {
-                self.freeable_kstacks.lock().push(processes.remove(current_pid).kernel_stack);
+        loop {
+            let next_process_pid = self.ready_to_run.lock().pop_front();
+            if let Some(next_process_pid) = next_process_pid {
+                self.wfi_loop.store(false, Ordering::Relaxed);
+                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
+                let mut processes = self.processes.write();
+                if let Some(current_pid) = *self.current_pid.read() {
+                    self.freeable_kstacks.lock().push(processes.remove(current_pid).kernel_stack);
+                }
+                #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
+                let next_process = &mut processes[next_process_pid];
+                #[expect(
+                    clippy::expect_used,
+                    reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
+                )]
+                next_process
+                    .address_space
+                    .take()
+                    .expect("Non-current process has active page table")
+                    .activate();
+                gdt::set_tss_stack(next_process.kernel_esp_top);
+                let kernel_esp = next_process.kernel_esp;
+                *self.current_pid.write() = Some(next_process_pid);
+                core::mem::drop(processes);
+                switch_to_asm_exit(kernel_esp);
+                unreachable!()
+            } else {
+                //println!("Last non-sleeping process exited, exiting QEMU");
+                self.wfi_loop.store(true, Ordering::Relaxed);
+                x86_64::instructions::interrupts::enable_and_hlt();
+                x86_64::instructions::interrupts::disable();
+                //self.exit();
             }
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            let next_process = &mut processes[next_process_pid];
-            #[expect(
-                clippy::expect_used,
-                reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
-            )]
-            next_process
-                .address_space
-                .take()
-                .expect("Non-current process has active page table")
-                .activate();
-            gdt::set_tss_stack(next_process.kernel_esp_top);
-            let kernel_esp = next_process.kernel_esp;
-            *self.current_pid.write() = Some(next_process_pid);
-            core::mem::drop(processes);
-            switch_to_asm_exit(kernel_esp);
-            unreachable!()
-        } else {
-            //println!("Last non-sleeping process exited, exiting QEMU");
-            self.wfi_loop.store(true, Ordering::Relaxed);
-            x86_64::instructions::interrupts::enable_and_hlt();
-            x86_64::instructions::interrupts::disable();
-            self.exit();
         }
     }
@@ -389,10 +395,15 @@ impl Tasking {
         self.task_yield();
     }
 
-    pub fn wake(&self, pid: usize) -> Result<(), InvalidPid> {
-        *self.processes.read().get(pid).ok_or(InvalidPid)?.sleeping.write() = None;
-        if Some(pid) != self.current_pid() {
-            self.ready_to_run.lock().push_back(pid);
+    pub fn wake(&self, pid: usize, reason: SleepReason) -> Result<(), InvalidPid> {
+        let processes = self.processes.read();
+        let process = processes.get(pid).ok_or(InvalidPid)?;
+        let mut sleeping = process.sleeping.write();
+        if *sleeping == Some(reason) {
+            if Some(pid) != self.current_pid() {
+                self.ready_to_run.lock().push_back(pid);
+            }
+            *sleeping = None;
         }
         Ok(())
     }
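A standalone model of the reason-gated wake (the types here are simplified stand-ins, not the kernel's): a wake only takes effect when the stored sleep reason matches the reason passed in, so the check that the IPC paths above previously did by hand with proc_sleeping now lives inside wake itself, and a mismatched or redundant wake is a harmless no-op.

    use std::collections::VecDeque;

    // Simplified stand-ins; only the gating logic is modeled (the kernel also
    // skips the queue push when the woken process is the one currently running).
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    enum SleepReason {
        WaitingForIPC,
        NewProcess,
    }

    struct Process {
        sleeping: Option<SleepReason>,
    }

    struct InvalidPid;

    struct Scheduler {
        processes: Vec<Process>,
        ready_to_run: VecDeque<usize>,
    }

    impl Scheduler {
        // Mirrors the new Tasking::wake: Err only for an unknown PID; a
        // mismatched reason is Ok but leaves the process asleep and off the
        // ready queue.
        fn wake(&mut self, pid: usize, reason: SleepReason) -> Result<(), InvalidPid> {
            let process = self.processes.get_mut(pid).ok_or(InvalidPid)?;
            if process.sleeping == Some(reason) {
                // Only a wake for the event the process is actually waiting on
                // makes it runnable again.
                self.ready_to_run.push_back(pid);
                process.sleeping = None;
            }
            Ok(())
        }
    }

So, for example, wake(pid, SleepReason::WaitingForIPC) after queueing a message marks the process runnable only if it was actually blocked on IPC; a process still parked as NewProcess stays put until syscall 24 (or the kernel) starts it.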