Add basic support for sending messages to processes on IRQs

pjht 2024-09-11 10:14:55 -05:00
parent 1885db4b67
commit fc8c2c5748
Signed by: pjht
GPG Key ID: 7B5F6AFBEC7EE78E
2 changed files with 95 additions and 6 deletions


@ -15,6 +15,7 @@ use pic8259::ChainedPics;
use saturating_cast::SaturatingCast;
use spin::{Lazy, Mutex, RwLock};
use x86_64::{
instructions::port::{Port, PortReadOnly},
registers::control::Cr2,
set_general_handler,
structures::{
@ -39,6 +40,7 @@ static IDT: Lazy<InterruptDescriptorTable> = Lazy::new(|| {
});
static PICS: Mutex<ChainedPics> = Mutex::new(unsafe { ChainedPics::new(IRQ_BASE, IRQ_BASE + 8) });
static IRQ_HANDLERS: RwLock<[Option<IrqHandler>; 16]> = RwLock::new([None; 16]);
static IRQ_TASKS: RwLock<[Option<usize>; 16]> = RwLock::new([None; 16]);
pub type IrqHandler = fn(irq_num: u8, eoi_guard: EoiGuard);
@ -138,7 +140,66 @@ fn irq_handler(_stack_frame: InterruptStackFrame, index: u8, _error_code: Option
)]
if let Some(handler) = IRQ_HANDLERS.read()[usize(irq_num)] {
handler(irq_num, eoi_guard);
} else if let Some(handler_task) = IRQ_TASKS.read()[usize(irq_num)] {
let pid = handler_task;
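// An IRQ message is 11 bytes; allocate in whole pages because the buffer
// is moved into the destination address space a page at a time.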
let len: usize = 11;
let rounded_size = len.next_multiple_of(4096);
let mut buffer = Vec::with_capacity_in(rounded_size, &*ACTIVE_SPACE);
buffer.resize(rounded_size, 0);
let mut buffer = buffer.into_boxed_slice();
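// Message layout (little-endian): bytes 0..8 = u64::MAX, bytes 8..10 = 2u16,
// byte 10 = the IRQ number.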
buffer[0..8].copy_from_slice(&u64::MAX.to_le_bytes());
buffer[8..10].copy_from_slice(&2u16.to_le_bytes());
buffer[10] = irq_num;
assert!(len <= buffer.len());
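// Probe the destination PID with a no-op closure first, so the page
// remapping below only happens for a valid process.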
if TASKING.message_queue_mut(pid, |_| ()).is_ok() {
let buf_num_pages = buffer.len() / 4096;
let buffer = Box::into_raw(buffer);
let buf_start_page =
Page::from_start_address(VirtAddr::new(u64(buffer.expose_provenance()))).unwrap();
let dest_buffer = TASKING
.address_space_mut(pid, |aspace| {
// This is None only if the destination is the current process. If so,
// no remapping is necessary, so just return the old buffer.
let Some(aspace) = aspace else {
return buffer;
};
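// Move the buffer's pages out of the current address space and into the
// destination's; `page` is where the buffer now starts over there.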
let page = ACTIVE_SPACE
.lock()
.move_mappings_free(buf_start_page, buf_num_pages, aspace)
.unwrap();
ptr::slice_from_raw_parts_mut::<u8>(
page.start_address().as_mut_ptr(),
buffer.len(),
)
})
.unwrap();
#[expect(
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
let new_buffer_key =
TASKING.proc_data_buffers_mut(pid, |x| x.insert(dest_buffer)).unwrap();
#[expect(
clippy::unwrap_used,
reason = "The option was already checked at the start of the if-let"
)]
TASKING.message_queue_mut(pid, |x| x.push((new_buffer_key, len))).unwrap();
#[expect(
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
let sleep_status = TASKING.proc_sleeping(pid).unwrap();
if sleep_status == Some(SleepReason::WaitingForIPC) {
#[expect(
clippy::unwrap_used,
reason = "The PID is known valid due to using it in message_queue_mut in the if-let condition"
)]
TASKING.wake(pid).unwrap();
}
} else {
println!("irq 1 msg: Bad PID ({})", pid);
}
}
}
#[repr(C)]
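
The handler above hands the destination process an 11-byte message with a fixed little-endian layout, and pushes the buffer key plus the length 11 onto its message queue. For illustration, a receiving task could decode the payload along these lines (a minimal sketch: the IrqMessage type and parse helper are hypothetical, only the byte layout comes from the handler above):

struct IrqMessage {
    sender: u64, // the handler writes u64::MAX here
    kind: u16,   // the handler writes 2 here
    irq_num: u8,
}

fn parse_irq_message(buf: &[u8]) -> Option<IrqMessage> {
    // The kernel reports len = 11 alongside the buffer key.
    let header = buf.get(..11)?;
    let sender = u64::from_le_bytes(header[0..8].try_into().ok()?);
    let kind = u16::from_le_bytes(header[8..10].try_into().ok()?);
    if sender != u64::MAX || kind != 2 {
        return None; // not a kernel IRQ notification
    }
    Some(IrqMessage { sender, kind, irq_num: header[10] })
}
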
@ -682,6 +743,15 @@ extern "C" fn syscall_handler() {
})
}
}
23 => {
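// Syscall 23: register the calling process to receive a message whenever
// the IRQ passed in rcx fires.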
let irq_num = regs.rcx;
if irq_num < 16 {
IRQ_TASKS.write()[usize(irq_num)] = Some(TASKING.current_pid().unwrap());
retval = 0;
} else {
retval = 1;
}
}
_ => (),
};
unsafe { SYSCALL_REGS = regs };
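
Registering for IRQ messages goes through the new syscall 23, which takes the IRQ number in rcx and returns 0 on success or 1 for IRQs of 16 and above. A userspace stub might look roughly like this (a sketch only: the int 0x80 entry vector and the rax number/return-value convention are assumptions, since this diff shows only the kernel side):

use core::arch::asm;

/// Ask the kernel to deliver this process a message whenever `irq` fires.
unsafe fn register_irq_handler(irq: u64) -> u64 {
    let retval: u64;
    asm!(
        "int 0x80", // assumed syscall entry; not shown in this diff
        inlateout("rax") 23u64 => retval, // assumed: syscall number in, result out
        in("rcx") irq, // IRQ number, as read by the handler above
    );
    retval
}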


@ -10,6 +10,7 @@ use core::{
arch::asm,
ffi::CStr,
ptr::{addr_of, addr_of_mut},
sync::atomic::{AtomicBool, Ordering},
};
use crossbeam_queue::SegQueue;
use humansize::{SizeFormatter, BINARY};
@ -110,6 +111,7 @@ pub static TASKING: Lazy<Tasking> = Lazy::new(|| Tasking {
ready_to_run: Mutex::new(VecDeque::new()),
current_pid: RwLock::new(None),
freeable_kstacks: Mutex::new(Vec::new()),
wfi_loop: AtomicBool::new(false),
});
#[derive(Debug)]
@ -118,6 +120,7 @@ pub struct Tasking {
ready_to_run: Mutex<VecDeque<usize>>,
current_pid: RwLock<Option<usize>>,
freeable_kstacks: Mutex<Vec<Box<[usize], &'static ASpaceMutex>>>,
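/// True while task_yield is parked in its wait-for-interrupt idle loop below.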
wfi_loop: AtomicBool,
}
pub const KSTACK_SIZE: usize = (4 * 4096) / 8;
@ -214,16 +217,23 @@ impl Tasking {
|| self.ready_to_run.is_locked()
|| (self.processes.reader_count() > 0)
|| (self.processes.writer_count() > 0)
|| KERNEL_SPACE.is_locked()
|| self.wfi_loop.load(Ordering::Relaxed))
}
pub fn task_yield(&self) {
self.freeable_kstacks.lock().clear();
let Some(current_pid) = *self.current_pid.read() else {
self.wfi_loop.store(false, Ordering::Relaxed);
return;
};
let next_process_pid = self.ready_to_run.lock().pop_front();
if let Some(next_process_pid) = next_process_pid {
self.wfi_loop.store(false, Ordering::Relaxed);
if next_process_pid == current_pid {
println!("Yielding to current process! Returning");
return;
}
#[expect(
clippy::expect_used,
reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
@ -259,9 +269,16 @@ impl Tasking {
let res = self.processes.read()[current_pid].sleeping.read().is_some();
res
} {
println!("All processes sleeping, exiting QEMU");
self.print_stats();
qemu_exit::exit_qemu();
//println!("All processes sleeping, exiting QEMU");
//self.print_stats();
//qemu_exit::exit_qemu();
//println!("All processes sleeping, waiting for interrupt");
self.wfi_loop.store(true, Ordering::Relaxed);
x86_64::instructions::interrupts::enable_and_hlt();
x86_64::instructions::interrupts::disable();
self.task_yield();
} else {
self.wfi_loop.store(false, Ordering::Relaxed);
}
}
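
When every process is asleep, task_yield now parks the CPU instead of exiting QEMU. enable_and_hlt is the race-free way to do this: it issues sti immediately followed by hlt, and because sti only takes effect after the next instruction, an interrupt cannot slip in between the check and the halt. A standalone sketch of the same pattern (the function name is illustrative; the two calls are the x86_64-crate ones used above):

use x86_64::instructions::interrupts;

/// Sleep the CPU until the next interrupt, returning with interrupts off.
fn wait_for_interrupt() {
    // sti; hlt as one unit: an IRQ arriving here wakes the hlt rather than
    // being lost before it.
    interrupts::enable_and_hlt();
    // Restore the interrupts-disabled state the scheduler expects.
    interrupts::disable();
}
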
@ -371,7 +388,9 @@ impl Tasking {
pub fn wake(&self, pid: usize) -> Result<(), InvalidPid> {
*self.processes.read().get(pid).ok_or(InvalidPid)?.sleeping.write() = None;
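// The current process is already running, so don't add it to the ready
// queue; it would get scheduled a second time.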
if Some(pid) != self.current_pid() {
self.ready_to_run.lock().push_back(pid);
}
Ok(())
}