Unify PID and TID into a new type

pjht 2025-03-06 12:09:03 -06:00
parent bf5b289359
commit 5f7c6f4183
Signed by: pjht
GPG Key ID: CA239FC6934E6F3A
5 changed files with 175 additions and 156 deletions
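The change replaces loose usize process and thread IDs with Pid and Tid newtypes (defined in the tasking.rs hunk below). A minimal standalone sketch of the pattern this buys, with hypothetical wake_old/wake_new functions standing in for the real APIs:

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Pid(usize);

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Tid {
    pid: Pid,
    tid: usize,
}

impl Tid {
    fn from_pid_tid(pid: Pid, tid: usize) -> Self {
        Self { pid, tid }
    }
}

// Old shape: two loose usizes that a caller could silently transpose.
fn wake_old(_pid: usize, _tid: usize) {}

// New shape: one copyable value that cannot be half-specified or swapped.
fn wake_new(_tid: Tid) {}

fn main() {
    wake_old(3, 0); // compiles identically with the arguments swapped
    wake_new(Tid::from_pid_tid(Pid(3), 0)); // the pairing is enforced by the type
}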

View File

@@ -2,7 +2,7 @@ use crate::{
bootinfo::BOOTINFO,
pit::NUM_INTERRUPTS,
print, println,
tasking::{InvalidPid, IpcMessage, SleepReason},
tasking::{InvalidPid, IpcMessage, Pid, SleepReason, Tid},
virtual_memory::{ASpaceMutex, AddressSpace, PagingError, ACTIVE_SPACE, KERNEL_SPACE},
TASKING,
};
@@ -51,7 +51,7 @@ static IDT: Lazy<InterruptDescriptorTable> = Lazy::new(|| {
});
static PICS: Mutex<ChainedPics> = Mutex::new(unsafe { ChainedPics::new(IRQ_BASE, IRQ_BASE + 8) });
static IRQ_HANDLERS: RwLock<[Option<IrqHandler>; 16]> = RwLock::new([None; 16]);
static IRQ_TASKS: RwLock<[Option<usize>; 16]> = RwLock::new([None; 16]);
static IRQ_TASKS: RwLock<[Option<Pid>; 16]> = RwLock::new([None; 16]);
pub type IrqHandler = fn(irq_num: u8, eoi_guard: EoiGuard);
@@ -83,8 +83,8 @@ extern "x86-interrupt" fn page_fault_handler(
KERNEL_SPACE.lock().level_4_table()[faulting_addr.p4_index()].clone();
return;
}
if let Some(current_pid) = TASKING.current_pid() {
print!("PID {current_pid} ");
if let Some(current_tid) = TASKING.current_tid() {
print!("PID {} ", current_tid.pid().as_usize());
} else {
print!("Kernel init ");
}
@@ -108,8 +108,8 @@ extern "x86-interrupt" fn page_fault_handler(
println!("page faulted {error_code:#?} at {:#x}\n{stack_frame:#?}", faulting_addr);
}
} else {
if let Some(current_pid) = TASKING.current_pid() {
print!("PID {current_pid} ");
if let Some(current_tid) = TASKING.current_tid() {
print!("PID {} ", current_tid.pid().as_usize());
} else {
print!("Kernel init ");
}
@@ -131,8 +131,8 @@ fn general_handler(stack_frame: InterruptStackFrame, index: u8, _error_code: Opt
}
fn exc_handler(_stack_frame: InterruptStackFrame, _index: u8, _error_code: Option<u64>) {
if let Some(current_pid) = TASKING.current_pid() {
print!("PID {current_pid} ");
if let Some(current_tid) = TASKING.current_tid() {
print!("PID {} ", current_tid.pid().as_usize());
} else {
print!("Kernel init ");
}
@@ -170,7 +170,7 @@ impl From<PagingError> for SendIpcError {
}
pub fn send_ipc_to(
pid: usize,
pid: Pid,
buffer: Box<[u8], &'static ASpaceMutex>,
len: usize,
from_kernel: bool,
@@ -182,7 +182,7 @@ pub fn send_ipc_to(
clippy::unwrap_used,
reason = "from_kernel=false is used from the send_ipc syscall, which can only be called in usermode where PIDs are always present"
)]
TASKING.current_pid().unwrap()
TASKING.current_tid().unwrap().pid().as_usize()
};
#[cfg(feature = "log-rpc-text")]
dump_ipc_message(from, pid, &buffer[0..len]);
@@ -263,7 +263,7 @@ pub fn send_ipc_to(
TASKING
.process(pid, |process| {
for (tid, _) in &*process.threads().read() {
TASKING.wake(pid, tid, SleepReason::WaitingForIPC).unwrap();
TASKING.wake(Tid::from_pid_tid(pid, tid), SleepReason::WaitingForIPC).unwrap();
}
})
.unwrap();
@@ -383,7 +383,7 @@ fn get_buffer(id: u64) -> Option<Box<[u8], &'static ASpaceMutex>> {
})
}
pub static REGISTERD_PIDS: Lazy<RwLock<HashMap<u64, u64>>> =
pub static REGISTERD_PIDS: Lazy<RwLock<HashMap<u64, Pid>>> =
Lazy::new(|| RwLock::new(HashMap::new()));
static INITRD_BUF: Lazy<&'static [u8]> = Lazy::new(|| {
@@ -544,7 +544,7 @@ extern "C" fn syscall_handler() {
let res = TASKING.new_process(ptr::with_exposed_provenance(usize(regs.rcx)), space);
if let Ok(pid) = res {
retval = 0;
retval2 = u64(pid);
retval2 = u64(pid.as_usize());
} else {
retval = 1;
}
@@ -554,19 +554,21 @@ extern "C" fn syscall_handler() {
clippy::unwrap_used,
reason = "Syscalls cannot be called during early boot, the only time when there is no current PID"
)]
REGISTERD_PIDS.write().insert(regs.rcx, u64(TASKING.current_pid().unwrap()));
REGISTERD_PIDS
.write()
.insert(regs.rcx, TASKING.current_tid().unwrap().pid());
}
10 => {
let id = REGISTERD_PIDS.read().get(&regs.rcx).copied();
if let Some(id) = id {
let pid = REGISTERD_PIDS.read().get(&regs.rcx).copied();
if let Some(pid) = pid {
retval = 0;
retval2 = id;
retval2 = u64(pid.as_usize());
} else {
retval = 1;
}
}
11 => {
let pid = usize(regs.rcx);
let pid = Pid::from_usize(usize(regs.rcx));
if let Some(buffer) = get_buffer(regs.rdx) {
let len = usize(regs.rsi);
assert!(len <= buffer.len());
@@ -608,7 +610,7 @@ extern "C" fn syscall_handler() {
clippy::unwrap_used,
reason = "Syscalls cannot be called during early boot, the only time when there is no current PID"
)]
let pid = u64(TASKING.current_pid().unwrap());
let pid = u64(TASKING.current_tid().unwrap().pid().as_usize());
retval = pid;
}
14 => 'call14: {
@@ -768,7 +770,7 @@ extern "C" fn syscall_handler() {
clippy::unwrap_used,
reason = "Syscalls cannot be called during early boot, the only time when there is no current PID"
)]
TASKING.current_pid().unwrap(),
TASKING.current_tid().unwrap().pid(),
);
retval = 0;
} else {
@@ -776,15 +778,15 @@ extern "C" fn syscall_handler() {
}
}
24 => {
let pid = usize(regs.rcx);
if TASKING.wake(pid, 0, SleepReason::NewProcess).is_err() {
let pid = Pid::from_usize(usize(regs.rcx));
if TASKING.wake(Tid::main_thread(pid), SleepReason::NewProcess).is_err() {
retval = 1;
} else {
retval = 0;
}
}
25 => {
let pid = usize(regs.rcx);
let pid = Pid::from_usize(usize(regs.rcx));
if TASKING.clear_exited_pid(pid).is_err() {
retval = 1;
} else {
@@ -796,14 +798,16 @@ extern "C" fn syscall_handler() {
TASKING.new_thread(ptr::with_exposed_provenance(usize(regs.rcx)), usize(regs.rdx));
if let Ok(tid) = res {
retval = 0;
retval2 = u64(tid);
retval2 = u64(tid.tid());
} else {
retval = 1;
}
}
27 => TASKING.exit_thread(),
28 => {
TASKING.join_thread(usize(regs.rcx));
let current_tid = TASKING.current_tid().unwrap();
let join_tid = Tid::from_pid_tid(current_tid.pid(), usize(regs.rcx));
TASKING.join_thread(join_tid);
}
29 => {
retval = u64(TASKING.new_tls_key());
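At the syscall boundary, raw register values are wrapped into the newtype exactly once with Pid::from_usize and unwrapped with as_usize only when filling return registers. A sketch of that edge, using plain `as` casts in place of the kernel's checked cast helpers:

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Pid(usize);

impl Pid {
    fn from_usize(pid: usize) -> Self { Self(pid) }
    fn as_usize(self) -> usize { self.0 }
}

fn main() {
    let rcx: u64 = 42; // an untyped PID arriving in a syscall register
    let pid = Pid::from_usize(rcx as usize); // wrap once at the boundary
    // ... everything past this point only ever sees `Pid` ...
    let retval2 = pid.as_usize() as u64; // unwrap only when writing a return register
    assert_eq!(retval2, rcx);
}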

View File

@@ -102,7 +102,7 @@ use mutually_exclusive_features::none_or_one_of;
use physical_memory::PHYSICAL_MEMORY;
use spin::lazy::Lazy;
use tar_no_std::TarArchiveRef;
use tasking::{SleepReason, TASKING};
use tasking::{SleepReason, Tid, TASKING};
use virtual_memory::{ACTIVE_SPACE, KERNEL_SPACE};
use x86_64::{
registers::{
@@ -314,5 +314,5 @@ pub fn main() {
let init_pid = TASKING
.new_process(ptr::with_exposed_provenance(usize(init.ehdr.e_entry)), init_addr_space)
.expect("Failed to create init process");
TASKING.wake(init_pid, 0, SleepReason::NewProcess).expect("Failed to wake new init process");
TASKING.wake(Tid::main_thread(init_pid), SleepReason::NewProcess).expect("Failed to wake new init process");
}
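Tid::main_thread(init_pid) replaces the bare (init_pid, 0) pair here, so the "thread index 0 is the entry thread" convention lives in one constructor shared by kernel init and the process-start syscall. A small sketch with the types stubbed:

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Pid(usize);

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Tid {
    pid: Pid,
    tid: usize,
}

impl Tid {
    fn main_thread(pid: Pid) -> Self {
        Self { pid, tid: 0 } // index 0 is the process's entry thread
    }
}

fn main() {
    let init_pid = Pid(1);
    assert_eq!(Tid::main_thread(init_pid).tid, 0);
}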

View File

@@ -15,7 +15,7 @@ use core::panic::PanicInfo;
#[panic_handler]
fn panic(info: &PanicInfo<'_>) -> ! {
print!("Kernel panic in ");
if let Some(pid) = TASKING.current_pid() {
if let Some(pid) = TASKING.current_tid().map(|x| x.pid().as_usize()) {
print!("PID {}", pid);
} else {
print!("kernel init");

View File

@@ -2,7 +2,9 @@ use core::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use crate::{
interrupts::{self, EoiGuard},
println, TASKING,
println,
tasking::Tid,
TASKING,
};
use alloc::vec::Vec;
use spin::Mutex;
@@ -15,7 +17,7 @@ const MAX_FREQ: u32 = 1_193_180;
pub static NUM_INTERRUPTS: AtomicU64 = AtomicU64::new(0);
pub static THREAD_SLEEP_LIST: Mutex<Vec<(u64, usize, usize)>> = Mutex::new(Vec::new());
pub static THREAD_SLEEP_LIST: Mutex<Vec<(u64, Tid)>> = Mutex::new(Vec::new());
pub static CHECK_SLEEP_LIST: AtomicBool = AtomicBool::new(false);
pub fn init(mut freq: u32) {
@@ -61,7 +63,7 @@ fn handler(_irq: u8, eoi_guard: EoiGuard) {
let mut list = THREAD_SLEEP_LIST.lock();
while list.last().is_some_and(|x| x.0 <= ticks) {
let entry = list.pop().unwrap();
TASKING.force_wake(entry.1, entry.2).unwrap();
TASKING.force_wake(entry.1).unwrap();
}
}
drop(eoi_guard);
@@ -70,13 +72,13 @@ fn handler(_irq: u8, eoi_guard: EoiGuard) {
}
}
pub fn thread_sleeping(pid: usize, tid: usize, duration: u64) {
pub fn thread_sleeping(tid: Tid, duration: u64) {
let end = NUM_INTERRUPTS.load(Ordering::Relaxed) + duration;
let mut list = THREAD_SLEEP_LIST.lock();
list.push((end, pid, tid));
list.push((end, tid));
list.sort_by(|x, y| (y.0).cmp(&x.0));
}
pub fn cancel_sleep(pid: usize, tid: usize) {
THREAD_SLEEP_LIST.lock().retain(|x| x.1 != pid || x.2 != tid);
pub fn cancel_sleep(tid: Tid) {
THREAD_SLEEP_LIST.lock().retain(|x| x.1 != tid);
}
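The sleep list now stores (wake_tick, Tid) pairs sorted by descending tick, so the earliest deadline sits at the tail and the PIT handler can pop expired entries cheaply. A runnable sketch of that ordering, with a stubbed Tid:

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Tid { pid: usize, tid: usize }

fn main() {
    // Entries are (wake_tick, tid), kept sorted by *descending* tick.
    let mut list: Vec<(u64, Tid)> = vec![
        (30, Tid { pid: 1, tid: 0 }),
        (10, Tid { pid: 2, tid: 0 }),
        (20, Tid { pid: 3, tid: 0 }),
    ];
    list.sort_by(|x, y| (y.0).cmp(&x.0)); // ticks now ordered [30, 20, 10]
    let ticks = 15; // pretend NUM_INTERRUPTS has reached 15
    while list.last().is_some_and(|x| x.0 <= ticks) {
        let (_deadline, tid) = list.pop().unwrap();
        println!("force-waking {tid:?}"); // wakes only pid 2's thread
    }
    assert_eq!(list.len(), 2);
}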

View File

@@ -8,6 +8,7 @@ use alloc::{boxed::Box, collections::VecDeque, vec, vec::Vec};
use cast::{u64, usize};
use core::{
arch::{asm, naked_asm},
mem,
ptr::{addr_of, addr_of_mut},
sync::atomic::{AtomicBool, Ordering},
};
@@ -84,7 +85,7 @@ pub enum SleepReason {
WaitingForIPC,
NewProcess,
Exited,
JoinThread(usize),
JoinThread(Tid),
LockedMutex(usize),
TimeSleep,
}
@@ -130,6 +131,50 @@ pub struct Process {
mutexes: Mutex<Slab<bool>>,
}
#[repr(transparent)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Pid(usize);
impl Pid {
pub fn as_usize(self) -> usize {
self.0
}
pub fn from_usize(pid: usize) -> Self {
Self(pid)
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Tid {
pid: Pid,
tid: usize,
}
impl Tid {
pub fn from_pid_tid(pid: Pid, tid: usize) -> Self {
Self {
pid,
tid,
}
}
pub fn main_thread(pid: Pid) -> Self {
Self {
pid,
tid: 0
}
}
pub fn pid(&self) -> Pid {
self.pid
}
pub fn tid(&self) -> usize {
self.tid
}
}
impl Process {
fn new(
address_space: AddressSpace,
@@ -260,7 +305,6 @@ pub struct InvalidPid;
pub static TASKING: Lazy<Tasking> = Lazy::new(|| Tasking {
processes: RwLock::new(Slab::new()),
ready_to_run: Mutex::new(VecDeque::new()),
current_pid: RwLock::new(None),
current_tid: RwLock::new(None),
freeable_kstacks: Mutex::new(Vec::new()),
wfi_loop: AtomicBool::new(false),
@@ -270,12 +314,11 @@ pub static TASKING: Lazy<Tasking> = Lazy::new(|| Tasking {
#[derive(Debug)]
pub struct Tasking {
processes: RwLock<Slab<Process>>,
ready_to_run: Mutex<VecDeque<(usize, usize)>>,
current_pid: RwLock<Option<usize>>,
current_tid: RwLock<Option<usize>>,
ready_to_run: Mutex<VecDeque<Tid>>,
current_tid: RwLock<Option<Tid>>,
freeable_kstacks: Mutex<Vec<Box<[usize], &'static ASpaceMutex>>>,
wfi_loop: AtomicBool,
current_fpu_context: Mutex<Option<(usize, usize)>>,
current_fpu_context: Mutex<Option<Tid>>,
}
pub const KSTACK_SIZE: usize = (4 * 4096) / 8;
@@ -285,9 +328,9 @@ impl Tasking {
&self,
entry_point: *const extern "C" fn(usize) -> !,
address_space: AddressSpace,
) -> Result<usize, PagingError> {
) -> Result<Pid, PagingError> {
let process = Process::new(address_space, entry_point)?;
let pid = self.processes.write().insert(process);
let pid = Pid::from_usize(self.processes.write().insert(process));
if let Some(&proc_man_pid) = REGISTERD_PIDS.read().get(&3) {
let mut varint_buf = unsigned_varint::encode::u64_buffer();
let mut buffer = Vec::new_in(&*ACTIVE_SPACE);
@@ -296,11 +339,11 @@ impl Tasking {
buffer.extend_from_slice(&0u64.to_le_bytes());
buffer.extend_from_slice(&8u16.to_le_bytes());
buffer.extend_from_slice(&6u16.to_le_bytes());
buffer.extend_from_slice(unsigned_varint::encode::u64(u64(pid), &mut varint_buf));
if let Some(current_pid) = *(self.current_pid.read()) {
buffer.extend_from_slice(unsigned_varint::encode::u64(u64(pid.as_usize()), &mut varint_buf));
if let Some(current_tid) = *(self.current_tid.read()) {
buffer.push(1);
buffer.extend_from_slice(unsigned_varint::encode::u64(
u64(current_pid),
u64(current_tid.pid().as_usize()),
&mut varint_buf,
));
} else {
@@ -313,10 +356,10 @@ impl Tasking {
clippy::expect_used,
reason = "The tasking code in the kernel and proc_man CANNOT lose sync. Failure to communicate is fatal."
)]
send_ipc_to(usize(proc_man_pid), buffer, len, true)
send_ipc_to(proc_man_pid, buffer, len, true)
.expect("Failed to send exit message to proc_man");
} else {
println!("[TASKING] No process manager when creating PID {pid}");
println!("[TASKING] No process manager when creating PID {}", pid.as_usize());
}
Ok(pid)
}
@@ -325,18 +368,17 @@ impl Tasking {
&self,
entry_point: *const extern "C" fn(usize) -> !,
argument: usize,
) -> Result<usize, PagingError> {
let current_pid = self.current_pid().unwrap();
let new_tid =
) -> Result<Tid, PagingError> {
let current_tid = self.current_tid().unwrap();
let new_tid_internal =
self.current_process_mut(|process| process.new_thread(entry_point, argument))?;
self.ready_to_run.lock().push_back((current_pid, new_tid));
let new_tid = Tid::from_pid_tid(current_tid.pid(), new_tid_internal);
self.ready_to_run.lock().push_back(new_tid);
Ok(new_tid)
}
pub fn ok_to_yield(&self) -> bool {
!(self.freeable_kstacks.is_locked()
|| (self.current_pid.reader_count() > 0)
|| (self.current_pid.writer_count() > 0)
|| (self.current_tid.reader_count() > 0)
|| (self.current_tid.writer_count() > 0)
|| self.ready_to_run.is_locked()
@@ -349,23 +391,21 @@ impl Tasking {
pub fn task_yield(&self) {
loop {
self.freeable_kstacks.lock().clear();
let Some(current_pid) = self.current_pid() else {
let Some(current_tid) = self.current_tid() else {
self.wfi_loop.store(false, Ordering::Relaxed);
break;
};
let rtr_head = self.ready_to_run.lock().pop_front();
if let Some((next_process_pid, next_process_tid)) = rtr_head {
if let Some(next_process_tid) = rtr_head {
self.wfi_loop.store(false, Ordering::Relaxed);
if Some(next_process_pid) == self.current_pid()
&& Some(next_process_tid) == self.current_tid()
{
if rtr_head == self.current_tid() {
println!("Yielding to current thread! Returning");
break;
}
#[warn(clippy::unwrap_used, reason = "FIXME(?)")]
if current_pid != next_process_pid {
if current_tid.pid() != next_process_tid.pid() {
let current_address_space = self
.process_mut(next_process_pid, |process| {
.process_mut(next_process_tid.pid(), |process| {
#[expect(
clippy::expect_used,
reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
@@ -382,10 +422,9 @@ impl Tasking {
});
}
if self.current_thread(|thread| thread.sleeping.read().is_none()) {
self.ready_to_run.lock().push_back((current_pid, self.current_tid().unwrap()));
self.ready_to_run.lock().push_back(current_tid);
}
let curr_stack = self.current_thread_mut(|thread| addr_of_mut!(thread.kernel_esp));
*self.current_pid.write() = Some(next_process_pid);
*self.current_tid.write() = Some(next_process_tid);
let kernel_esp = self.current_thread(|thread| {
gdt::set_tss_stack(thread.kernel_esp_top);
@@ -409,23 +448,19 @@ impl Tasking {
}
}
pub fn current_pid(&self) -> Option<usize> {
*self.current_pid.read()
}
pub fn current_tid(&self) -> Option<usize> {
pub fn current_tid(&self) -> Option<Tid> {
*self.current_tid.read()
}
pub fn exit(&self, code: u8) -> ! {
if let Some(current_pid) = self.current_pid() {
if let Some(current_tid) = self.current_tid() {
let current_fpu_context = *self.current_fpu_context.lock();
if let Some((fpu_pid, _fpu_tid)) = current_fpu_context {
if current_pid == fpu_pid {
if let Some(fpu_tid) = current_fpu_context {
if current_tid.pid() == fpu_tid.pid() {
*self.current_fpu_context.lock() = None;
}
}
self.ready_to_run.lock().retain(|&(ent_pid, _ent_tid)| ent_pid != current_pid);
self.ready_to_run.lock().retain(|&ent_tid| ent_tid.pid() != current_tid.pid());
if let Some(&proc_man_pid) = REGISTERD_PIDS.read().get(&3) {
let mut varint_buf = unsigned_varint::encode::u64_buffer();
let mut buffer = Vec::new_in(&*ACTIVE_SPACE);
@@ -435,7 +470,7 @@ impl Tasking {
buffer.extend_from_slice(&8u16.to_le_bytes());
buffer.extend_from_slice(&4u16.to_le_bytes());
buffer.extend_from_slice(unsigned_varint::encode::u64(
u64(current_pid),
u64(current_tid.pid().as_usize()),
&mut varint_buf,
));
buffer.push(code);
@@ -446,28 +481,27 @@ impl Tasking {
clippy::expect_used,
reason = "The tasking code in the kernel and proc_man CANNOT lose sync. Failure to communicate is fatal."
)]
send_ipc_to(usize(proc_man_pid), buffer, len, true)
send_ipc_to(proc_man_pid, buffer, len, true)
.expect("Failed to send exit message to proc_man");
} else {
println!(
"[TASKING] No process manager when PID {} exited with code {code}",
current_pid
current_tid.pid().as_usize()
);
}
}
loop {
let rtr_head = self.ready_to_run.lock().pop_front();
if let Some((next_process_pid, next_process_tid)) = rtr_head {
if let Some(next_process_tid) = rtr_head {
self.wfi_loop.store(false, Ordering::Relaxed);
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
if self.current_pid.read().is_some() {
if self.current_tid.read().is_some() {
self.current_process(|process| {
for (_, thread) in &*process.threads().read() {
*thread.sleeping.write() = Some(SleepReason::Exited);
}
});
}
*self.current_pid.write() = Some(next_process_pid);
*self.current_tid.write() = Some(next_process_tid);
self
.current_process_mut(|process| {
@@ -503,34 +537,33 @@ impl Tasking {
pub fn exit_thread(&self) -> ! {
loop {
#[warn(clippy::unwrap_used, reason = "FIXME(?)")]
let current_pid = self.current_pid().unwrap();
let current_tid = self.current_tid().unwrap();
let current_fpu_context = *self.current_fpu_context.lock();
if let Some((fpu_pid, fpu_tid)) = current_fpu_context {
if current_pid == fpu_pid && self.current_tid().unwrap() == fpu_tid {
if let Some(fpu_tid) = current_fpu_context {
if current_tid == fpu_tid {
*self.current_fpu_context.lock() = None;
}
}
let rtr_head = self.ready_to_run.lock().pop_front();
if let Some((next_process_pid, next_process_tid)) = rtr_head {
if let Some(next_process_tid) = rtr_head {
self.wfi_loop.store(false, Ordering::Relaxed);
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
self.current_thread(|thread| {
*thread.sleeping.write() = Some(SleepReason::Exited);
});
self.current_process(|process| {
for (tid, _) in &*process.threads().read() {
for (int_tid, _) in &*process.threads().read() {
self.wake(
current_pid,
tid,
Tid { pid: current_tid.pid(), tid: int_tid },
SleepReason::JoinThread(self.current_tid().unwrap()),
)
.unwrap();
}
});
#[warn(clippy::unwrap_used, reason = "FIXME(?)")]
if current_pid != next_process_pid {
if current_tid != next_process_tid {
let current_address_space = self
.process_mut(next_process_pid, |process| {
.process_mut(next_process_tid.pid(), |process| {
#[expect(
clippy::expect_used,
reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
@@ -546,7 +579,6 @@ impl Tasking {
process.address_space = Some(current_address_space);
});
}
*self.current_pid.write() = Some(next_process_pid);
*self.current_tid.write() = Some(next_process_tid);
let kernel_esp = self.current_thread_mut(|thread| {
gdt::set_tss_stack(thread.kernel_esp_top);
@@ -567,23 +599,21 @@ impl Tasking {
}
}
pub fn join_thread(&self, tid: usize) {
if self
.curr_proc_thread(tid, |thread| *thread.sleeping.read() == Some(SleepReason::Exited))
.unwrap()
pub fn join_thread(&self, tid: Tid) {
if self.thread(tid, |thread| *thread.sleeping.read() == Some(SleepReason::Exited)).unwrap()
{
return;
}
self.sleep(SleepReason::JoinThread(tid));
}
pub fn clear_exited_pid(&self, pid: usize) -> Result<(), ()> {
if self.current_pid() == Some(pid) {
pub fn clear_exited_pid(&self, pid: Pid) -> Result<(), ()> {
if self.current_tid().map(|x| x.pid()) == Some(pid) {
return Err(());
}
let mut processes = self.processes.write();
let process = processes.remove(pid);
self.ready_to_run.lock().retain(|&(ent_pid, _ent_tid)| ent_pid != pid);
let process = processes.remove(pid.as_usize());
self.ready_to_run.lock().retain(|&ent_tid| ent_tid.pid() != pid);
let threads = process.threads.into_inner();
for (_, thread) in threads {
self.freeable_kstacks.lock().push(thread.kernel_stack);
@@ -594,25 +624,21 @@ impl Tasking {
pub fn current_thread<F: FnOnce(&Thread) -> T, T>(&self, func: F) -> T {
self.current_process(|process| {
let threads = process.threads.read();
func(&threads[self.current_tid().unwrap()])
func(&threads[self.current_tid().unwrap().tid])
})
}
pub fn curr_proc_thread<F: FnOnce(&Thread) -> T, T>(
&self,
tid: usize,
func: F,
) -> Result<T, InvalidPid> {
self.current_process(|process| {
pub fn thread<F: FnOnce(&Thread) -> T, T>(&self, tid: Tid, func: F) -> Result<T, InvalidPid> {
self.process(tid.pid(), |process| {
let threads = process.threads.read();
Ok(func(threads.get(tid).ok_or(InvalidPid)?))
})
Ok(func(threads.get(tid.tid).ok_or(InvalidPid)?))
})?
}
pub fn current_thread_mut<F: FnOnce(&mut Thread) -> T, T>(&self, func: F) -> T {
self.current_process_mut(|process| {
let mut threads = process.threads.write();
func(&mut threads[self.current_tid().unwrap()])
func(&mut threads[self.current_tid().unwrap().tid])
})
}
@@ -620,34 +646,30 @@ impl Tasking {
let processes = self.processes.read();
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
func(&processes[self.current_pid().unwrap()])
func(&processes[self.current_tid().unwrap().pid().as_usize()])
}
pub fn current_process_mut<F: FnOnce(&mut Process) -> T, T>(&self, func: F) -> T {
let mut processes = self.processes.write();
#[warn(clippy::unwrap_used, reason = "FIXME")]
#[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
func(&mut processes[self.current_pid().unwrap()])
func(&mut processes[self.current_tid().unwrap().pid().as_usize()])
}
pub fn process<F: FnOnce(&Process) -> T, T>(
&self,
pid: usize,
func: F,
) -> Result<T, InvalidPid> {
pub fn process<F: FnOnce(&Process) -> T, T>(&self, pid: Pid, func: F) -> Result<T, InvalidPid> {
let processes = self.processes.read();
#[warn(clippy::unwrap_used, reason = "FIXME")]
Ok(func(processes.get(pid).ok_or(InvalidPid)?))
Ok(func(processes.get(pid.as_usize()).ok_or(InvalidPid)?))
}
pub fn process_mut<F: FnOnce(&mut Process) -> T, T>(
&self,
pid: usize,
pid: Pid,
func: F,
) -> Result<T, InvalidPid> {
let mut processes = self.processes.write();
#[warn(clippy::unwrap_used, reason = "FIXME")]
Ok(func(processes.get_mut(pid).ok_or(InvalidPid)?))
Ok(func(processes.get_mut(pid.as_usize()).ok_or(InvalidPid)?))
}
pub fn sleep(&self, reason: SleepReason) {
@@ -659,35 +681,29 @@ impl Tasking {
self.task_yield();
}
pub fn wake(&self, pid: usize, tid: usize, reason: SleepReason) -> Result<(), InvalidPid> {
let processes = self.processes.read();
let process = processes.get(pid).ok_or(InvalidPid)?;
let threads = process.threads.read();
let thread = threads.get(tid).ok_or(InvalidPid)?;
let mut sleeping = thread.sleeping.write();
if *sleeping == Some(reason) {
if Some(pid) != self.current_pid() || Some(tid) != self.current_tid() {
self.ready_to_run.lock().push_back((pid, tid));
pub fn wake(&self, tid: Tid, reason: SleepReason) -> Result<(), InvalidPid> {
self.thread(tid, |thread| {
let mut sleeping = thread.sleeping.write();
if *sleeping == Some(reason) {
if Some(tid) != self.current_tid() {
self.ready_to_run.lock().push_back(tid);
}
*sleeping = None;
}
*sleeping = None;
}
Ok(())
})
}
pub fn force_wake(&self, pid: usize, tid: usize) -> Result<(), InvalidPid> {
let processes = self.processes.read();
let process = processes.get(pid).ok_or(InvalidPid)?;
let threads = process.threads.read();
let thread = threads.get(tid).ok_or(InvalidPid)?;
let mut sleeping = thread.sleeping.write();
if *sleeping != Some(SleepReason::Exited) {
// only thing we cannot force a wakeup from
if Some(pid) != self.current_pid() || Some(tid) != self.current_tid() {
self.ready_to_run.lock().push_back((pid, tid));
pub fn force_wake(&self, tid: Tid) -> Result<(), InvalidPid> {
self.thread(tid, |thread| {
let mut sleeping = thread.sleeping.write();
if *sleeping != Some(SleepReason::Exited) {
// only thing we cannot force a wakeup from
if Some(tid) != self.current_tid() {
self.ready_to_run.lock().push_back(tid);
}
*sleeping = None;
}
*sleeping = None;
}
Ok(())
})
}
pub fn new_tls_key(&self) -> usize {
@@ -728,21 +744,16 @@ impl Tasking {
if self.try_lock_mutex(mutex) {
return true;
}
pit::thread_sleeping(self.current_pid().unwrap(), self.current_tid().unwrap(), duration);
pit::thread_sleeping(self.current_tid().unwrap(), duration);
self.sleep(SleepReason::LockedMutex(mutex));
pit::cancel_sleep(self.current_pid().unwrap(), self.current_tid().unwrap());
pit::cancel_sleep(self.current_tid().unwrap());
self.try_lock_mutex(mutex)
}
pub fn try_lock_mutex(&self, mutex: usize) -> bool {
self.current_process(|process| {
let mut mutexes = process.mutexes.lock();
if !mutexes[mutex] {
mutexes[mutex] = true;
true
} else {
false
}
!mem::replace(&mut mutexes[mutex], true)
})
}
@@ -752,8 +763,11 @@ impl Tasking {
mutexes[mutex] = false;
for (tid, thread) in &*process.threads().read() {
if *thread.sleeping.read() == Some(SleepReason::LockedMutex(mutex)) {
self.wake(self.current_pid().unwrap(), tid, SleepReason::LockedMutex(mutex))
.unwrap();
self.wake(
Tid { pid: self.current_tid().unwrap().pid(), tid },
SleepReason::LockedMutex(mutex),
)
.unwrap();
break;
}
}
@@ -765,7 +779,7 @@ impl Tasking {
}
pub fn time_sleep(&self, timeout: u64) {
pit::thread_sleeping(self.current_pid().unwrap(), self.current_tid().unwrap(), timeout);
pit::thread_sleeping(self.current_tid().unwrap(), timeout);
self.sleep(SleepReason::TimeSleep);
}
@@ -793,11 +807,10 @@ impl Tasking {
});
}
let current_fpu_context = *self.current_fpu_context.lock();
if let Some((fpu_pid, fpu_tid)) = current_fpu_context {
self.processes.read()[fpu_pid].threads.read()[fpu_tid].fpu_state.lock().save();
if let Some(fpu_tid) = current_fpu_context {
self.thread(fpu_tid, |thread| thread.fpu_state.lock().save()).unwrap();
}
self.current_thread(|thread| thread.fpu_state.lock().load());
*self.current_fpu_context.lock() =
Some((self.current_pid().unwrap(), self.current_tid().unwrap()));
*self.current_fpu_context.lock() = self.current_tid();
}
}
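One payoff of the unified type, visible in the hunk above: per-CPU state like the lazy-FPU owner shrinks from Option<(usize, usize)> to Option<Tid>, and the two-field ownership test collapses to a single derived equality check. A minimal sketch:

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Pid(usize);

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Tid { pid: Pid, tid: usize }

fn main() {
    let current = Tid { pid: Pid(4), tid: 1 };
    let fpu_owner: Option<Tid> = Some(Tid { pid: Pid(4), tid: 1 });
    // Old shape: Some((fpu_pid, fpu_tid)) needed `pid == fpu_pid && tid == fpu_tid`.
    // New shape: the derived PartialEq compares both fields in one expression.
    if fpu_owner == Some(current) {
        println!("current thread already owns the FPU context");
    }
}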