Unify PID and TID together in a new type

parent bf5b289359 · commit 5f7c6f4183
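The change below replaces bare `usize` process IDs and ad-hoc `(pid, tid)` pairs with two dedicated types, `Pid` and `Tid`, defined in src/tasking.rs. As a reading aid, here is a condensed, standalone sketch (not kernel code; the stubs mirror the definitions added later in this diff) of the new types and how call sites migrate:

    // Sketch only: Pid/Tid stubs mirroring the definitions added in src/tasking.rs.
    #[repr(transparent)]
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    pub struct Pid(usize);

    impl Pid {
        pub fn from_usize(pid: usize) -> Self { Self(pid) }
        pub fn as_usize(self) -> usize { self.0 }
    }

    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    pub struct Tid { pid: Pid, tid: usize }

    impl Tid {
        pub fn from_pid_tid(pid: Pid, tid: usize) -> Self { Self { pid, tid } }
        pub fn main_thread(pid: Pid) -> Self { Self { pid, tid: 0 } }
        pub fn pid(&self) -> Pid { self.pid }
        pub fn tid(&self) -> usize { self.tid }
    }

    fn main() {
        let pid = Pid::from_usize(3);
        // Old call shape: TASKING.wake(pid, 0, reason)
        // New call shape: TASKING.wake(Tid::main_thread(pid), reason)
        let main_thread = Tid::main_thread(pid);
        assert_eq!(main_thread.pid(), pid);
        assert_eq!(main_thread.tid(), 0);
    }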
@@ -2,7 +2,7 @@ use crate::{
     bootinfo::BOOTINFO,
     pit::NUM_INTERRUPTS,
     print, println,
-    tasking::{InvalidPid, IpcMessage, SleepReason},
+    tasking::{InvalidPid, IpcMessage, Pid, SleepReason, Tid},
     virtual_memory::{ASpaceMutex, AddressSpace, PagingError, ACTIVE_SPACE, KERNEL_SPACE},
     TASKING,
 };
@@ -51,7 +51,7 @@ static IDT: Lazy<InterruptDescriptorTable> = Lazy::new(|| {
 });
 static PICS: Mutex<ChainedPics> = Mutex::new(unsafe { ChainedPics::new(IRQ_BASE, IRQ_BASE + 8) });
 static IRQ_HANDLERS: RwLock<[Option<IrqHandler>; 16]> = RwLock::new([None; 16]);
-static IRQ_TASKS: RwLock<[Option<usize>; 16]> = RwLock::new([None; 16]);
+static IRQ_TASKS: RwLock<[Option<Pid>; 16]> = RwLock::new([None; 16]);

 pub type IrqHandler = fn(irq_num: u8, eoi_guard: EoiGuard);

@@ -83,8 +83,8 @@ extern "x86-interrupt" fn page_fault_handler(
             KERNEL_SPACE.lock().level_4_table()[faulting_addr.p4_index()].clone();
         return;
     }
-    if let Some(current_pid) = TASKING.current_pid() {
-        print!("PID {current_pid} ");
+    if let Some(current_tid) = TASKING.current_tid() {
+        print!("PID {} ", current_tid.pid().as_usize());
     } else {
         print!("Kernel init ");
     }
@@ -108,8 +108,8 @@ extern "x86-interrupt" fn page_fault_handler(
             println!("page faulted {error_code:#?} at {:#x}\n{stack_frame:#?}", faulting_addr);
         }
     } else {
-        if let Some(current_pid) = TASKING.current_pid() {
-            print!("PID {current_pid} ");
+        if let Some(current_tid) = TASKING.current_tid() {
+            print!("PID {} ", current_tid.pid().as_usize());
         } else {
             print!("Kernel init ");
         }
@@ -131,8 +131,8 @@ fn general_handler(stack_frame: InterruptStackFrame, index: u8, _error_code: Opt
 }

 fn exc_handler(_stack_frame: InterruptStackFrame, _index: u8, _error_code: Option<u64>) {
-    if let Some(current_pid) = TASKING.current_pid() {
-        print!("PID {current_pid} ");
+    if let Some(current_tid) = TASKING.current_tid() {
+        print!("PID {} ", current_tid.pid().as_usize());
     } else {
         print!("Kernel init ");
     }
@@ -170,7 +170,7 @@ impl From<PagingError> for SendIpcError {
 }

 pub fn send_ipc_to(
-    pid: usize,
+    pid: Pid,
     buffer: Box<[u8], &'static ASpaceMutex>,
     len: usize,
     from_kernel: bool,
@@ -182,7 +182,7 @@ pub fn send_ipc_to(
             clippy::unwrap_used,
             reason = "from_kernel=false is used from the send_ipc syscall, which can only be called in usermode where PIDs are always present"
         )]
-        TASKING.current_pid().unwrap()
+        TASKING.current_tid().unwrap().pid().as_usize()
     };
     #[cfg(feature = "log-rpc-text")]
     dump_ipc_message(from, pid, &buffer[0..len]);
@@ -263,7 +263,7 @@ pub fn send_ipc_to(
     TASKING
         .process(pid, |process| {
             for (tid, _) in &*process.threads().read() {
-                TASKING.wake(pid, tid, SleepReason::WaitingForIPC).unwrap();
+                TASKING.wake(Tid::from_pid_tid(pid, tid), SleepReason::WaitingForIPC).unwrap();
             }
         })
         .unwrap();
@@ -383,7 +383,7 @@ fn get_buffer(id: u64) -> Option<Box<[u8], &'static ASpaceMutex>> {
     })
 }

-pub static REGISTERD_PIDS: Lazy<RwLock<HashMap<u64, u64>>> =
+pub static REGISTERD_PIDS: Lazy<RwLock<HashMap<u64, Pid>>> =
     Lazy::new(|| RwLock::new(HashMap::new()));

 static INITRD_BUF: Lazy<&'static [u8]> = Lazy::new(|| {
@@ -544,7 +544,7 @@ extern "C" fn syscall_handler() {
             let res = TASKING.new_process(ptr::with_exposed_provenance(usize(regs.rcx)), space);
             if let Ok(pid) = res {
                 retval = 0;
-                retval2 = u64(pid);
+                retval2 = u64(pid.as_usize());
             } else {
                 retval = 1;
             }
@@ -554,19 +554,21 @@ extern "C" fn syscall_handler() {
                 clippy::unwrap_used,
                 reason = "Syscalls cannot be called during early boot, the only time when there is no current PID"
             )]
-            REGISTERD_PIDS.write().insert(regs.rcx, u64(TASKING.current_pid().unwrap()));
+            REGISTERD_PIDS
+                .write()
+                .insert(regs.rcx, TASKING.current_tid().unwrap().pid());
         }
         10 => {
-            let id = REGISTERD_PIDS.read().get(&regs.rcx).copied();
-            if let Some(id) = id {
+            let pid = REGISTERD_PIDS.read().get(&regs.rcx).copied();
+            if let Some(pid) = pid {
                 retval = 0;
-                retval2 = id;
+                retval2 = u64(pid.as_usize());
             } else {
                 retval = 1;
             }
         }
         11 => {
-            let pid = usize(regs.rcx);
+            let pid = Pid::from_usize(usize(regs.rcx));
             if let Some(buffer) = get_buffer(regs.rdx) {
                 let len = usize(regs.rsi);
                 assert!(len <= buffer.len());
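Note how these syscall cases convert at the ABI edge: registers still carry raw integers, so the handler wraps them with `Pid::from_usize` on the way in and unwraps with `as_usize` on the way out. A minimal standalone sketch of that round trip (plain `as` casts stand in for the kernel's `cast::{u64, usize}` helpers):

    // Sketch of the register round trip in the register/lookup syscalls above.
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    pub struct Pid(usize);

    impl Pid {
        pub fn from_usize(pid: usize) -> Self { Self(pid) }
        pub fn as_usize(self) -> usize { self.0 }
    }

    fn main() {
        let rcx: u64 = 7;                        // PID arrives as a raw register value
        let pid = Pid::from_usize(rcx as usize); // typed the moment it enters the kernel
        let retval2 = pid.as_usize() as u64;     // untyped only when leaving via a register
        assert_eq!(retval2, rcx);
    }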
@@ -608,7 +610,7 @@ extern "C" fn syscall_handler() {
                 clippy::unwrap_used,
                 reason = "Syscalls cannot be called during early boot, the only time when there is no current PID"
             )]
-            let pid = u64(TASKING.current_pid().unwrap());
+            let pid = u64(TASKING.current_tid().unwrap().pid().as_usize());
             retval = pid;
         }
         14 => 'call14: {
@@ -768,7 +770,7 @@ extern "C" fn syscall_handler() {
                     clippy::unwrap_used,
                     reason = "Syscalls cannot be called during early boot, the only time when there is no current PID"
                 )]
-                TASKING.current_pid().unwrap(),
+                TASKING.current_tid().unwrap().pid(),
             );
             retval = 0;
         } else {
@@ -776,15 +778,15 @@ extern "C" fn syscall_handler() {
             }
         }
         24 => {
-            let pid = usize(regs.rcx);
-            if TASKING.wake(pid, 0, SleepReason::NewProcess).is_err() {
+            let pid = Pid::from_usize(usize(regs.rcx));
+            if TASKING.wake(Tid::main_thread(pid), SleepReason::NewProcess).is_err() {
                 retval = 1;
             } else {
                 retval = 0;
             }
         }
         25 => {
-            let pid = usize(regs.rcx);
+            let pid = Pid::from_usize(usize(regs.rcx));
             if TASKING.clear_exited_pid(pid).is_err() {
                 retval = 1;
             } else {
@@ -796,14 +798,16 @@ extern "C" fn syscall_handler() {
                 TASKING.new_thread(ptr::with_exposed_provenance(usize(regs.rcx)), usize(regs.rdx));
             if let Ok(tid) = res {
                 retval = 0;
-                retval2 = u64(tid);
+                retval2 = u64(tid.tid());
             } else {
                 retval = 1;
             }
         }
         27 => TASKING.exit_thread(),
         28 => {
-            TASKING.join_thread(usize(regs.rcx));
+            let current_tid = TASKING.current_tid().unwrap();
+            let join_tid = Tid::from_pid_tid(current_tid.pid(), usize(regs.rcx));
+            TASKING.join_thread(join_tid);
         }
         29 => {
             retval = u64(TASKING.new_tls_key());
@@ -102,7 +102,7 @@ use mutually_exclusive_features::none_or_one_of;
 use physical_memory::PHYSICAL_MEMORY;
 use spin::lazy::Lazy;
 use tar_no_std::TarArchiveRef;
-use tasking::{SleepReason, TASKING};
+use tasking::{SleepReason, Tid, TASKING};
 use virtual_memory::{ACTIVE_SPACE, KERNEL_SPACE};
 use x86_64::{
     registers::{
@@ -314,5 +314,5 @@ pub fn main() {
     let init_pid = TASKING
         .new_process(ptr::with_exposed_provenance(usize(init.ehdr.e_entry)), init_addr_space)
         .expect("Failed to create init process");
-    TASKING.wake(init_pid, 0, SleepReason::NewProcess).expect("Failed to wake new init process");
+    TASKING.wake(Tid::main_thread(init_pid), SleepReason::NewProcess).expect("Failed to wake new init process");
 }
@@ -15,7 +15,7 @@ use core::panic::PanicInfo;
 #[panic_handler]
 fn panic(info: &PanicInfo<'_>) -> ! {
     print!("Kernel panic in ");
-    if let Some(pid) = TASKING.current_pid() {
+    if let Some(pid) = TASKING.current_tid().map(|x| x.pid().as_usize()) {
         print!("PID {}", pid);
     } else {
         print!("kernel init");
src/pit.rs (16 changed lines)
@@ -2,7 +2,9 @@ use core::sync::atomic::{AtomicBool, AtomicU64, Ordering};

 use crate::{
     interrupts::{self, EoiGuard},
-    println, TASKING,
+    println,
+    tasking::Tid,
+    TASKING,
 };
 use alloc::vec::Vec;
 use spin::Mutex;
@@ -15,7 +17,7 @@ const MAX_FREQ: u32 = 1_193_180;

 pub static NUM_INTERRUPTS: AtomicU64 = AtomicU64::new(0);

-pub static THREAD_SLEEP_LIST: Mutex<Vec<(u64, usize, usize)>> = Mutex::new(Vec::new());
+pub static THREAD_SLEEP_LIST: Mutex<Vec<(u64, Tid)>> = Mutex::new(Vec::new());
 pub static CHECK_SLEEP_LIST: AtomicBool = AtomicBool::new(false);

 pub fn init(mut freq: u32) {
@@ -61,7 +63,7 @@ fn handler(_irq: u8, eoi_guard: EoiGuard) {
         let mut list = THREAD_SLEEP_LIST.lock();
         while list.last().is_some_and(|x| x.0 <= ticks) {
             let entry = list.pop().unwrap();
-            TASKING.force_wake(entry.1, entry.2).unwrap();
+            TASKING.force_wake(entry.1).unwrap();
         }
     }
     drop(eoi_guard);
@@ -70,13 +72,13 @@ fn handler(_irq: u8, eoi_guard: EoiGuard) {
     }
 }

-pub fn thread_sleeping(pid: usize, tid: usize, duration: u64) {
+pub fn thread_sleeping(tid: Tid, duration: u64) {
     let end = NUM_INTERRUPTS.load(Ordering::Relaxed) + duration;
     let mut list = THREAD_SLEEP_LIST.lock();
-    list.push((end, pid, tid));
+    list.push((end, tid));
     list.sort_by(|x, y| (y.0).cmp(&x.0));
 }

-pub fn cancel_sleep(pid: usize, tid: usize) {
-    THREAD_SLEEP_LIST.lock().retain(|x| x.1 != pid || x.2 != tid);
+pub fn cancel_sleep(tid: Tid) {
+    THREAD_SLEEP_LIST.lock().retain(|x| x.1 != tid);
 }
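With `Tid` deriving `PartialEq`, one comparison now covers what previously took two fields, so `cancel_sleep` shrinks to a single `retain` predicate. A standalone sketch of the new sleep-list shape (stub types; the real list is `THREAD_SLEEP_LIST` above):

    // Sketch: entries are now (wake-up tick, Tid) instead of (tick, pid, tid).
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    pub struct Pid(usize);
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    pub struct Tid { pid: Pid, tid: usize }

    fn main() {
        let mut list: Vec<(u64, Tid)> = vec![
            (10, Tid { pid: Pid(1), tid: 0 }),
            (20, Tid { pid: Pid(2), tid: 3 }),
        ];
        let cancel = Tid { pid: Pid(1), tid: 0 };
        // Old predicate: |x| x.1 != pid || x.2 != tid — now one derived comparison.
        list.retain(|x| x.1 != cancel);
        assert_eq!(list.len(), 1);
    }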
src/tasking.rs (255 changed lines)
@@ -8,6 +8,7 @@ use alloc::{boxed::Box, collections::VecDeque, vec, vec::Vec};
 use cast::{u64, usize};
 use core::{
     arch::{asm, naked_asm},
+    mem,
     ptr::{addr_of, addr_of_mut},
     sync::atomic::{AtomicBool, Ordering},
 };
@@ -84,7 +85,7 @@ pub enum SleepReason {
     WaitingForIPC,
     NewProcess,
     Exited,
-    JoinThread(usize),
+    JoinThread(Tid),
     LockedMutex(usize),
     TimeSleep,
 }
@@ -130,6 +131,50 @@ pub struct Process {
     mutexes: Mutex<Slab<bool>>,
 }

+#[repr(transparent)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct Pid(usize);
+
+impl Pid {
+    pub fn as_usize(self) -> usize {
+        self.0
+    }
+
+    pub fn from_usize(pid: usize) -> Self {
+        Self(pid)
+    }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct Tid {
+    pid: Pid,
+    tid: usize,
+}
+
+impl Tid {
+    pub fn from_pid_tid(pid: Pid, tid: usize) -> Self {
+        Self {
+            pid,
+            tid,
+        }
+    }
+
+    pub fn main_thread(pid: Pid) -> Self {
+        Self {
+            pid,
+            tid: 0
+        }
+    }
+
+    pub fn pid(&self) -> Pid {
+        self.pid
+    }
+
+    pub fn tid(&self) -> usize {
+        self.tid
+    }
+}
+
 impl Process {
     fn new(
         address_space: AddressSpace,
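The `#[repr(transparent)]` on `Pid` makes the newtype layout- and ABI-compatible with the `usize` it wraps, so the added type safety is free at runtime. A quick standalone check of that property:

    use core::mem::{align_of, size_of};

    #[repr(transparent)]
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    pub struct Pid(usize);

    fn main() {
        // A transparent single-field newtype has exactly its field's size and alignment.
        assert_eq!(size_of::<Pid>(), size_of::<usize>());
        assert_eq!(align_of::<Pid>(), align_of::<usize>());
    }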
@@ -260,7 +305,6 @@ pub struct InvalidPid;
 pub static TASKING: Lazy<Tasking> = Lazy::new(|| Tasking {
     processes: RwLock::new(Slab::new()),
     ready_to_run: Mutex::new(VecDeque::new()),
-    current_pid: RwLock::new(None),
     current_tid: RwLock::new(None),
     freeable_kstacks: Mutex::new(Vec::new()),
     wfi_loop: AtomicBool::new(false),
@@ -270,12 +314,11 @@ pub static TASKING: Lazy<Tasking> = Lazy::new(|| Tasking {
 #[derive(Debug)]
 pub struct Tasking {
     processes: RwLock<Slab<Process>>,
-    ready_to_run: Mutex<VecDeque<(usize, usize)>>,
-    current_pid: RwLock<Option<usize>>,
-    current_tid: RwLock<Option<usize>>,
+    ready_to_run: Mutex<VecDeque<Tid>>,
+    current_tid: RwLock<Option<Tid>>,
     freeable_kstacks: Mutex<Vec<Box<[usize], &'static ASpaceMutex>>>,
     wfi_loop: AtomicBool,
-    current_fpu_context: Mutex<Option<(usize, usize)>>,
+    current_fpu_context: Mutex<Option<Tid>>,
 }

 pub const KSTACK_SIZE: usize = (4 * 4096) / 8;
@@ -285,9 +328,9 @@ impl Tasking {
         &self,
         entry_point: *const extern "C" fn(usize) -> !,
         address_space: AddressSpace,
-    ) -> Result<usize, PagingError> {
+    ) -> Result<Pid, PagingError> {
         let process = Process::new(address_space, entry_point)?;
-        let pid = self.processes.write().insert(process);
+        let pid = Pid::from_usize(self.processes.write().insert(process));
         if let Some(&proc_man_pid) = REGISTERD_PIDS.read().get(&3) {
             let mut varint_buf = unsigned_varint::encode::u64_buffer();
             let mut buffer = Vec::new_in(&*ACTIVE_SPACE);
@@ -296,11 +339,11 @@ impl Tasking {
             buffer.extend_from_slice(&0u64.to_le_bytes());
             buffer.extend_from_slice(&8u16.to_le_bytes());
             buffer.extend_from_slice(&6u16.to_le_bytes());
-            buffer.extend_from_slice(unsigned_varint::encode::u64(u64(pid), &mut varint_buf));
-            if let Some(current_pid) = *(self.current_pid.read()) {
+            buffer.extend_from_slice(unsigned_varint::encode::u64(u64(pid.as_usize()), &mut varint_buf));
+            if let Some(current_tid) = *(self.current_tid.read()) {
                 buffer.push(1);
                 buffer.extend_from_slice(unsigned_varint::encode::u64(
-                    u64(current_pid),
+                    u64(current_tid.pid().as_usize()),
                     &mut varint_buf,
                 ));
             } else {
@@ -313,10 +356,10 @@ impl Tasking {
                 clippy::expect_used,
                 reason = "The tasking code in the kernel and proc_man CANNOT lose sync. Failure to communicate is fatal."
             )]
-            send_ipc_to(usize(proc_man_pid), buffer, len, true)
+            send_ipc_to(proc_man_pid, buffer, len, true)
                 .expect("Failed to send exit message to proc_man");
         } else {
-            println!("[TASKING] No process manager when creating PID {pid}");
+            println!("[TASKING] No process manager when creating PID {}", pid.as_usize());
         }
         Ok(pid)
     }
@@ -325,18 +368,17 @@ impl Tasking {
         &self,
         entry_point: *const extern "C" fn(usize) -> !,
         argument: usize,
-    ) -> Result<usize, PagingError> {
-        let current_pid = self.current_pid().unwrap();
-        let new_tid =
+    ) -> Result<Tid, PagingError> {
+        let current_tid = self.current_tid().unwrap();
+        let new_tid_internal =
             self.current_process_mut(|process| process.new_thread(entry_point, argument))?;
-        self.ready_to_run.lock().push_back((current_pid, new_tid));
+        let new_tid = Tid::from_pid_tid(current_tid.pid(), new_tid_internal);
+        self.ready_to_run.lock().push_back(new_tid);
         Ok(new_tid)
     }

     pub fn ok_to_yield(&self) -> bool {
         !(self.freeable_kstacks.is_locked()
-            || (self.current_pid.reader_count() > 0)
-            || (self.current_pid.writer_count() > 0)
             || (self.current_tid.reader_count() > 0)
             || (self.current_tid.writer_count() > 0)
             || self.ready_to_run.is_locked()
@@ -349,23 +391,21 @@ impl Tasking {
     pub fn task_yield(&self) {
         loop {
             self.freeable_kstacks.lock().clear();
-            let Some(current_pid) = self.current_pid() else {
+            let Some(current_tid) = self.current_tid() else {
                 self.wfi_loop.store(false, Ordering::Relaxed);
                 break;
             };
             let rtr_head = self.ready_to_run.lock().pop_front();
-            if let Some((next_process_pid, next_process_tid)) = rtr_head {
+            if let Some(next_process_tid) = rtr_head {
                 self.wfi_loop.store(false, Ordering::Relaxed);
-                if Some(next_process_pid) == self.current_pid()
-                    && Some(next_process_tid) == self.current_tid()
-                {
+                if rtr_head == self.current_tid() {
                     println!("Yielding to current thread! Returning");
                     break;
                 }
                 #[warn(clippy::unwrap_used, reason = "FIXME(?)")]
-                if current_pid != next_process_pid {
+                if current_tid.pid() != next_process_tid.pid() {
                     let current_address_space = self
-                        .process_mut(next_process_pid, |process| {
+                        .process_mut(next_process_tid.pid(), |process| {
                             #[expect(
                                 clippy::expect_used,
                                 reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
@@ -382,10 +422,9 @@ impl Tasking {
                         });
                 }
                 if self.current_thread(|thread| thread.sleeping.read().is_none()) {
-                    self.ready_to_run.lock().push_back((current_pid, self.current_tid().unwrap()));
+                    self.ready_to_run.lock().push_back(current_tid);
                 }
                 let curr_stack = self.current_thread_mut(|thread| addr_of_mut!(thread.kernel_esp));
-                *self.current_pid.write() = Some(next_process_pid);
                 *self.current_tid.write() = Some(next_process_tid);
                 let kernel_esp = self.current_thread(|thread| {
                     gdt::set_tss_stack(thread.kernel_esp_top);
@@ -409,23 +448,19 @@ impl Tasking {
         }
     }

-    pub fn current_pid(&self) -> Option<usize> {
-        *self.current_pid.read()
-    }
-
-    pub fn current_tid(&self) -> Option<usize> {
+    pub fn current_tid(&self) -> Option<Tid> {
         *self.current_tid.read()
     }

     pub fn exit(&self, code: u8) -> ! {
-        if let Some(current_pid) = self.current_pid() {
+        if let Some(current_tid) = self.current_tid() {
             let current_fpu_context = *self.current_fpu_context.lock();
-            if let Some((fpu_pid, _fpu_tid)) = current_fpu_context {
-                if current_pid == fpu_pid {
+            if let Some(fpu_tid) = current_fpu_context {
+                if current_tid.pid() == fpu_tid.pid() {
                     *self.current_fpu_context.lock() = None;
                 }
             }
-            self.ready_to_run.lock().retain(|&(ent_pid, _ent_tid)| ent_pid != current_pid);
+            self.ready_to_run.lock().retain(|&ent_tid| ent_tid.pid() != current_tid.pid());
             if let Some(&proc_man_pid) = REGISTERD_PIDS.read().get(&3) {
                 let mut varint_buf = unsigned_varint::encode::u64_buffer();
                 let mut buffer = Vec::new_in(&*ACTIVE_SPACE);
@@ -435,7 +470,7 @@ impl Tasking {
                 buffer.extend_from_slice(&8u16.to_le_bytes());
                 buffer.extend_from_slice(&4u16.to_le_bytes());
                 buffer.extend_from_slice(unsigned_varint::encode::u64(
-                    u64(current_pid),
+                    u64(current_tid.pid().as_usize()),
                     &mut varint_buf,
                 ));
                 buffer.push(code);
@@ -446,28 +481,27 @@ impl Tasking {
                     clippy::expect_used,
                     reason = "The tasking code in the kernel and proc_man CANNOT lose sync. Failure to communicate is fatal."
                 )]
-                send_ipc_to(usize(proc_man_pid), buffer, len, true)
+                send_ipc_to(proc_man_pid, buffer, len, true)
                     .expect("Failed to send exit message to proc_man");
             } else {
                 println!(
                     "[TASKING] No process manager when PID {} exited with code {code}",
-                    current_pid
+                    current_tid.pid().as_usize()
                 );
             }
         }
         loop {
             let rtr_head = self.ready_to_run.lock().pop_front();
-            if let Some((next_process_pid, next_process_tid)) = rtr_head {
+            if let Some(next_process_tid) = rtr_head {
                 self.wfi_loop.store(false, Ordering::Relaxed);
                 #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-                if self.current_pid.read().is_some() {
+                if self.current_tid.read().is_some() {
                     self.current_process(|process| {
                         for (_, thread) in &*process.threads().read() {
                             *thread.sleeping.write() = Some(SleepReason::Exited);
                         }
                     });
                 }
-                *self.current_pid.write() = Some(next_process_pid);
                 *self.current_tid.write() = Some(next_process_tid);
                 self
                     .current_process_mut(|process| {
@@ -503,34 +537,33 @@ impl Tasking {
     pub fn exit_thread(&self) -> ! {
         loop {
             #[warn(clippy::unwrap_used, reason = "FIXME(?)")]
-            let current_pid = self.current_pid().unwrap();
+            let current_tid = self.current_tid().unwrap();
             let current_fpu_context = *self.current_fpu_context.lock();
-            if let Some((fpu_pid, fpu_tid)) = current_fpu_context {
-                if current_pid == fpu_pid && self.current_tid().unwrap() == fpu_tid {
+            if let Some(fpu_tid) = current_fpu_context {
+                if current_tid == fpu_tid {
                     *self.current_fpu_context.lock() = None;
                 }
             }
             let rtr_head = self.ready_to_run.lock().pop_front();
-            if let Some((next_process_pid, next_process_tid)) = rtr_head {
+            if let Some(next_process_tid) = rtr_head {
                 self.wfi_loop.store(false, Ordering::Relaxed);
                 #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
                 self.current_thread(|thread| {
                     *thread.sleeping.write() = Some(SleepReason::Exited);
                 });
                 self.current_process(|process| {
-                    for (tid, _) in &*process.threads().read() {
+                    for (int_tid, _) in &*process.threads().read() {
                         self.wake(
-                            current_pid,
-                            tid,
+                            Tid { pid: current_tid.pid(), tid: int_tid },
                             SleepReason::JoinThread(self.current_tid().unwrap()),
                         )
                         .unwrap();
                     }
                 });
                 #[warn(clippy::unwrap_used, reason = "FIXME(?)")]
-                if current_pid != next_process_pid {
+                if current_tid != next_process_tid {
                     let current_address_space = self
-                        .process_mut(next_process_pid, |process| {
+                        .process_mut(next_process_tid.pid(), |process| {
                             #[expect(
                                 clippy::expect_used,
                                 reason = "This expect checks a critical invariant. If this fails, the kernel MUST panic"
@@ -546,7 +579,6 @@ impl Tasking {
                             process.address_space = Some(current_address_space);
                         });
                 }
-                *self.current_pid.write() = Some(next_process_pid);
                 *self.current_tid.write() = Some(next_process_tid);
                 let kernel_esp = self.current_thread_mut(|thread| {
                     gdt::set_tss_stack(thread.kernel_esp_top);
@@ -567,23 +599,21 @@ impl Tasking {
         }
     }

-    pub fn join_thread(&self, tid: usize) {
-        if self
-            .curr_proc_thread(tid, |thread| *thread.sleeping.read() == Some(SleepReason::Exited))
-            .unwrap()
+    pub fn join_thread(&self, tid: Tid) {
+        if self.thread(tid, |thread| *thread.sleeping.read() == Some(SleepReason::Exited)).unwrap()
         {
             return;
         }
         self.sleep(SleepReason::JoinThread(tid));
     }

-    pub fn clear_exited_pid(&self, pid: usize) -> Result<(), ()> {
-        if self.current_pid() == Some(pid) {
+    pub fn clear_exited_pid(&self, pid: Pid) -> Result<(), ()> {
+        if self.current_tid().map(|x| x.pid()) == Some(pid) {
             return Err(());
         }
         let mut processes = self.processes.write();
-        let process = processes.remove(pid);
-        self.ready_to_run.lock().retain(|&(ent_pid, _ent_tid)| ent_pid != pid);
+        let process = processes.remove(pid.as_usize());
+        self.ready_to_run.lock().retain(|&ent_tid| ent_tid.pid() != pid);
         let threads = process.threads.into_inner();
         for (_, thread) in threads {
             self.freeable_kstacks.lock().push(thread.kernel_stack);
@@ -594,25 +624,21 @@ impl Tasking {
     pub fn current_thread<F: FnOnce(&Thread) -> T, T>(&self, func: F) -> T {
         self.current_process(|process| {
             let threads = process.threads.read();
-            func(&threads[self.current_tid().unwrap()])
+            func(&threads[self.current_tid().unwrap().tid])
         })
     }

-    pub fn curr_proc_thread<F: FnOnce(&Thread) -> T, T>(
-        &self,
-        tid: usize,
-        func: F,
-    ) -> Result<T, InvalidPid> {
-        self.current_process(|process| {
+    pub fn thread<F: FnOnce(&Thread) -> T, T>(&self, tid: Tid, func: F) -> Result<T, InvalidPid> {
+        self.process(tid.pid(), |process| {
             let threads = process.threads.read();
-            Ok(func(threads.get(tid).ok_or(InvalidPid)?))
-        })
+            Ok(func(threads.get(tid.tid).ok_or(InvalidPid)?))
+        })?
     }

     pub fn current_thread_mut<F: FnOnce(&mut Thread) -> T, T>(&self, func: F) -> T {
         self.current_process_mut(|process| {
             let mut threads = process.threads.write();
-            func(&mut threads[self.current_tid().unwrap()])
+            func(&mut threads[self.current_tid().unwrap().tid])
         })
     }

@@ -620,34 +646,30 @@ impl Tasking {
         let processes = self.processes.read();
         #[warn(clippy::unwrap_used, reason = "FIXME")]
         #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-        func(&processes[self.current_pid().unwrap()])
+        func(&processes[self.current_tid().unwrap().pid().as_usize()])
     }

     pub fn current_process_mut<F: FnOnce(&mut Process) -> T, T>(&self, func: F) -> T {
         let mut processes = self.processes.write();
         #[warn(clippy::unwrap_used, reason = "FIXME")]
         #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-        func(&mut processes[self.current_pid().unwrap()])
+        func(&mut processes[self.current_tid().unwrap().pid().as_usize()])
     }

-    pub fn process<F: FnOnce(&Process) -> T, T>(
-        &self,
-        pid: usize,
-        func: F,
-    ) -> Result<T, InvalidPid> {
+    pub fn process<F: FnOnce(&Process) -> T, T>(&self, pid: Pid, func: F) -> Result<T, InvalidPid> {
         let processes = self.processes.read();
         #[warn(clippy::unwrap_used, reason = "FIXME")]
-        Ok(func(processes.get(pid).ok_or(InvalidPid)?))
+        Ok(func(processes.get(pid.as_usize()).ok_or(InvalidPid)?))
     }

     pub fn process_mut<F: FnOnce(&mut Process) -> T, T>(
         &self,
-        pid: usize,
+        pid: Pid,
         func: F,
     ) -> Result<T, InvalidPid> {
         let mut processes = self.processes.write();
         #[warn(clippy::unwrap_used, reason = "FIXME")]
-        Ok(func(processes.get_mut(pid).ok_or(InvalidPid)?))
+        Ok(func(processes.get_mut(pid.as_usize()).ok_or(InvalidPid)?))
     }

     pub fn sleep(&self, reason: SleepReason) {
@@ -659,35 +681,29 @@ impl Tasking {
         self.task_yield();
     }

-    pub fn wake(&self, pid: usize, tid: usize, reason: SleepReason) -> Result<(), InvalidPid> {
-        let processes = self.processes.read();
-        let process = processes.get(pid).ok_or(InvalidPid)?;
-        let threads = process.threads.read();
-        let thread = threads.get(tid).ok_or(InvalidPid)?;
-        let mut sleeping = thread.sleeping.write();
-        if *sleeping == Some(reason) {
-            if Some(pid) != self.current_pid() || Some(tid) != self.current_tid() {
-                self.ready_to_run.lock().push_back((pid, tid));
-            }
-            *sleeping = None;
-        }
-        Ok(())
+    pub fn wake(&self, tid: Tid, reason: SleepReason) -> Result<(), InvalidPid> {
+        self.thread(tid, |thread| {
+            let mut sleeping = thread.sleeping.write();
+            if *sleeping == Some(reason) {
+                if Some(tid) != self.current_tid() {
+                    self.ready_to_run.lock().push_back(tid);
+                }
+                *sleeping = None;
+            }
+        })
     }

-    pub fn force_wake(&self, pid: usize, tid: usize) -> Result<(), InvalidPid> {
-        let processes = self.processes.read();
-        let process = processes.get(pid).ok_or(InvalidPid)?;
-        let threads = process.threads.read();
-        let thread = threads.get(tid).ok_or(InvalidPid)?;
-        let mut sleeping = thread.sleeping.write();
-        if *sleeping != Some(SleepReason::Exited) {
-            // only thing we cannot force a wakeup from
-            if Some(pid) != self.current_pid() || Some(tid) != self.current_tid() {
-                self.ready_to_run.lock().push_back((pid, tid));
-            }
-            *sleeping = None;
-        }
-        Ok(())
+    pub fn force_wake(&self, tid: Tid) -> Result<(), InvalidPid> {
+        self.thread(tid, |thread| {
+            let mut sleeping = thread.sleeping.write();
+            if *sleeping != Some(SleepReason::Exited) {
+                // only thing we cannot force a wakeup from
+                if Some(tid) != self.current_tid() {
+                    self.ready_to_run.lock().push_back(tid);
+                }
+                *sleeping = None;
+            }
+        })
     }

     pub fn new_tls_key(&self) -> usize {
@@ -728,21 +744,16 @@ impl Tasking {
         if self.try_lock_mutex(mutex) {
             return true;
         }
-        pit::thread_sleeping(self.current_pid().unwrap(), self.current_tid().unwrap(), duration);
+        pit::thread_sleeping(self.current_tid().unwrap(), duration);
         self.sleep(SleepReason::LockedMutex(mutex));
-        pit::cancel_sleep(self.current_pid().unwrap(), self.current_tid().unwrap());
+        pit::cancel_sleep(self.current_tid().unwrap());
         self.try_lock_mutex(mutex)
     }

     pub fn try_lock_mutex(&self, mutex: usize) -> bool {
         self.current_process(|process| {
             let mut mutexes = process.mutexes.lock();
-            if !mutexes[mutex] {
-                mutexes[mutex] = true;
-                true
-            } else {
-                false
-            }
+            !mem::replace(&mut mutexes[mutex], true)
         })
     }

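The rewritten `try_lock_mutex` above folds the if/else into `!mem::replace(&mut mutexes[mutex], true)`: `mem::replace` writes `true` and returns the previous value, so negating it yields `true` exactly when the flag was previously `false`, a compact test-and-set. A quick standalone demonstration of the equivalence:

    use std::mem;

    // Same shape as the new try_lock_mutex body: set the flag, report if it was clear.
    fn try_lock(flag: &mut bool) -> bool {
        !mem::replace(flag, true)
    }

    fn main() {
        let mut locked = false;
        assert!(try_lock(&mut locked));  // first caller acquires the mutex
        assert!(!try_lock(&mut locked)); // second caller sees it already held
    }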
@@ -752,8 +763,11 @@ impl Tasking {
             mutexes[mutex] = false;
             for (tid, thread) in &*process.threads().read() {
                 if *thread.sleeping.read() == Some(SleepReason::LockedMutex(mutex)) {
-                    self.wake(self.current_pid().unwrap(), tid, SleepReason::LockedMutex(mutex))
-                        .unwrap();
+                    self.wake(
+                        Tid { pid: self.current_tid().unwrap().pid(), tid },
+                        SleepReason::LockedMutex(mutex),
+                    )
+                    .unwrap();
                     break;
                 }
             }
@@ -765,7 +779,7 @@ impl Tasking {
     }

     pub fn time_sleep(&self, timeout: u64) {
-        pit::thread_sleeping(self.current_pid().unwrap(), self.current_tid().unwrap(), timeout);
+        pit::thread_sleeping(self.current_tid().unwrap(), timeout);
         self.sleep(SleepReason::TimeSleep);
     }

@@ -793,11 +807,10 @@ impl Tasking {
             });
         }
         let current_fpu_context = *self.current_fpu_context.lock();
-        if let Some((fpu_pid, fpu_tid)) = current_fpu_context {
-            self.processes.read()[fpu_pid].threads.read()[fpu_tid].fpu_state.lock().save();
+        if let Some(fpu_tid) = current_fpu_context {
+            self.thread(fpu_tid, |thread| thread.fpu_state.lock().save()).unwrap();
         }
         self.current_thread(|thread| thread.fpu_state.lock().load());
-        *self.current_fpu_context.lock() =
-            Some((self.current_pid().unwrap(), self.current_tid().unwrap()));
+        *self.current_fpu_context.lock() = self.current_tid();
     }
 }