Add thread_sleep and mutex_lock_timeout syscalls

pjht 2024-11-28 20:59:20 -06:00
parent a4a8bf0152
commit f81452f431
Signed by: pjht
GPG Key ID: 7B5F6AFBEC7EE78E
3 changed files with 74 additions and 8 deletions

View File

@@ -1,12 +1,18 @@
use crate::{
-    bootinfo::BOOTINFO, pit::NUM_INTERRUPTS, print, println, tasking::{InvalidPid, IpcMessage, SleepReason}, virtual_memory::{ASpaceMutex, AddressSpace, PagingError, ACTIVE_SPACE, KERNEL_SPACE}, TASKING
+    bootinfo::BOOTINFO,
+    pit::NUM_INTERRUPTS,
+    print, println,
+    tasking::{InvalidPid, IpcMessage, SleepReason},
+    virtual_memory::{ASpaceMutex, AddressSpace, PagingError, ACTIVE_SPACE, KERNEL_SPACE},
+    TASKING,
};
use alloc::{boxed::Box, vec::Vec};
use az::WrappingCast;
use cast::{u64, usize};
use core::{
    arch::{asm, naked_asm},
-    ptr, slice, sync::atomic::Ordering,
+    ptr, slice,
+    sync::atomic::Ordering,
};
use hashbrown::HashMap;
use pic8259::ChainedPics;
@@ -812,6 +818,10 @@ extern "C" fn syscall_handler() {
        38 => {
            retval = NUM_INTERRUPTS.load(Ordering::Relaxed);
        }
+        39 => TASKING.time_sleep(regs.rcx),
+        40 => {
+            retval = u64::from(TASKING.lock_mutex_timeout(usize(regs.rcx), regs.rdx));
+        }
        _ => (),
    };
    unsafe { SYSCALL_REGS = regs };
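
The two new arms give userspace a timed sleep (syscall 39: tick count in rcx) and a mutex lock that gives up after a deadline (syscall 40: mutex handle in rcx, tick count in rdx, acquired-or-not returned through retval). Below is a minimal sketch of userspace wrappers; the diff only shows that arguments travel in rcx/rdx, so the syscall-number register (rax) and the `int 0x80` trap are assumptions standing in for the kernel's real entry convention.

use core::arch::asm;

const SYS_THREAD_SLEEP: u64 = 39;
const SYS_LOCK_MUTEX_TIMEOUT: u64 = 40;

/// Block the calling thread for `ticks` PIT ticks (syscall 39).
fn thread_sleep(ticks: u64) {
    unsafe {
        asm!(
            "int 0x80",                         // assumed trap mechanism
            inout("rax") SYS_THREAD_SLEEP => _, // assumed syscall-number register
            in("rcx") ticks,                    // matches regs.rcx in the handler
        );
    }
}

/// Try to lock mutex `id`, giving up after `ticks` PIT ticks (syscall 40).
/// Returns true if the mutex was acquired before the deadline.
fn lock_mutex_timeout(id: usize, ticks: u64) -> bool {
    let ret: u64;
    unsafe {
        asm!(
            "int 0x80",
            inout("rax") SYS_LOCK_MUTEX_TIMEOUT => ret, // retval assumed back in rax
            in("rcx") id as u64,
            in("rdx") ticks,
        );
    }
    ret != 0
}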

View File

@@ -1,9 +1,12 @@
-use core::sync::atomic::{AtomicU64, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use crate::{
    interrupts::{self, EoiGuard},
-    println, TASKING,
+    println,
+    tasking::SleepReason,
+    TASKING,
};
use alloc::collections::vec_deque::VecDeque;
use spin::Mutex;
use x86_64::instructions::port::{Port, PortWriteOnly};
@@ -14,6 +17,9 @@ const MAX_FREQ: u32 = 1_193_180;
pub static NUM_INTERRUPTS: AtomicU64 = AtomicU64::new(0);
+pub static THREAD_SLEEP_LIST: Mutex<VecDeque<(u64, usize, usize)>> = Mutex::new(VecDeque::new());
+pub static CHECK_SLEEP_LIST: AtomicBool = AtomicBool::new(false);
+
pub fn init(mut freq: u32) {
assert_ne!(freq, 0);
#[expect(
@@ -48,12 +54,29 @@ pub fn init(mut freq: u32) {
    unsafe { CMD.lock().write(0b0011_0110_u8) };
    unsafe { DATA.lock().write(div_bytes[0]) };
    unsafe { DATA.lock().write(div_bytes[1]) };
+    CHECK_SLEEP_LIST.store(true, Ordering::Relaxed);
}
fn handler(_irq: u8, eoi_guard: EoiGuard) {
-    NUM_INTERRUPTS.fetch_add(1, Ordering::Relaxed);
+    let ticks = NUM_INTERRUPTS.fetch_add(1, Ordering::Relaxed) + 1;
+    if CHECK_SLEEP_LIST.load(Ordering::Relaxed) {
+        let mut list = THREAD_SLEEP_LIST.lock();
+        while list.front().is_some_and(|x| x.0 <= ticks) {
+            let entry = list.pop_front().unwrap();
+            TASKING.force_wake(entry.1, entry.2).unwrap();
+        }
+    }
    drop(eoi_guard);
    if TASKING.ok_to_yield() {
        TASKING.task_yield();
    }
}
+
+pub fn thread_sleeping(pid: usize, tid: usize, duration: u64) {
+    let end = NUM_INTERRUPTS.load(Ordering::Relaxed) + duration;
+    THREAD_SLEEP_LIST.lock().push_back((end, pid, tid));
+}
+
+pub fn cancel_sleep(pid: usize, tid: usize) {
+    THREAD_SLEEP_LIST.lock().retain(|x| x.1 != pid || x.2 != tid);
+}

View File

@@ -1,7 +1,7 @@
use crate::{
    gdt,
    interrupts::{send_ipc_to, REGISTERD_PIDS},
-    println,
+    pit, println,
    virtual_memory::{ASpaceMutex, AddressSpace, PagingError, ACTIVE_SPACE, KERNEL_SPACE},
};
use alloc::{boxed::Box, collections::VecDeque, vec, vec::Vec};
@@ -13,7 +13,7 @@ use core::{
};
use crossbeam_queue::SegQueue;
use slab::Slab;
-use spin::{mutex, Lazy, Mutex, RwLock};
+use spin::{Lazy, Mutex, RwLock};
use x86_64::{
structures::paging::{Page, PageTableFlags},
VirtAddr,
@@ -85,6 +85,7 @@ pub enum SleepReason {
    Exited,
    JoinThread(usize),
    LockedMutex(usize),
+    TimeSleep,
}
#[derive(Debug)]
@@ -613,6 +614,22 @@ impl Tasking {
        Ok(())
    }

+    pub fn force_wake(&self, pid: usize, tid: usize) -> Result<(), InvalidPid> {
+        let processes = self.processes.read();
+        let process = processes.get(pid).ok_or(InvalidPid)?;
+        let threads = process.threads.read();
+        let thread = threads.get(tid).ok_or(InvalidPid)?;
+        let mut sleeping = thread.sleeping.write();
+        // Exited is the only state we cannot force a wakeup from.
+        if *sleeping != Some(SleepReason::Exited) {
+            if Some(pid) != self.current_pid() || Some(tid) != self.current_tid() {
+                self.ready_to_run.lock().push_back((pid, tid));
+            }
+            *sleeping = None;
+        }
+        Ok(())
+    }
+
pub fn new_tls_key(&self) -> usize {
self.current_process_mut(|process| {
let id = process.num_tls;
@@ -647,6 +664,16 @@
        }
    }

+    pub fn lock_mutex_timeout(&self, mutex: usize, duration: u64) -> bool {
+        if self.try_lock_mutex(mutex) {
+            return true;
+        }
+        pit::thread_sleeping(self.current_pid().unwrap(), self.current_tid().unwrap(), duration);
+        self.sleep(SleepReason::LockedMutex(mutex));
+        pit::cancel_sleep(self.current_pid().unwrap(), self.current_tid().unwrap());
+        self.try_lock_mutex(mutex)
+    }
+
pub fn try_lock_mutex(&self, mutex: usize) -> bool {
self.current_process(|process| {
let mut mutexes = process.mutexes.lock();
@@ -665,7 +692,8 @@
        mutexes[mutex] = false;
        for (tid, thread) in &*process.threads().read() {
            if *thread.sleeping.read() == Some(SleepReason::LockedMutex(mutex)) {
-                self.wake(self.current_pid().unwrap(), tid, SleepReason::LockedMutex(mutex));
+                self.wake(self.current_pid().unwrap(), tid, SleepReason::LockedMutex(mutex))
+                    .unwrap();
                break;
            }
        }
@@ -676,6 +704,11 @@
        self.current_process(|process| process.mutexes.lock().remove(mutex));
    }

+    pub fn time_sleep(&self, timeout: u64) {
+        pit::thread_sleeping(self.current_pid().unwrap(), self.current_tid().unwrap(), timeout);
+        self.sleep(SleepReason::TimeSleep);
+    }
+
//pub fn print_stats(&self) {
// let mut total = KERNEL_SPACE.lock().get_bytes_allocated();
// println!(
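
Taken together, a thread blocked in lock_mutex_timeout has two wake-up paths: the targeted wake issued when the mutex is released (the @@ -665 hunk above) and the PIT handler's force_wake once the registered deadline expires. Whichever fires first, the thread cancels its sleep-list entry and re-runs try_lock_mutex, so the returned bool simply reports which side won the race. An illustrative kernel-side caller follows; the mutex handle and the unlock routine's name are assumptions, since the diff shows the unlock body but not its signature.

// Hypothetical names: MUTEX stands in for a handle from the process's mutex
// table, and unlock_mutex for the release routine whose body appears in the
// @@ -665 hunk but whose name this diff does not show.
const MUTEX: usize = 3;
const TIMEOUT_TICKS: u64 = 100; // PIT ticks, the same unit syscalls 39/40 use

fn try_critical_section() {
    if TASKING.lock_mutex_timeout(MUTEX, TIMEOUT_TICKS) {
        // Acquired, either immediately or after a wake from the unlock path.
        // ... critical section ...
        TASKING.unlock_mutex(MUTEX); // assumed name
    } else {
        // Deadline passed: force_wake resumed us and the final
        // try_lock_mutex still failed.
    }
}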