Track allocations per-address space instead of per-process
parent 899fde8218
commit 75814f3589
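In outline, distilled from the hunks below: the per-process `bytes_allocated` counter and the `record_alloc`/`record_dealloc`/`record_heap_*` plumbing on `Tasking` are replaced by one atomic counter owned by each `AddressSpace`. Everything else in the commit reroutes callers onto three methods. A sketch recombining code from the diff (unrelated fields elided):

    use core::sync::atomic::{AtomicUsize, Ordering};

    pub struct AddressSpace {
        // ...mapper and flag fields elided...
        bytes_allocated: AtomicUsize,
    }

    impl AddressSpace {
        pub fn record_alloc(&self, size: usize) {
            self.bytes_allocated.fetch_add(size, Ordering::Relaxed);
        }

        pub fn record_dealloc(&self, size: usize) {
            self.bytes_allocated.fetch_sub(size, Ordering::Relaxed);
        }

        pub fn get_bytes_allocated(&self) -> usize {
            self.bytes_allocated.load(Ordering::Relaxed)
        }
    }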
@@ -213,13 +213,13 @@ static INITRD_BUF: Lazy<&'static [u8]> = Lazy::new(|| {
             usize(ramdisk_len),
         )
     };
-    TASKING.record_heap_alloc(initrd.len().next_multiple_of(4096));
     KERNEL_SPACE.lock().alloc_force_user = true;
     let initrd = Box::leak(
         Vec::with_capacity_in(initrd.len(), &*KERNEL_SPACE)
             .tap_mut(|v| v.extend_from_slice(initrd))
             .into_boxed_slice(),
     );
+    KERNEL_SPACE.lock().record_alloc(initrd.len().next_multiple_of(4096));
     KERNEL_SPACE.lock().alloc_force_user = false;
     initrd
 });
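Accounting is done in whole 4 KiB pages, hence the `next_multiple_of(4096)` rounding on the initrd length above. A quick worked example of that rounding:

    assert_eq!(10_000_usize.next_multiple_of(4096), 12_288); // rounds up to 3 pages
    assert_eq!(8_192_usize.next_multiple_of(4096), 8_192);   // already page-aligned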
@@ -266,9 +266,6 @@ extern "C" fn syscall_handler() {
                     }
                 })
             };
-            if retval != 0 {
-                TASKING.record_alloc(usize(regs.rdx) * 4096);
-            }
         }
         3 => {
             retval = u64(INITRD_BUF.as_ptr().expose_provenance());
@@ -315,9 +312,6 @@ extern "C" fn syscall_handler() {
                 })
             };
             retval = failed.into();
-            if !failed {
-                TASKING.record_alloc(num_pages * 4096);
-            }
         }
         7 => {
             if let Some(buffer) = get_buffer(regs.rdx) {
@@ -619,9 +613,6 @@ extern "C" fn syscall_handler() {
                     }
                 })
             };
-            if retval != 0 {
-                TASKING.record_alloc(usize(regs.rdx) * 4096);
-            }
         }
         22 => 'call22: {
             let Ok(page) = Page::from_start_address(VirtAddr::new(regs.rdx)) else {
@@ -629,7 +620,6 @@ extern "C" fn syscall_handler() {
                 break 'call22;
             };
             retval = if regs.rcx == 0 {
-                TASKING.record_dealloc(usize(regs.rsi * 4096));
                 u64::from(ACTIVE_SPACE.lock().unmap(page, usize(regs.rsi)).is_err())
             } else {
                 TASKING.address_spaces_mut(|x| {
@@ -638,7 +628,6 @@ extern "C" fn syscall_handler() {
                         reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
                     )]
                     if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
-                        TASKING.record_dealloc(usize(regs.rsi*4096));
                         u64::from(space.unmap(page, usize(regs.rsi)).is_err())
                     } else {
                         1
@@ -1,4 +1,4 @@
-use crate::{println, tasking::TASKING, virtual_memory::KERNEL_SPACE};
+use crate::{println, virtual_memory::KERNEL_SPACE};
 
 use core::{
     alloc::{GlobalAlloc, Layout},
@@ -45,7 +45,6 @@ unsafe impl GlobalAlloc for Heap {
         let Ok(ptr) = KERNEL_SPACE.lock().map_free(num_pages, PageTableFlags::empty()) else {
             return (ptr::null_mut(), layout);
         };
-        TASKING.record_heap_alloc(num_pages * 4096);
         #[expect(clippy::unwrap_used, reason = "
             from_size_align requires align to be a nonzero power of two, which it is.
             Also, size must be less than isize when rounded up to a multiple of align.
@@ -98,9 +98,12 @@ use elf::{
     endian::AnyEndian,
     ElfBytes,
 };
+use physical_memory::PHYSICAL_MEMORY;
 use serial::SECOND_PORT;
+use spin::lazy::Lazy;
 use tar_no_std::TarArchiveRef;
 use tasking::TASKING;
+use virtual_memory::{ACTIVE_SPACE, KERNEL_SPACE};
 use x86_64::{
     registers::rflags::{self, RFlags},
     structures::paging::{Page, PageTableFlags},
@@ -122,6 +125,9 @@ pub fn main() {
         rflags::write(rflags_data);
     }
     gdt::init();
+    Lazy::force(&PHYSICAL_MEMORY);
+    Lazy::force(&KERNEL_SPACE);
+    Lazy::force(&ACTIVE_SPACE);
     interrupts::init();
     pit::init(100);
     let initrd = unsafe {

src/tasking.rs | 116
@@ -1,7 +1,6 @@
 use crate::{
-    gdt,
-    println, qemu_exit,
-    virtual_memory::{ASpaceMutex, AddressSpace, PagingError, KERNEL_SPACE},
+    gdt, println, qemu_exit,
+    virtual_memory::{ASpaceMutex, AddressSpace, PagingError, ACTIVE_SPACE, KERNEL_SPACE},
 };
 use alloc::{borrow::ToOwned, boxed::Box, collections::VecDeque, ffi::CString, vec::Vec};
 use core::{
@@ -98,12 +97,10 @@ struct Process {
     kernel_esp: *mut usize,
     kernel_esp_top: VirtAddr,
     arguments: (*const *const u8, usize),
-
     address_spaces: Mutex<Slab<AddressSpace>>,
     data_buffers: Mutex<Slab<*mut [u8]>>,
     message_queue: Mutex<SegQueue<(usize, usize)>>,
     sleeping: RwLock<Option<SleepReason>>,
-    bytes_allocated: AtomicUsize,
 }
 
 unsafe impl Send for Process {}
@@ -117,12 +114,7 @@ pub static TASKING: Lazy<Tasking> = Lazy::new(|| Tasking {
     ready_to_run: Mutex::new(VecDeque::new()),
     current_pid: RwLock::new(None),
     freeable_kstacks: Mutex::new(Vec::new()),
-    alloc_to_account: AtomicUsize::new(0),
-    dealloc_to_account: AtomicUsize::new(0),
-    kinit_allocated: AtomicUsize::new(0),
     buf_allocated: AtomicUsize::new(0),
-    kstacks_allocated: AtomicUsize::new(0),
-    heap_allocated: AtomicUsize::new(0),
 });
 
 #[derive(Debug)]
@@ -131,12 +123,7 @@ pub struct Tasking {
     ready_to_run: Mutex<VecDeque<usize>>,
     current_pid: RwLock<Option<usize>>,
     freeable_kstacks: Mutex<Vec<Box<[usize], &'static ASpaceMutex>>>,
-    alloc_to_account: AtomicUsize,
-    dealloc_to_account: AtomicUsize,
-    kinit_allocated: AtomicUsize,
     buf_allocated: AtomicUsize,
-    kstacks_allocated: AtomicUsize,
-    heap_allocated: AtomicUsize,
 }
 
 pub const KSTACK_SIZE: usize = 0x1_0000 / 8;
@@ -158,7 +145,7 @@ impl Tasking {
         kernel_stack.push(0xFFF_FF80_0000 + (16 * 4096));
         kernel_stack.push(entry_point.expose_provenance());
         let mut kernel_stack = kernel_stack.into_boxed_slice();
-        self.kstacks_allocated.fetch_add(KSTACK_SIZE * 8, Ordering::Relaxed);
+        KERNEL_SPACE.lock().record_alloc(KSTACK_SIZE * 8);
         address_space.map_assert_unused(
             #[expect(
                 clippy::unwrap_used,
@@ -168,7 +155,6 @@ impl Tasking {
             16,
             PageTableFlags::USER_ACCESSIBLE,
         )?;
-        self.kstacks_allocated.fetch_add(16 * 4096, Ordering::Relaxed);
         let arguments = arguments.iter().map(|arg| (*arg).to_owned()).collect::<Vec<CString>>();
         #[expect(
             clippy::unwrap_used,
@@ -195,7 +181,6 @@ impl Tasking {
         };
         let user_arg_mem =
             address_space.map_free(args_layout.size() / 4096, PageTableFlags::USER_ACCESSIBLE)?;
-        self.kstacks_allocated.fetch_add(args_layout.size(), Ordering::Relaxed);
         address_space.run(|| unsafe {
             let mut ptr_ptr: *mut *const u8 = user_arg_mem.cast();
             for (&offset, argument) in arg_offsets.iter().zip(arguments.iter()) {
@@ -226,7 +211,6 @@ impl Tasking {
             message_queue: Mutex::new(SegQueue::new()),
             sleeping: RwLock::new(None),
             arguments: (user_arg_mem.cast(), arguments.len()),
-            bytes_allocated: AtomicUsize::new(0),
         });
         self.ready_to_run.lock().push_back(pid);
         Ok(pid)
@@ -239,12 +223,11 @@ impl Tasking {
             || self.ready_to_run.is_locked()
             || (self.processes.reader_count() > 0)
             || (self.processes.writer_count() > 0)
-            || (self.current_pid.writer_count() > 0))
+            || KERNEL_SPACE.is_locked())
     }
 
     pub fn task_yield(&self) {
-        self.kstacks_allocated
-            .fetch_sub(self.freeable_kstacks.lock().len() * KSTACK_SIZE * 8, Ordering::Relaxed);
+        KERNEL_SPACE.lock().record_dealloc(self.freeable_kstacks.lock().len() * KSTACK_SIZE * 8);
         self.freeable_kstacks.lock().clear();
         let Some(current_pid) = *self.current_pid.read() else {
             return;
@@ -266,14 +249,6 @@ impl Tasking {
         let processes = self.processes.read();
         #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
         let current_process = &processes[current_pid];
-        current_process
-            .bytes_allocated
-            .fetch_add(self.alloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
-        current_process
-            .bytes_allocated
-            .fetch_sub(self.dealloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
-        self.alloc_to_account.store(0, Ordering::Relaxed);
-        self.dealloc_to_account.store(0, Ordering::Relaxed);
         #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
         let next_process = &processes[next_process_pid];
         gdt::set_tss_stack(next_process.kernel_esp_top);
@@ -312,21 +287,14 @@ impl Tasking {
         if let Some(current_pid) = *self.current_pid.read() {
             #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
             let current_process = &processes[current_pid];
-            current_process
-                .bytes_allocated
-                .fetch_add(self.alloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
-            current_process
-                .bytes_allocated
-                .fetch_sub(self.dealloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
-            self.alloc_to_account.store(0, Ordering::Relaxed);
-            self.dealloc_to_account.store(0, Ordering::Relaxed);
+            let bytes_used = current_process.address_space.as_ref().map_or_else(
+                || ACTIVE_SPACE.lock().get_bytes_allocated(),
+                |space| space.get_bytes_allocated(),
+            );
             println!(
                 "[TASKING] PID {current_pid} exiting, used {} ({}), this is being leaked.",
-                SizeFormatter::new(
-                    current_process.bytes_allocated.load(Ordering::Relaxed),
-                    BINARY
-                ),
-                current_process.bytes_allocated.load(Ordering::Relaxed) / 4096
+                SizeFormatter::new(bytes_used, BINARY),
+                bytes_used / 4096
             );
             self.freeable_kstacks.lock().push(processes.remove(current_pid).kernel_stack);
         }
@@ -424,32 +392,6 @@ impl Tasking {
         self.processes.read()[self.current_pid.read().unwrap()].arguments
     }
 
-    pub fn record_alloc(&self, size: usize) {
-        if let Some(pid) = *self.current_pid.read() {
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            if self.processes.writer_count() == 0 {
-                self.processes.read()[pid].bytes_allocated.fetch_add(size, Ordering::Relaxed);
-            } else {
-                self.alloc_to_account.fetch_add(size, Ordering::Relaxed);
-            }
-        } else {
-            self.kinit_allocated.fetch_add(size, Ordering::Relaxed);
-        }
-    }
-
-    pub fn record_dealloc(&self, size: usize) {
-        if let Some(pid) = *self.current_pid.read() {
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            if self.processes.writer_count() == 0 {
-                self.processes.read()[pid].bytes_allocated.fetch_sub(size, Ordering::Relaxed);
-            } else {
-                self.dealloc_to_account.fetch_add(size, Ordering::Relaxed);
-            }
-        } else {
-            self.kinit_allocated.fetch_sub(size, Ordering::Relaxed);
-        }
-    }
-
     pub fn record_buf_alloc(&self, size: usize) {
         self.buf_allocated.fetch_add(size, Ordering::Relaxed);
     }
@@ -458,42 +400,24 @@ impl Tasking {
         self.buf_allocated.fetch_sub(size, Ordering::Relaxed);
     }
 
-    pub fn record_heap_alloc(&self, size: usize) {
-        self.heap_allocated.fetch_add(size, Ordering::Relaxed);
-    }
-
-    pub fn record_heap_dealloc(&self, size: usize) {
-        self.heap_allocated.fetch_sub(size, Ordering::Relaxed);
-    }
-
     pub fn print_stats(&self) {
-        let mut total = self.kinit_allocated.load(Ordering::Relaxed)
-            + self.buf_allocated.load(Ordering::Relaxed)
-            + self.kstacks_allocated.load(Ordering::Relaxed)
-            + self.heap_allocated.load(Ordering::Relaxed);
+        let mut total =
+            KERNEL_SPACE.lock().get_bytes_allocated() + self.buf_allocated.load(Ordering::Relaxed);
         println!(
-            "[TASKING] Kernel init used {}",
-            SizeFormatter::new(self.kinit_allocated.load(Ordering::Relaxed), BINARY)
+            "[TASKING] Kernel misc used {}",
+            SizeFormatter::new(KERNEL_SPACE.lock().get_bytes_allocated(), BINARY)
        );
         println!(
             "[TASKING] Kernel buffers used {}",
             SizeFormatter::new(self.buf_allocated.load(Ordering::Relaxed), BINARY)
         );
-        println!(
-            "[TASKING] Kernel stacks used {}",
-            SizeFormatter::new(self.kstacks_allocated.load(Ordering::Relaxed), BINARY)
-        );
-        println!(
-            "[TASKING] Kernel heap used {}",
-            SizeFormatter::new(self.heap_allocated.load(Ordering::Relaxed), BINARY)
-        );
         for (i, process) in self.processes.read().iter() {
-            total += process.bytes_allocated.load(Ordering::Relaxed);
-            println!(
-                "[TASKING] PID {} used {}",
-                i,
-                SizeFormatter::new(process.bytes_allocated.load(Ordering::Relaxed), BINARY)
+            let bytes_used = process.address_space.as_ref().map_or_else(
+                || ACTIVE_SPACE.lock().get_bytes_allocated(),
+                |space| space.get_bytes_allocated(),
             );
+            total += bytes_used;
+            println!("[TASKING] PID {} used {}", i, SizeFormatter::new(bytes_used, BINARY));
         }
         println!("[TASKING] Total used {} ({})", SizeFormatter::new(total, BINARY), total / 4096);
     }
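Both `map_or_else` calls above encode the same lookup: a process that is currently executing apparently has its address space swapped in as the active one (its own slot holding `None`), so its live counter must be read from `ACTIVE_SPACE`, while suspended processes carry the counter inline. A hypothetical helper, not in the commit, distilling that pattern:

    // Hypothetical helper: bytes currently charged to a process.
    fn bytes_used(process: &Process) -> usize {
        process.address_space.as_ref().map_or_else(
            // Running process: its space is the active one, so read ACTIVE_SPACE.
            || ACTIVE_SPACE.lock().get_bytes_allocated(),
            // Suspended process: the space (and its counter) is stored inline.
            |space| space.get_bytes_allocated(),
        )
    }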
@@ -1,4 +1,7 @@
-use crate::{bootinfo::BOOTINFO, physical_memory::PHYSICAL_MEMORY, tasking::TASKING};
+use crate::{
+    bootinfo::BOOTINFO,
+    physical_memory::{PhysicalMemory, PHYSICAL_MEMORY},
+};
 use alloc::alloc::{AllocError, Allocator, Layout};
 use cast::{u64, usize};
 use core::{
@@ -6,6 +9,7 @@ use core::{
     ops::Deref,
     ptr::{self, NonNull},
     slice,
+    sync::atomic::{AtomicUsize, Ordering},
 };
 use replace_with::replace_with_or_abort;
 use spin::{Lazy, Mutex};
@@ -27,6 +31,7 @@ pub struct AddressSpace {
     /// kernel allocator to allocate user accessible pages
     pub alloc_force_user: bool,
     pub mapper: OffsetPageTable<'static>,
+    bytes_allocated: AtomicUsize,
 }
 
 impl fmt::Debug for AddressSpace {
@@ -35,7 +40,7 @@ impl fmt::Debug for AddressSpace {
             .field("is_kernel", &self.is_kernel)
             .field("alloc_force_user", &self.alloc_force_user)
             .field("level_4_table", &ptr::from_ref(&self.mapper.level_4_table()))
-            .finish()
+            .finish_non_exhaustive()
     }
 }
 
@@ -144,6 +149,28 @@ impl Deref for ASpaceMutex {
     }
 }
 
+struct PmemCountingWrapper<'a> {
+    pmem: &'a mut PhysicalMemory,
+    count: usize,
+}
+
+impl<'a> PmemCountingWrapper<'a> {
+    fn new(pmem: &'a mut PhysicalMemory) -> Self {
+        Self { pmem, count: 0 }
+    }
+}
+
+unsafe impl FrameAllocator<Size4KiB> for PmemCountingWrapper<'_> {
+    fn allocate_frame(&mut self) -> Option<PhysFrame> {
+        if let Some(frame) = self.pmem.allocate_frame() {
+            self.count += 1;
+            Some(frame)
+        } else {
+            None
+        }
+    }
+}
+
 pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
     // SAFETY: Cr3 must point to a valid table otherwise the system would triple fault, so
     // we know it is safe to turn the pointer to it into a reference.
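The wrapper above exists because the mapper can pull extra frames for intermediate page tables during `map_to_with_table_flags`, and those frames would otherwise escape accounting. Each mapping site in this commit threads the locked physical-memory allocator through a `PmemCountingWrapper` and charges whatever was actually drawn. A condensed sketch of that call pattern (`page`, `frame`, `flags`, and `table_flags` stand in for the arguments at the real call sites):

    let mut phys_mem = PHYSICAL_MEMORY.lock();
    let mut pmem_wrap = PmemCountingWrapper::new(&mut phys_mem);
    self.mapper
        .map_to_with_table_flags(page, frame, flags, table_flags, &mut pmem_wrap)?
        .flush();
    // Every frame drawn through the wrapper bumped `count`, so the charge
    // reflects frames actually allocated, at 4 KiB each.
    self.record_alloc(pmem_wrap.count * 4096);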
@@ -151,6 +178,7 @@ pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
     for i in 0..256 {
         table[i].set_addr(PhysAddr::new(0), PageTableFlags::empty());
     }
+    let mut num_pts_alloced = 0;
     for i in 256..512 {
         if table[i].flags().contains(PageTableFlags::PRESENT) {
             let new_flags =
@@ -167,7 +195,7 @@ pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
                 )]
                 let (new_child, new_child_phys) =
                     alloc_pt().expect("Could not allocate new kernel entry");
-                TASKING.record_alloc(4096);
+                num_pts_alloced += 1;
                 new_child.write(PageTable::new());
                 new_child_phys
             },
@@ -179,6 +207,7 @@ pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
     }
     let mut kernel_space = AddressSpace::new_with_addr(table);
     kernel_space.is_kernel = true;
+    kernel_space.record_alloc(num_pts_alloced * 4096);
     let l4_virt = VirtAddr::from_ptr(ptr::from_ref(kernel_space.mapper.level_4_table()));
     #[expect(
         clippy::unwrap_used,
@@ -219,7 +248,9 @@ impl AddressSpace {
             new_table.copy_from(KERNEL_SPACE.lock().mapper.level_4_table(), 1);
             &mut *new_table
         };
-        Ok(Self::new_with_addr(new_table))
+        let space = Self::new_with_addr(new_table);
+        space.record_alloc(4096);
+        Ok(space)
     }
 
     fn new_with_addr(table: &'static mut PageTable) -> Self {
@@ -232,6 +263,7 @@ impl AddressSpace {
             mapper: unsafe { OffsetPageTable::new(table, *PHYS_OFFSET) },
             alloc_force_user: false,
             is_kernel: false,
+            bytes_allocated: AtomicUsize::new(0),
         }
     }
 
@@ -382,6 +414,8 @@ impl AddressSpace {
             },
         }) {
             unsafe {
+                let mut phys_mem = PHYSICAL_MEMORY.lock();
+                let mut pmem_wrap = PmemCountingWrapper::new(&mut phys_mem);
                 self.mapper
                     .map_to_with_table_flags(
                         page,
@@ -390,9 +424,10 @@ impl AddressSpace {
                         PageTableFlags::PRESENT
                             | PageTableFlags::WRITABLE
                             | PageTableFlags::USER_ACCESSIBLE,
-                        &mut *PHYSICAL_MEMORY.lock(),
+                        &mut pmem_wrap,
                     )?
                     .flush();
+                self.record_alloc(pmem_wrap.count * 4096);
             }
         }
         Ok(page.start_address().as_mut_ptr())
@@ -433,6 +468,7 @@ impl AddressSpace {
             unsafe {
                 let mut phys_mem = PHYSICAL_MEMORY.lock();
                 let frame = phys_mem.allocate_frame().ok_or(PagingError::FrameAllocationFailed)?;
+                let mut pmem_wrap = PmemCountingWrapper::new(&mut phys_mem);
                 self.mapper
                     .map_to_with_table_flags(
                         page,
@@ -448,9 +484,10 @@ impl AddressSpace {
                         PageTableFlags::PRESENT
                             | PageTableFlags::WRITABLE
                             | PageTableFlags::USER_ACCESSIBLE,
-                        &mut *phys_mem,
+                        &mut pmem_wrap,
                     )?
                     .flush();
+                self.record_alloc(pmem_wrap.count * 4096);
             }
         }
         Ok(page.start_address().as_mut_ptr())
@@ -489,37 +526,11 @@ impl AddressSpace {
             .allocate_frame_range(num_pages)
             .ok_or(PagingError::FrameAllocationFailed)?;
         let phys_start = frame_range.start.start_address().as_u64();
-        #[expect(
-            clippy::arithmetic_side_effects,
-            reason = "check_request_valid guarentees this won't overflow"
-        )]
-        for (page, frame) in
-            (PageRange { start: page, end: page + u64(num_pages) }).zip(frame_range)
-        {
         unsafe {
-            let mut phys_mem = PHYSICAL_MEMORY.lock();
-            self.mapper
-                .map_to_with_table_flags(
-                    page,
-                    frame,
-                    flags
-                        | PageTableFlags::PRESENT
-                        | PageTableFlags::WRITABLE
-                        | if self.alloc_force_user {
-                            PageTableFlags::USER_ACCESSIBLE
-                        } else {
-                            PageTableFlags::empty()
-                        },
-                    PageTableFlags::PRESENT
-                        | PageTableFlags::WRITABLE
-                        | PageTableFlags::USER_ACCESSIBLE,
-                    &mut *phys_mem,
-                )?
-                .flush();
+            self.map_to(page, frame_range.start, num_pages, flags | PageTableFlags::WRITABLE)
+                .map(|ptr| (ptr, phys_start))
         }
     }
-        Ok((page.start_address().as_mut_ptr(), phys_start))
-    }
 
     /// Maps new virtual pages and returns the starting addresss
     ///
@@ -549,7 +560,9 @@ impl AddressSpace {
     ) -> Result<*mut u8, PagingError> {
         // SAFETY: &mut aliasing is prevented by using free physical frames, and uninitialized
         // values are prevented by using free virtual pages.
-        unsafe { self.map(self.find_free_pages(num_pages)?, num_pages, flags) }
+        let ptr = unsafe { self.map(self.find_free_pages(num_pages)?, num_pages, flags)? };
+        self.record_alloc(num_pages * 4096);
+        Ok(ptr)
     }
 
     /// Maps new virtual pages to new contigous physical memory
@@ -563,7 +576,10 @@ impl AddressSpace {
     ) -> Result<(*mut u8, u64), PagingError> {
         // SAFETY: &mut aliasing is prevented by using free physical frames, and uninitialized
         // values are prevented by using free virtual pages.
-        unsafe { self.map_cont_phys(self.find_free_pages(num_pages)?, num_pages, flags) }
+        let ptr =
+            unsafe { self.map_cont_phys(self.find_free_pages(num_pages)?, num_pages, flags)? };
+        self.record_alloc(num_pages * 4096);
+        Ok(ptr)
     }
 
     /// Same behavior as `map`, but asserts that the requested virtual page range is unmapped, and
@@ -575,7 +591,9 @@ impl AddressSpace {
         flags: PageTableFlags,
     ) -> Result<*mut u8, PagingError> {
         self.check_request_unmapped(page, num_pages)?;
-        unsafe { self.map(page, num_pages, flags) }
+        let ptr = unsafe { self.map(page, num_pages, flags)? };
+        self.record_alloc(num_pages * 4096);
+        Ok(ptr)
     }
 
     /// Same behavior as `map`, but only maps unmapped pages, and
@@ -596,12 +614,11 @@ impl AddressSpace {
             if self.translate_addr(page.start_address()).is_some() {
                 continue;
             }
-            if !self.is_kernel {
-                TASKING.record_alloc(4096);
-            }
+            self.record_alloc(4096);
             unsafe {
                 let mut phys_mem = PHYSICAL_MEMORY.lock();
                 let frame = phys_mem.allocate_frame().ok_or(PagingError::FrameAllocationFailed)?;
+                let mut pmem_wrap = PmemCountingWrapper::new(&mut phys_mem);
                 self.mapper
                     .map_to_with_table_flags(
                         page,
@@ -610,9 +627,10 @@ impl AddressSpace {
                         PageTableFlags::PRESENT
                             | PageTableFlags::WRITABLE
                             | PageTableFlags::USER_ACCESSIBLE,
-                        &mut *phys_mem,
+                        &mut pmem_wrap,
                     )?
                     .flush();
+                self.record_alloc(pmem_wrap.count * 4096);
             }
         }
         Ok(page.start_address().as_mut_ptr())
@@ -631,6 +649,7 @@ impl AddressSpace {
                 flush.flush();
             }
         }
+        self.record_dealloc(num_pages * 4096);
         Ok(())
     }
 
@@ -663,6 +682,18 @@ impl AddressSpace {
         }
         Err(PagingError::PageAllocationFailed)
     }
+
+    pub fn record_alloc(&self, size: usize) {
+        self.bytes_allocated.fetch_add(size, Ordering::Relaxed);
+    }
+
+    pub fn record_dealloc(&self, size: usize) {
+        self.bytes_allocated.fetch_sub(size, Ordering::Relaxed);
+    }
+
+    pub fn get_bytes_allocated(&self) -> usize {
+        self.bytes_allocated.load(Ordering::Relaxed)
+    }
 }
 
 impl Drop for AddressSpace {
@@ -735,6 +766,7 @@ unsafe impl Allocator for ASpaceMutex {
             PageTableFlags::empty()
         };
         let start = space.map_free(size / 4096, flags).map_err(|_| AllocError)?;
+        space.record_dealloc(size); // don't track allocates with this via the regular methods
         Ok(unsafe { slice::from_raw_parts_mut(start.cast::<u8>(), size) }.into())
     }
 
@@ -750,6 +782,7 @@ unsafe impl Allocator for ASpaceMutex {
         let start_page =
             Page::from_start_address(VirtAddr::new(u64(ptr.as_ptr().expose_provenance()))).unwrap();
         let length = layout.size().div_ceil(4096);
+        self.0.lock().record_alloc(length * 4096); //don't track allocates with this via the regular
         #[expect(
             clippy::unwrap_used,
             reason = "
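One non-obvious pairing in the two Allocator hooks above: memory handed out through `ASpaceMutex` is already tracked separately as kernel buffers (`record_buf_alloc`/`record_buf_dealloc` in src/tasking.rs), so these hooks deliberately cancel the per-space bookkeeping that `map_free` and, presumably past the end of this excerpt, `unmap` perform internally. A comment-style ledger of the net effect, with n = the buffer size rounded to whole pages:

    // allocate():   map_free()       → counter += n
    //               record_dealloc() → counter -= n   (net 0)
    // deallocate(): record_alloc()   → counter += n
    //               unmap()          → counter -= n   (net 0)
    // The buffer itself is counted once, via TASKING's buf_allocated.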