From 75814f3589612cfea11eaf267cbd5753757fa2eb Mon Sep 17 00:00:00 2001
From: pjht
Date: Fri, 16 Aug 2024 19:23:31 -0500
Subject: [PATCH] Track allocations per-address space instead of per-process

---
 src/interrupts.rs     |  13 +----
 src/kernel_heap.rs    |   3 +-
 src/main.rs           |   6 +++
 src/tasking.rs        | 116 ++++++++--------------------------------
 src/virtual_memory.rs | 117 +++++++++++++++++++++++++++---------------
 5 files changed, 103 insertions(+), 152 deletions(-)

diff --git a/src/interrupts.rs b/src/interrupts.rs
index 8d220c1..62a67bb 100644
--- a/src/interrupts.rs
+++ b/src/interrupts.rs
@@ -213,13 +213,13 @@ static INITRD_BUF: Lazy<&'static [u8]> = Lazy::new(|| {
             usize(ramdisk_len),
         )
     };
-    TASKING.record_heap_alloc(initrd.len().next_multiple_of(4096));
     KERNEL_SPACE.lock().alloc_force_user = true;
     let initrd = Box::leak(
         Vec::with_capacity_in(initrd.len(), &*KERNEL_SPACE)
             .tap_mut(|v| v.extend_from_slice(initrd))
             .into_boxed_slice(),
     );
+    KERNEL_SPACE.lock().record_alloc(initrd.len().next_multiple_of(4096));
     KERNEL_SPACE.lock().alloc_force_user = false;
     initrd
 });
@@ -266,9 +266,6 @@ extern "C" fn syscall_handler() {
                     }
                 })
             };
-            if retval != 0 {
-                TASKING.record_alloc(usize(regs.rdx) * 4096);
-            }
         }
         3 => {
             retval = u64(INITRD_BUF.as_ptr().expose_provenance());
@@ -315,9 +312,6 @@ extern "C" fn syscall_handler() {
                 })
             };
             retval = failed.into();
-            if !failed {
-                TASKING.record_alloc(num_pages * 4096);
-            }
         }
         7 => {
             if let Some(buffer) = get_buffer(regs.rdx) {
@@ -619,9 +613,6 @@ extern "C" fn syscall_handler() {
                     }
                 })
             };
-            if retval != 0 {
-                TASKING.record_alloc(usize(regs.rdx) * 4096);
-            }
         }
         22 => 'call22: {
             let Ok(page) = Page::from_start_address(VirtAddr::new(regs.rdx)) else {
                 retval = 1;
                 break 'call22;
             };
             retval = if regs.rcx == 0 {
-                TASKING.record_dealloc(usize(regs.rsi * 4096));
                 u64::from(ACTIVE_SPACE.lock().unmap(page, usize(regs.rsi)).is_err())
             } else {
                 TASKING.address_spaces_mut(|x| {
                     #[expect(
                         clippy::arithmetic_side_effects,
                         reason = "FIXME: The current address space should be usize::MAX as that is an invalid index, instead of 0."
                     )]
                     if let Some(space) = x.get_mut(usize(regs.rcx - 1)) {
-                        TASKING.record_dealloc(usize(regs.rsi*4096));
                         u64::from(space.unmap(page, usize(regs.rsi)).is_err())
                     } else {
                         1
diff --git a/src/kernel_heap.rs b/src/kernel_heap.rs
index 9e220b6..bf049fe 100644
--- a/src/kernel_heap.rs
+++ b/src/kernel_heap.rs
@@ -1,4 +1,4 @@
-use crate::{println, tasking::TASKING, virtual_memory::KERNEL_SPACE};
+use crate::{println, virtual_memory::KERNEL_SPACE};
 
 use core::{
     alloc::{GlobalAlloc, Layout},
@@ -45,7 +45,6 @@ unsafe impl GlobalAlloc for Heap {
         let Ok(ptr) = KERNEL_SPACE.lock().map_free(num_pages, PageTableFlags::empty()) else {
             return (ptr::null_mut(), layout);
         };
-        TASKING.record_heap_alloc(num_pages * 4096);
         #[expect(clippy::unwrap_used, reason = "
 from_size_align requires align to be a nonzero power of two, which it is.
 Also, size must be less than isize when rounded up to a multiple of align.
diff --git a/src/main.rs b/src/main.rs
index 1bbdc72..811ed82 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -98,9 +98,12 @@ use elf::{
     endian::AnyEndian,
     ElfBytes,
 };
+use physical_memory::PHYSICAL_MEMORY;
 use serial::SECOND_PORT;
+use spin::lazy::Lazy;
 use tar_no_std::TarArchiveRef;
 use tasking::TASKING;
+use virtual_memory::{ACTIVE_SPACE, KERNEL_SPACE};
 use x86_64::{
     registers::rflags::{self, RFlags},
     structures::paging::{Page, PageTableFlags},
@@ -122,6 +125,9 @@ pub fn main() {
         rflags::write(rflags_data);
     }
     gdt::init();
+    Lazy::force(&PHYSICAL_MEMORY);
+    Lazy::force(&KERNEL_SPACE);
+    Lazy::force(&ACTIVE_SPACE);
     interrupts::init();
     pit::init(100);
     let initrd = unsafe {
diff --git a/src/tasking.rs b/src/tasking.rs
index 1042fab..795f08c 100644
--- a/src/tasking.rs
+++ b/src/tasking.rs
@@ -1,7 +1,6 @@
 use crate::{
-    gdt,
-    println, qemu_exit,
-    virtual_memory::{ASpaceMutex, AddressSpace, PagingError, KERNEL_SPACE},
+    gdt, println, qemu_exit,
+    virtual_memory::{ASpaceMutex, AddressSpace, PagingError, ACTIVE_SPACE, KERNEL_SPACE},
 };
 use alloc::{borrow::ToOwned, boxed::Box, collections::VecDeque, ffi::CString, vec::Vec};
 use core::{
@@ -98,12 +97,10 @@ struct Process {
     kernel_esp: *mut usize,
     kernel_esp_top: VirtAddr,
     arguments: (*const *const u8, usize),
-    address_spaces: Mutex>,
     data_buffers: Mutex>,
     message_queue: Mutex>,
     sleeping: RwLock>,
-    bytes_allocated: AtomicUsize,
 }
 
 unsafe impl Send for Process {}
@@ -117,12 +114,7 @@ pub static TASKING: Lazy<Tasking> = Lazy::new(|| Tasking {
     ready_to_run: Mutex::new(VecDeque::new()),
     current_pid: RwLock::new(None),
     freeable_kstacks: Mutex::new(Vec::new()),
-    alloc_to_account: AtomicUsize::new(0),
-    dealloc_to_account: AtomicUsize::new(0),
-    kinit_allocated: AtomicUsize::new(0),
     buf_allocated: AtomicUsize::new(0),
-    kstacks_allocated: AtomicUsize::new(0),
-    heap_allocated: AtomicUsize::new(0),
 });
 
 #[derive(Debug)]
@@ -131,12 +123,7 @@ pub struct Tasking {
     ready_to_run: Mutex<VecDeque<usize>>,
     current_pid: RwLock<Option<usize>>,
     freeable_kstacks: Mutex<Vec<Box<[usize]>>>,
-    alloc_to_account: AtomicUsize,
-    dealloc_to_account: AtomicUsize,
-    kinit_allocated: AtomicUsize,
     buf_allocated: AtomicUsize,
-    kstacks_allocated: AtomicUsize,
-    heap_allocated: AtomicUsize,
 }
 
 pub const KSTACK_SIZE: usize = 0x1_0000 / 8;
@@ -158,7 +145,7 @@ impl Tasking {
         kernel_stack.push(0xFFF_FF80_0000 + (16 * 4096));
         kernel_stack.push(entry_point.expose_provenance());
         let mut kernel_stack = kernel_stack.into_boxed_slice();
-        self.kstacks_allocated.fetch_add(KSTACK_SIZE * 8, Ordering::Relaxed);
+        KERNEL_SPACE.lock().record_alloc(KSTACK_SIZE * 8);
         address_space.map_assert_unused(
             #[expect(
                 clippy::unwrap_used,
@@ -168,7 +155,6 @@ impl Tasking {
             16,
             PageTableFlags::USER_ACCESSIBLE,
         )?;
-        self.kstacks_allocated.fetch_add(16 * 4096, Ordering::Relaxed);
         let arguments = arguments.iter().map(|arg| (*arg).to_owned()).collect::<Vec<CString>>();
         #[expect(
             clippy::unwrap_used,
@@ -195,7 +181,6 @@ impl Tasking {
         };
         let user_arg_mem =
             address_space.map_free(args_layout.size() / 4096, PageTableFlags::USER_ACCESSIBLE)?;
-        self.kstacks_allocated.fetch_add(args_layout.size(), Ordering::Relaxed);
         address_space.run(|| unsafe {
             let mut ptr_ptr: *mut *const u8 = user_arg_mem.cast();
             for (&offset, argument) in arg_offsets.iter().zip(arguments.iter()) {
@@ -226,7 +211,6 @@ impl Tasking {
             message_queue: Mutex::new(SegQueue::new()),
             sleeping: RwLock::new(None),
             arguments: (user_arg_mem.cast(), arguments.len()),
-            bytes_allocated: AtomicUsize::new(0),
         });
         self.ready_to_run.lock().push_back(pid);
         Ok(pid)
     }
@@ -239,12 +223,11 @@ impl Tasking {
             || self.ready_to_run.is_locked()
             || (self.processes.reader_count() > 0)
             || (self.processes.writer_count() > 0)
-            || (self.current_pid.writer_count() > 0))
+            || KERNEL_SPACE.is_locked())
     }
 
     pub fn task_yield(&self) {
-        self.kstacks_allocated
-            .fetch_sub(self.freeable_kstacks.lock().len() * KSTACK_SIZE * 8, Ordering::Relaxed);
+        KERNEL_SPACE.lock().record_dealloc(self.freeable_kstacks.lock().len() * KSTACK_SIZE * 8);
         self.freeable_kstacks.lock().clear();
         let Some(current_pid) = *self.current_pid.read() else {
             return;
@@ -266,14 +249,6 @@ impl Tasking {
         let processes = self.processes.read();
         #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
         let current_process = &processes[current_pid];
-        current_process
-            .bytes_allocated
-            .fetch_add(self.alloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
-        current_process
-            .bytes_allocated
-            .fetch_sub(self.dealloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
-        self.alloc_to_account.store(0, Ordering::Relaxed);
-        self.dealloc_to_account.store(0, Ordering::Relaxed);
         #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
         let next_process = &processes[next_process_pid];
         gdt::set_tss_stack(next_process.kernel_esp_top);
@@ -312,21 +287,14 @@ impl Tasking {
         if let Some(current_pid) = *self.current_pid.read() {
             #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
             let current_process = &processes[current_pid];
-            current_process
-                .bytes_allocated
-                .fetch_add(self.alloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
-            current_process
-                .bytes_allocated
-                .fetch_sub(self.dealloc_to_account.load(Ordering::Relaxed), Ordering::Relaxed);
-            self.alloc_to_account.store(0, Ordering::Relaxed);
-            self.dealloc_to_account.store(0, Ordering::Relaxed);
+            let bytes_used = current_process.address_space.as_ref().map_or_else(
+                || ACTIVE_SPACE.lock().get_bytes_allocated(),
+                |space| space.get_bytes_allocated(),
+            );
             println!(
                 "[TASKING] PID {current_pid} exiting, used {} ({}), this is being leaked.",
-                SizeFormatter::new(
-                    current_process.bytes_allocated.load(Ordering::Relaxed),
-                    BINARY
-                ),
-                current_process.bytes_allocated.load(Ordering::Relaxed) / 4096
+                SizeFormatter::new(bytes_used, BINARY),
+                bytes_used / 4096
             );
             self.freeable_kstacks.lock().push(processes.remove(current_pid).kernel_stack);
         }
@@ -424,32 +392,6 @@ impl Tasking {
         self.processes.read()[self.current_pid.read().unwrap()].arguments
     }
 
-    pub fn record_alloc(&self, size: usize) {
-        if let Some(pid) = *self.current_pid.read() {
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            if self.processes.writer_count() == 0 {
-                self.processes.read()[pid].bytes_allocated.fetch_add(size, Ordering::Relaxed);
-            } else {
-                self.alloc_to_account.fetch_add(size, Ordering::Relaxed);
-            }
-        } else {
-            self.kinit_allocated.fetch_add(size, Ordering::Relaxed);
-        }
-    }
-
-    pub fn record_dealloc(&self, size: usize) {
-        if let Some(pid) = *self.current_pid.read() {
-            #[warn(clippy::indexing_slicing, reason = "FIXME(?)")]
-            if self.processes.writer_count() == 0 {
-                self.processes.read()[pid].bytes_allocated.fetch_sub(size, Ordering::Relaxed);
-            } else {
-                self.dealloc_to_account.fetch_add(size, Ordering::Relaxed);
-            }
-        } else {
-            self.kinit_allocated.fetch_sub(size, Ordering::Relaxed);
-        }
-    }
-
     pub fn record_buf_alloc(&self, size: usize) {
         self.buf_allocated.fetch_add(size, Ordering::Relaxed);
     }
@@ -458,42 +400,24 @@ impl Tasking {
         self.buf_allocated.fetch_sub(size, Ordering::Relaxed);
     }
 
-    pub fn record_heap_alloc(&self, size: usize) {
-        self.heap_allocated.fetch_add(size, Ordering::Relaxed);
-    }
-
-    pub fn record_heap_dealloc(&self, size: usize) {
-        self.heap_allocated.fetch_sub(size, Ordering::Relaxed);
-    }
-
     pub fn print_stats(&self) {
-        let mut total = self.kinit_allocated.load(Ordering::Relaxed)
-            + self.buf_allocated.load(Ordering::Relaxed)
-            + self.kstacks_allocated.load(Ordering::Relaxed)
-            + self.heap_allocated.load(Ordering::Relaxed);
+        let mut total =
+            KERNEL_SPACE.lock().get_bytes_allocated() + self.buf_allocated.load(Ordering::Relaxed);
         println!(
-            "[TASKING] Kernel init used {}",
-            SizeFormatter::new(self.kinit_allocated.load(Ordering::Relaxed), BINARY)
+            "[TASKING] Kernel misc used {}",
+            SizeFormatter::new(KERNEL_SPACE.lock().get_bytes_allocated(), BINARY)
         );
         println!(
             "[TASKING] Kernel buffers used {}",
             SizeFormatter::new(self.buf_allocated.load(Ordering::Relaxed), BINARY)
         );
-        println!(
-            "[TASKING] Kernel stacks used {}",
-            SizeFormatter::new(self.kstacks_allocated.load(Ordering::Relaxed), BINARY)
-        );
-        println!(
-            "[TASKING] Kernel heap used {}",
-            SizeFormatter::new(self.heap_allocated.load(Ordering::Relaxed), BINARY)
-        );
         for (i, process) in self.processes.read().iter() {
-            total += process.bytes_allocated.load(Ordering::Relaxed);
-            println!(
-                "[TASKING] PID {} used {}",
-                i,
-                SizeFormatter::new(process.bytes_allocated.load(Ordering::Relaxed), BINARY)
+            let bytes_used = process.address_space.as_ref().map_or_else(
+                || ACTIVE_SPACE.lock().get_bytes_allocated(),
+                |space| space.get_bytes_allocated(),
             );
+            total += bytes_used;
+            println!("[TASKING] PID {} used {}", i, SizeFormatter::new(bytes_used, BINARY));
         }
         println!("[TASKING] Total used {} ({})", SizeFormatter::new(total, BINARY), total / 4096);
     }
diff --git a/src/virtual_memory.rs b/src/virtual_memory.rs
index b1b8dd1..2b007c7 100644
--- a/src/virtual_memory.rs
+++ b/src/virtual_memory.rs
@@ -1,4 +1,7 @@
-use crate::{bootinfo::BOOTINFO, physical_memory::PHYSICAL_MEMORY, tasking::TASKING};
+use crate::{
+    bootinfo::BOOTINFO,
+    physical_memory::{PhysicalMemory, PHYSICAL_MEMORY},
+};
 use alloc::alloc::{AllocError, Allocator, Layout};
 use cast::{u64, usize};
 use core::{
@@ -6,6 +9,7 @@ use core::{
     ops::Deref,
     ptr::{self, NonNull},
     slice,
+    sync::atomic::{AtomicUsize, Ordering},
 };
 use replace_with::replace_with_or_abort;
 use spin::{Lazy, Mutex};
@@ -27,6 +31,7 @@ pub struct AddressSpace {
     /// kernel allocator to allocate user accessible pages
     pub alloc_force_user: bool,
     pub mapper: OffsetPageTable<'static>,
+    bytes_allocated: AtomicUsize,
 }
 
 impl fmt::Debug for AddressSpace {
@@ -35,7 +40,7 @@ impl fmt::Debug for AddressSpace {
             .field("is_kernel", &self.is_kernel)
             .field("alloc_force_user", &self.alloc_force_user)
             .field("level_4_table", &ptr::from_ref(&self.mapper.level_4_table()))
-            .finish()
+            .finish_non_exhaustive()
     }
 }
 
@@ -144,6 +149,28 @@ impl Deref for ASpaceMutex {
     }
 }
 
+struct PmemCountingWrapper<'a> {
+    pmem: &'a mut PhysicalMemory,
+    count: usize,
+}
+
+impl<'a> PmemCountingWrapper<'a> {
+    fn new(pmem: &'a mut PhysicalMemory) -> Self {
+        Self { pmem, count: 0 }
+    }
+}
+
+unsafe impl FrameAllocator<Size4KiB> for PmemCountingWrapper<'_> {
+    fn allocate_frame(&mut self) -> Option<PhysFrame> {
+        if let Some(frame) = self.pmem.allocate_frame() {
+            self.count += 1;
+            Some(frame)
+        } else {
+            None
+        }
+    }
+}
+
 pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
     // SAFETY: Cr3 must point to a valid table otherwise the system would triple fault, so
     // we know it is safe to turn the pointer to it into a reference.
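
[Editor's note, not part of the commit: PmemCountingWrapper is what makes the per-space accounting exact. map_to_with_table_flags may allocate zero or more intermediate page-table frames from whatever allocator it is handed, so threading a counting decorator through the call lets each mapping site charge count * 4096 bytes to the owning AddressSpace via record_alloc. A minimal standalone sketch of the same pattern, assuming only the x86_64 crate; CountingAllocator and the generic parameter A are illustrative names, not items from this codebase:

    use x86_64::structures::paging::{FrameAllocator, PhysFrame, Size4KiB};

    /// Wraps any frame allocator and counts the frames it hands out.
    struct CountingAllocator<A> {
        inner: A,
        count: usize,
    }

    // SAFETY: only as sound as the wrapped allocator; we merely delegate.
    unsafe impl<A: FrameAllocator<Size4KiB>> FrameAllocator<Size4KiB> for CountingAllocator<A> {
        fn allocate_frame(&mut self) -> Option<PhysFrame> {
            let frame = self.inner.allocate_frame()?; // delegate to the real allocator
            self.count += 1; // one more 4 KiB frame consumed
            Some(frame)
        }
    }

After a mapping call returns, count * 4096 is exactly the number of bytes newly consumed by page tables, which is what the hunks below feed into record_alloc.]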
@@ -151,6 +178,7 @@ pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
     for i in 0..256 {
         table[i].set_addr(PhysAddr::new(0), PageTableFlags::empty());
     }
+    let mut num_pts_alloced = 0;
     for i in 256..512 {
         if table[i].flags().contains(PageTableFlags::PRESENT) {
             let new_flags =
@@ -167,7 +195,7 @@ pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
                 )]
                 let (new_child, new_child_phys) =
                     alloc_pt().expect("Could not allocate new kernel entry");
-                TASKING.record_alloc(4096);
+                num_pts_alloced += 1;
                 new_child.write(PageTable::new());
                 new_child_phys
             },
@@ -179,6 +207,7 @@ pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
     }
     let mut kernel_space = AddressSpace::new_with_addr(table);
     kernel_space.is_kernel = true;
+    kernel_space.record_alloc(num_pts_alloced * 4096);
     let l4_virt = VirtAddr::from_ptr(ptr::from_ref(kernel_space.mapper.level_4_table()));
     #[expect(
         clippy::unwrap_used,
@@ -219,7 +248,9 @@ impl AddressSpace {
             new_table.copy_from(KERNEL_SPACE.lock().mapper.level_4_table(), 1);
             &mut *new_table
         };
-        Ok(Self::new_with_addr(new_table))
+        let space = Self::new_with_addr(new_table);
+        space.record_alloc(4096);
+        Ok(space)
     }
 
     fn new_with_addr(table: &'static mut PageTable) -> Self {
@@ -232,6 +263,7 @@ impl AddressSpace {
             mapper: unsafe { OffsetPageTable::new(table, *PHYS_OFFSET) },
             alloc_force_user: false,
             is_kernel: false,
+            bytes_allocated: AtomicUsize::new(0),
         }
     }
@@ -382,6 +414,8 @@ impl AddressSpace {
             },
         }) {
             unsafe {
+                let mut phys_mem = PHYSICAL_MEMORY.lock();
+                let mut pmem_wrap = PmemCountingWrapper::new(&mut phys_mem);
                 self.mapper
                     .map_to_with_table_flags(
                         page,
                         frame,
                         flags,
                         PageTableFlags::PRESENT
                             | PageTableFlags::WRITABLE
                             | PageTableFlags::USER_ACCESSIBLE,
-                        &mut *PHYSICAL_MEMORY.lock(),
+                        &mut pmem_wrap,
                     )?
                     .flush();
+                self.record_alloc(pmem_wrap.count * 4096);
             }
         }
         Ok(page.start_address().as_mut_ptr())
     }
@@ -433,6 +468,7 @@ impl AddressSpace {
             unsafe {
                 let mut phys_mem = PHYSICAL_MEMORY.lock();
                 let frame = phys_mem.allocate_frame().ok_or(PagingError::FrameAllocationFailed)?;
+                let mut pmem_wrap = PmemCountingWrapper::new(&mut phys_mem);
                 self.mapper
                     .map_to_with_table_flags(
                         page,
                         frame,
                         flags,
                         PageTableFlags::PRESENT
                             | PageTableFlags::WRITABLE
                             | PageTableFlags::USER_ACCESSIBLE,
-                        &mut *phys_mem,
+                        &mut pmem_wrap,
                     )?
                     .flush();
+                self.record_alloc(pmem_wrap.count * 4096);
             }
         }
         Ok(page.start_address().as_mut_ptr())
     }
@@ -489,36 +526,10 @@ impl AddressSpace {
             .allocate_frame_range(num_pages)
             .ok_or(PagingError::FrameAllocationFailed)?;
         let phys_start = frame_range.start.start_address().as_u64();
-        #[expect(
-            clippy::arithmetic_side_effects,
-            reason = "check_request_valid guarentees this won't overflow"
-        )]
-        for (page, frame) in
-            (PageRange { start: page, end: page + u64(num_pages) }).zip(frame_range)
-        {
-            unsafe {
-                let mut phys_mem = PHYSICAL_MEMORY.lock();
-                self.mapper
-                    .map_to_with_table_flags(
-                        page,
-                        frame,
-                        flags
-                            | PageTableFlags::PRESENT
-                            | PageTableFlags::WRITABLE
-                            | if self.alloc_force_user {
-                                PageTableFlags::USER_ACCESSIBLE
-                            } else {
-                                PageTableFlags::empty()
-                            },
-                        PageTableFlags::PRESENT
-                            | PageTableFlags::WRITABLE
-                            | PageTableFlags::USER_ACCESSIBLE,
-                        &mut *phys_mem,
-                    )?
-                    .flush();
-            }
+        unsafe {
+            self.map_to(page, frame_range.start, num_pages, flags | PageTableFlags::WRITABLE)
+                .map(|ptr| (ptr, phys_start))
         }
-        Ok((page.start_address().as_mut_ptr(), phys_start))
     }
 
     /// Maps new virtual pages and returns the starting address
@@ -549,7 +560,9 @@ impl AddressSpace {
     ) -> Result<*mut u8, PagingError> {
         // SAFETY: &mut aliasing is prevented by using free physical frames, and uninitialized
         // values are prevented by using free virtual pages.
-        unsafe { self.map(self.find_free_pages(num_pages)?, num_pages, flags) }
+        let ptr = unsafe { self.map(self.find_free_pages(num_pages)?, num_pages, flags)? };
+        self.record_alloc(num_pages * 4096);
+        Ok(ptr)
     }
 
     /// Maps new virtual pages to new contiguous physical memory
@@ -563,7 +576,10 @@ impl AddressSpace {
     ) -> Result<(*mut u8, u64), PagingError> {
         // SAFETY: &mut aliasing is prevented by using free physical frames, and uninitialized
         // values are prevented by using free virtual pages.
-        unsafe { self.map_cont_phys(self.find_free_pages(num_pages)?, num_pages, flags) }
+        let ptr =
+            unsafe { self.map_cont_phys(self.find_free_pages(num_pages)?, num_pages, flags)? };
+        self.record_alloc(num_pages * 4096);
+        Ok(ptr)
     }
 
     /// Same behavior as `map`, but asserts that the requested virtual page range is unmapped, and
@@ -575,7 +591,9 @@ impl AddressSpace {
         flags: PageTableFlags,
     ) -> Result<*mut u8, PagingError> {
         self.check_request_unmapped(page, num_pages)?;
-        unsafe { self.map(page, num_pages, flags) }
+        let ptr = unsafe { self.map(page, num_pages, flags)? };
+        self.record_alloc(num_pages * 4096);
+        Ok(ptr)
     }
 
     /// Same behavior as `map`, but only maps unmapped pages, and
@@ -596,12 +614,11 @@ impl AddressSpace {
             if self.translate_addr(page.start_address()).is_some() {
                 continue;
             }
-            if !self.is_kernel {
-                TASKING.record_alloc(4096);
-            }
+            self.record_alloc(4096);
             unsafe {
                 let mut phys_mem = PHYSICAL_MEMORY.lock();
                 let frame = phys_mem.allocate_frame().ok_or(PagingError::FrameAllocationFailed)?;
+                let mut pmem_wrap = PmemCountingWrapper::new(&mut phys_mem);
                 self.mapper
                     .map_to_with_table_flags(
                         page,
                         frame,
                         flags,
                         PageTableFlags::PRESENT
                             | PageTableFlags::WRITABLE
                             | PageTableFlags::USER_ACCESSIBLE,
-                        &mut *phys_mem,
+                        &mut pmem_wrap,
                     )?
                     .flush();
+                self.record_alloc(pmem_wrap.count * 4096);
             }
         }
         Ok(page.start_address().as_mut_ptr())
     }
@@ -631,6 +649,7 @@ impl AddressSpace {
                 flush.flush();
             }
         }
+        self.record_dealloc(num_pages * 4096);
         Ok(())
     }
@@ -663,6 +682,18 @@ impl AddressSpace {
         }
         Err(PagingError::PageAllocationFailed)
     }
+
+    pub fn record_alloc(&self, size: usize) {
+        self.bytes_allocated.fetch_add(size, Ordering::Relaxed);
+    }
+
+    pub fn record_dealloc(&self, size: usize) {
+        self.bytes_allocated.fetch_sub(size, Ordering::Relaxed);
+    }
+
+    pub fn get_bytes_allocated(&self) -> usize {
+        self.bytes_allocated.load(Ordering::Relaxed)
+    }
 }
 
 impl Drop for AddressSpace {
@@ -735,6 +766,7 @@ unsafe impl Allocator for ASpaceMutex {
             PageTableFlags::empty()
         };
         let start = space.map_free(size / 4096, flags).map_err(|_| AllocError)?;
+        space.record_dealloc(size); // don't track allocates with this via the regular methods
         Ok(unsafe { slice::from_raw_parts_mut(start.cast::<u8>(), size) }.into())
     }
@@ -750,6 +782,7 @@ unsafe impl Allocator for ASpaceMutex {
         let start_page =
             Page::from_start_address(VirtAddr::new(u64(ptr.as_ptr().expose_provenance()))).unwrap();
         let length = layout.size().div_ceil(4096);
+        self.0.lock().record_alloc(length * 4096); //don't track allocates with this via the regular
         #[expect(
             clippy::unwrap_used,
             reason = "
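
[Editor's note, not part of the commit: the two trailing hunks in the Allocator impl are deliberate compensation, not double-counting. allocate() goes through map_free(), which now calls record_alloc() itself, so the impl immediately record_dealloc()s the same size; deallocate() goes through unmap(), which calls record_dealloc(), so it record_alloc()s first. Buffers handed out this way stay accounted in TASKING's buf_allocated rather than in the owning space. A hypothetical test-style sketch of the new counter API; the AddressSpace::new constructor name is an assumption based on the hunk at line 219 above, everything else appears in this patch:

    // Sketch only; assumes a surrounding fn returning Result<_, PagingError>.
    let mut space = AddressSpace::new()?; // records 4096 bytes for the new L4 table
    let before = space.get_bytes_allocated();
    let _buf = space.map_free(4, PageTableFlags::empty())?;
    // map_free records the 4 mapped pages itself, plus any page-table frames
    // the counting wrapper saw, so no caller-side accounting remains:
    assert!(space.get_bytes_allocated() >= before + 4 * 4096);

This is also why the syscall handlers in src/interrupts.rs simply lost their TASKING.record_alloc / record_dealloc calls rather than gaining replacements.]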