Count allocated and freed bytes in the kernel heap

pjht 2024-07-07 08:01:09 -05:00
parent e79d426fb0
commit 3b157a6573
Signed by: pjht
GPG Key ID: 7B5F6AFBEC7EE78E
2 changed files with 57 additions and 15 deletions

src/kernel_heap.rs

@@ -1,50 +1,89 @@
-use crate::virtual_memory::KERNEL_SPACE;
+use crate::{println, virtual_memory::KERNEL_SPACE};
 use core::{
     alloc::{GlobalAlloc, Layout},
     ptr::NonNull,
+    sync::atomic::{AtomicUsize, Ordering},
 };
 use linked_list_allocator::hole::HoleList;
 use spin::Mutex;
 use x86_64::structures::paging::PageTableFlags;
 
-struct Heap(Mutex<HoleList>);
+pub struct Heap {
+    heap: Mutex<HoleList>,
+    bytes_alloced: AtomicUsize,
+    bytes_freed: AtomicUsize,
+    pmem_size: AtomicUsize,
+}
+
+fn format_byte_count(count: usize) -> (f64, &'static str) {
+    let mut count = count as f64;
+    let mut prefix = 0;
+    while count >= 1024.0 {
+        count /= 1024.0;
+        prefix += 1;
+    }
+    let prefix = ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei"][prefix];
+    (count, prefix)
+}
+
+impl Heap {
+    pub fn print_stats(&self) {
+        let (fmtd_alloced, alloced_pfx) =
+            format_byte_count(self.bytes_alloced.load(Ordering::Relaxed));
+        let (fmtd_freed, freed_pfx) = format_byte_count(self.bytes_freed.load(Ordering::Relaxed));
+        let (fmtd_total, total_pfx) = format_byte_count(
+            self.bytes_alloced.load(Ordering::Relaxed) - self.bytes_freed.load(Ordering::Relaxed),
+        );
+        let (fmtd_pmem, pmem_pfx) = format_byte_count(self.pmem_size.load(Ordering::Relaxed));
+        println!(
+            "[HEAP] {:.2} {}B allocated, {:.2} {}B freed ({:.2} {}B total)",
+            fmtd_alloced, alloced_pfx, fmtd_freed, freed_pfx, fmtd_total, total_pfx
+        );
+        println!("[HEAP] {:.2} {}B physical memory used for heap", fmtd_pmem, pmem_pfx);
+    }
+}
 
 unsafe impl Send for Heap {}
 unsafe impl Sync for Heap {}
 
 unsafe impl GlobalAlloc for Heap {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        let mut locked_self = self.0.lock();
-        let ptr = locked_self
+        let mut locked_self = self.heap.lock();
+        let (ptr, true_layout) = locked_self
             .allocate_first_fit(layout)
-            .map(|(allocation, _)| allocation.as_ptr())
+            .map(|(allocation, true_layout)| (allocation.as_ptr(), true_layout))
             .unwrap_or_else(|_| {
                 drop(locked_self);
                 let num_pages = layout.size().div_ceil(4096) * 2;
-                unsafe {
-                    self.dealloc(
-                        KERNEL_SPACE.lock().map_free(num_pages, PageTableFlags::empty()).unwrap(),
-                        Layout::from_size_align(num_pages * 4096, 4096).unwrap(),
-                    );
-                }
-                self.0
+                self.pmem_size.fetch_add(num_pages * 4096, Ordering::Relaxed);
+                let ptr = KERNEL_SPACE.lock().map_free(num_pages, PageTableFlags::empty()).unwrap();
+                // Named `region_layout` rather than shadowing `layout`, so the
+                // retry below still allocates with the caller's original layout.
+                let region_layout = Layout::from_size_align(num_pages * 4096, 4096).unwrap();
+                unsafe { self.heap.lock().deallocate(NonNull::new_unchecked(ptr), region_layout) };
+                self.heap
                     .lock()
                     .allocate_first_fit(layout)
-                    .map(|(allocation, _)| allocation.as_ptr())
+                    .map(|(allocation, true_layout)| (allocation.as_ptr(), true_layout))
                     .unwrap()
             });
         assert!((ptr as usize & (layout.align() - 1)) == 0);
+        self.bytes_alloced.fetch_add(true_layout.size(), Ordering::Relaxed);
         ptr
     }
 
     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        unsafe { self.0.lock().deallocate(NonNull::new_unchecked(ptr), layout) };
+        self.bytes_freed.fetch_add(layout.size(), Ordering::Relaxed);
+        unsafe { self.heap.lock().deallocate(NonNull::new_unchecked(ptr), layout) };
     }
 }
 
 #[global_allocator]
-static HEAP: Heap = Heap(Mutex::new(HoleList::empty()));
+pub static HEAP: Heap = Heap {
+    heap: Mutex::new(HoleList::empty()),
+    bytes_alloced: AtomicUsize::new(0),
+    bytes_freed: AtomicUsize::new(0),
+    pmem_size: AtomicUsize::new(0),
+};
 
 #[alloc_error_handler]
 fn alloc_error_handler(layout: Layout) -> ! {
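
The accounting pattern in this hunk is not kernel-specific: alloc records the size the hole list actually reserved (true_layout, which can exceed the requested layout because of minimum hole size and alignment padding), dealloc records the caller's layout size, and alloced minus freed approximates live heap bytes. As a minimal sketch of the same idea on hosted Rust, here is a counting wrapper around std's System allocator; CountingAlloc and its field names are illustrative, not part of the commit:

use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical stand-in for the kernel Heap: delegates to the system
// allocator and keeps the same pair of relaxed atomic counters.
struct CountingAlloc {
    bytes_alloced: AtomicUsize,
    bytes_freed: AtomicUsize,
}

unsafe impl GlobalAlloc for CountingAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.bytes_alloced.fetch_add(layout.size(), Ordering::Relaxed);
        System.alloc(layout)
    }
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.bytes_freed.fetch_add(layout.size(), Ordering::Relaxed);
        System.dealloc(ptr, layout);
    }
}

#[global_allocator]
static HEAP: CountingAlloc = CountingAlloc {
    bytes_alloced: AtomicUsize::new(0),
    bytes_freed: AtomicUsize::new(0),
};

fn main() {
    let v = vec![0u8; 4096];
    drop(v);
    // Both counters now include the vector's 4096-byte buffer.
    println!(
        "{} B allocated, {} B freed",
        HEAP.bytes_alloced.load(Ordering::Relaxed),
        HEAP.bytes_freed.load(Ordering::Relaxed)
    );
}

Relaxed ordering suffices for both counters here and in the commit: they are independent statistics, and no other memory is synchronized through them.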

(second changed file: the QEMU exit helper)

@@ -1,6 +1,9 @@
 use x86_64::instructions::port::Port;
+
+use crate::kernel_heap::HEAP;
 
 pub fn exit_qemu() -> ! {
+    HEAP.print_stats();
     unsafe {
         Port::new(0xf4).write(0u32);
     }
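
A note on the magic number: port 0xf4 lines up with QEMU's isa-debug-exit device, which the project presumably configures with -device isa-debug-exit,iobase=0xf4,iosize=0x04. A 4-byte write of value v to that port makes QEMU exit with host status (v << 1) | 1, so the stats print is the kernel's final output before the VM disappears. A sketch of the whole function under that assumption (the trailing loop stands in for whatever follows the hunk):

use x86_64::instructions::port::Port;
use crate::kernel_heap::HEAP;

pub fn exit_qemu() -> ! {
    // Last chance to report: QEMU tears the VM down on the port write.
    HEAP.print_stats();
    unsafe {
        // isa-debug-exit: host exit status becomes (0 << 1) | 1 = 1.
        Port::new(0xf4).write(0u32);
    }
    // Never reached once QEMU processes the write; satisfies the `!` return.
    loop {}
}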