Physical memory allocation is now done one frame at a time

This commit is contained in:
pjht 2020-08-30 09:37:55 -05:00
parent acde39065c
commit 9198692b5e
3 changed files with 18 additions and 12 deletions

View File

@ -61,6 +61,9 @@ void map_pages(void* virt_addr_ptr,void* phys_addr_ptr,int num_pages,char usr,ch
entry->wr=wr;
}
pg_struct_entry* entry=&page_table_map[table_entry+1024*dir_entry];
if (phys_addr_ptr==NULL) {
phys_addr=(uint32_t)pmem_alloc(1);
}
entry->pgno=phys_addr>>12;
entry->pres=1;
entry->usr=usr;
@ -120,9 +123,8 @@ void* find_free_pages(int num_pages) {
}
void* alloc_pages(int num_pages) {
void* phys_addr=pmem_alloc(num_pages);
void* addr=find_free_pages(num_pages);
map_pages(addr,phys_addr,num_pages,1,1);
map_pages(addr,NULL,num_pages,1,1);
return addr;
}
@ -141,8 +143,7 @@ void* virt_to_phys(void* virt_addr_arg) {
void alloc_pages_virt(int num_pages,void* addr) {
void* phys_addr=pmem_alloc(num_pages);
map_pages(addr,phys_addr,num_pages,1,1);
map_pages(addr,NULL,num_pages,1,1);
}
/**
@ -164,7 +165,7 @@ void* paging_new_address_space() {
entry->pres=1;
entry->wr=1;
entry->pgno=(uint32_t)dir>>12;
unmap_pages(freepg,1);
unmap_pages(freepg,1,0);
return dir;
}
@ -172,7 +173,7 @@ void load_address_space(void* address_space) {
asm volatile("movl %0, %%eax; movl %%eax, %%cr3;":"=m"(address_space)::"%eax");
}
void unmap_pages(void* start_virt,int num_pages) {
void unmap_pages(void* start_virt,int num_pages,int free_phys) {
uint32_t virt_addr=(uint32_t)start_virt;
int dir_entry=(virt_addr&0xFFC00000)>>22;
int table_entry=(virt_addr&0x3FF000)>>12;
@ -180,6 +181,9 @@ void unmap_pages(void* start_virt,int num_pages) {
if (page_table_map[dir_entry].pres) {
pg_struct_entry* entry=&page_table_map[table_entry+1024*dir_entry];
entry->pres=0;
if (free_phys) {
pmem_free((void*)(entry->pgno<<12),1);
}
invl_page(start_virt+(i*4096));
table_entry++;
if (table_entry==1024) {
@ -214,7 +218,6 @@ void paging_init() {
entry->wr=1;
entry->pgno=((uint32_t)entry_virt-0xC0000000)>>12;
}
// page_directory[985]=(uint32_t)(pmem_alloc(1024))|0x83;
for (size_t i=0;i<4;i++) {
uint32_t entry_virt=(uint32_t)&(kmalloc_page_tables[i*1024]);
pg_struct_entry* entry=&page_directory[i+1018];
@ -237,5 +240,5 @@ void* get_address_space() {
void dealloc_pages(int num_pages,void* addr) {
pmem_free((void*)((uint32_t)virt_to_phys(addr)>>12),num_pages);
unmap_pages(addr,num_pages);
unmap_pages(addr,num_pages,1);
}

View File

@ -32,8 +32,9 @@ void map_pages(void* virt_addr_ptr,void* phys_addr_ptr,int num_pages,char usr,ch
 * Unmap virtual pages.
* \param start_virt The start of the virtual range to unmap.
 * \param num_pages The number of pages to unmap.
* \param free_phys Also free the physical pages the virtual pages are mapped to.
*/
void unmap_pages(void* start_virt,int num_pages);
void unmap_pages(void* start_virt,int num_pages, int free_phys);
/**
* Allocate virtual pages & map them to newly allocated physical memory.
* \param num_pages The number of pages to allocate.

View File

@ -88,7 +88,7 @@ void* kernel_rpc_call(pid_t pid,char* name,void* buf,size_t size) {
virtaddr=alloc_pages((size/PAGE_SZ)+1);
void* physaddr=virt_to_phys(virtaddr);
memcpy(virtaddr,buf,size);
unmap_pages(virtaddr,(size/PAGE_SZ)+1);
unmap_pages(virtaddr,(size/PAGE_SZ)+1,0);
RUN_IN_ADDRESS_SPACE(tasking_get_address_space(pid),{
virtaddr=find_free_pages((size/PAGE_SZ)+1);
map_pages(virtaddr,physaddr,(size/PAGE_SZ)+1,1,1);
@ -130,7 +130,7 @@ void kernel_rpc_return(void* buf,size_t size) {
virtaddr=alloc_pages((size/PAGE_SZ)+1);
void* physaddr=virt_to_phys(virtaddr);
memcpy(virtaddr,buf,size);
unmap_pages(virtaddr,(size/PAGE_SZ)+1);
unmap_pages(virtaddr,(size/PAGE_SZ)+1,0);
RUN_IN_ADDRESS_SPACE(tasking_get_address_space(pid),{
virtaddr=find_free_pages((size/PAGE_SZ)+1);
map_pages(virtaddr,physaddr,(size/PAGE_SZ)+1,1,1);
@ -153,9 +153,11 @@ void kernel_rpc_mark_as_init() {
tasking_unblock(waiting_thread->waiting_pid,waiting_thread->waiting_tid);
if (waiting_thread==waiting_thread_list) {
waiting_thread_list=waiting_thread_list->next;
} else {
} else if (prev) {
prev->next=waiting_thread->next;
}
kfree(waiting_thread);
} else {
prev=waiting_thread;
}
}