// os/kernel/cpu/i386/paging.c
/**
* \file
*/
#include "../../pmem.h"
#include "../../vga_err.h"
#include "../halt.h"
#include "../paging.h"
#include "arch_consts.h"
#include <stdint.h>
#include <stdlib.h>
/**
 * Represents an entry in a page table/directory.
 * \note Privilege bits in the page directory and page table entries for a page are ANDed together, so the most restrictive privilege between the page directory and the page table wins.
 * \note Fields are unsigned: a 1-bit signed bit-field can only hold 0 and -1,
 * and a signed 20-bit pgno would make pgno<<12 a left shift of a negative
 * value (undefined behavior) for frames at or above 0x80000.
 */
typedef struct {
    uint32_t pres:1; //!< Whether the page is present
    uint32_t wr:1; //!< Whether the page is writeable
    uint32_t usr:1; //!< Whether the page is accessible by user mode
    uint32_t cachetype:1; //!< Cache type for the page. Write-through caching when 1, write-back caching when 0.
    uint32_t cachedisable:1; //!< Whether caching is disabled
    uint32_t accessed:1; //!< Whether the page has been accessed
    uint32_t dirty:1; //!< Whether the page is dirty (has been written to)
    uint32_t sz:1; //!< Page size
    uint32_t osavail:4; //!< Available for OS use
    uint32_t pgno:20; //!< Physical page number this page maps to
} pg_struct_entry;
static pg_struct_entry page_directory[1024] __attribute__((aligned(4096))); //!< The kernel process's page directory
static pg_struct_entry kern_page_tables[NUM_KERN_FRAMES] __attribute__((aligned(4096))); //!< The page tables where the kernel binary is mapped in
static pg_struct_entry kstack_page_tables[218*1024] __attribute__((aligned(4096))); //!< Page tables for thread kernel stacks
static pg_struct_entry kmalloc_page_tables[4*1024] __attribute__((aligned(4096))); //!< Page tables for the kmalloc heap
// The two pointers below rely on directory entry 1023 mapping the page
// directory onto itself (set up in paging_init), so page tables appear in the
// top 4 MiB of the virtual address space.
static pg_struct_entry* pagdirmap=(pg_struct_entry*)0xFFFFF000; //!< Pointer to the page directory entries in the recursive mapping
static pg_struct_entry* page_table_map=(pg_struct_entry*)0xFFC00000; //!< Pointer to the page table entries in the recursive mapping
/**
* Checks whether a page is present
* \param page The page number to check
* \return Whether the page is present
*/
2020-07-23 11:50:23 -05:00
static char is_page_present(size_t page) {
2019-03-11 09:32:55 -05:00
int table=page>>10;
page=page&0x3FF;
if (!pagdirmap[table].pres) {
2019-03-11 09:32:55 -05:00
return 0;
}
return page_table_map[page+1024*table].pres;
2019-02-09 12:52:45 -06:00
}
2019-03-11 09:32:55 -05:00
void map_pages(void* virt_addr_ptr,void* phys_addr_ptr,int num_pages,char usr,char wr) {
2019-02-09 12:52:45 -06:00
uint32_t virt_addr=(uint32_t)virt_addr_ptr;
uint32_t phys_addr=(uint32_t)phys_addr_ptr;
int dir_entry=(virt_addr&0xFFC00000)>>22;
int table_entry=(virt_addr&0x3FF000)>>12;
for (int i=0;i<num_pages;i++) {
if (!pagdirmap[dir_entry].pres) {
pg_struct_entry* entry=&pagdirmap[dir_entry];
entry->pgno=(uint32_t)pmem_alloc(1)>>12;
entry->pres=1;
entry->usr=usr;
entry->wr=wr;
2019-03-11 09:32:55 -05:00
}
pg_struct_entry* entry=&page_table_map[table_entry+1024*dir_entry];
entry->pgno=phys_addr>>12;
entry->pres=1;
entry->usr=usr;
entry->wr=wr;
2019-02-09 12:52:45 -06:00
table_entry++;
if (table_entry==1024) {
table_entry=0;
dir_entry++;
}
2019-02-09 12:52:45 -06:00
phys_addr+=0x1000;
}
}
/**
 * Scans the virtual address space for a run of consecutive unmapped pages.
 * \param num_pages The number of consecutive free pages required (must be >=1).
 * \return The virtual address of the start of the run; halts with a panic
 * message if no run is found.
 * \note The outer loop starts at i=1, so pages 0-7 (including the NULL page)
 * are never returned -- presumably intentional; TODO confirm.
 */
void* find_free_pages(int num_pages) {
size_t bmap_index; // first page number of the accepted run
size_t remaining_blks; // pages still needed to complete the current run
for(size_t i=1;i<131072;i++) {
char got_0=0; // set once the first unmapped page of a candidate run is seen
remaining_blks=num_pages;
size_t old_j; // page number where the candidate run began
for (size_t j=i*8;;j++) {
char bit=is_page_present(j);
if (got_0) {
if (bit) {
// The run was interrupted by a present page.
if (remaining_blks==0) {
bmap_index=old_j;
break;
} else {
// Advance the outer scan past the page just examined.
// NOTE(review): i+=j/8 (rather than i=j/8) over-advances the
// outer index, which can skip candidate runs -- confirm intent.
i+=j/8;
i--;
break;
}
} else {
remaining_blks--;
}
} else {
if (!bit) {
got_0=1;
old_j=j;
remaining_blks--;
}
}
// NOTE(review): if num_pages==0, this reads old_j before any
// assignment; callers appear to always pass >=1 -- confirm.
if (remaining_blks==0) {
bmap_index=old_j;
break;
}
}
if (remaining_blks==0) {
break;
}
}
if (remaining_blks!=0) {
vga_write_string("[PANIC] Out of memory");
halt();
}
return (void*)(bmap_index<<12);
}
/**
 * Allocates physical frames and maps them at a free virtual range.
 * \param num_pages The number of pages to allocate.
 * \return The virtual address of the newly mapped, user-accessible,
 * writeable region.
 */
void* alloc_pages(int num_pages) {
    void* frames=pmem_alloc(num_pages);
    void* region=find_free_pages(num_pages);
    map_pages(region,frames,num_pages,1,1);
    return region;
}
2019-05-04 10:42:17 -05:00
void* virt_to_phys(void* virt_addr_arg) {
uint32_t virt_addr=(uint32_t)virt_addr_arg;
2019-05-04 10:49:27 -05:00
int offset=virt_addr&0x3FF;
2019-05-04 10:42:17 -05:00
virt_addr=virt_addr&0xFFFFFC00;
if (!is_page_present(virt_addr>>12)) return NULL;
2019-05-04 10:49:27 -05:00
int dir_idx=(virt_addr&0xFFC00000)>>22;
int tbl_idx=(virt_addr&0x3FFC00)>>12;
if (!pagdirmap[dir_idx].pres) {
2019-05-04 10:42:17 -05:00
return 0;
}
return (void*)(((page_table_map[tbl_idx+1024*dir_idx].pgno)<<12)+offset);
2019-05-04 10:42:17 -05:00
}
/**
 * Allocates physical frames and maps them at a caller-chosen virtual address.
 * \param num_pages The number of pages to allocate.
 * \param addr The virtual address to map the pages at.
 */
void alloc_pages_virt(int num_pages,void* addr) {
    map_pages(addr,pmem_alloc(num_pages),num_pages,1,1);
}
/**
 * Invalidates a page in the TLB.
 * \param addr The address of the page to invalidate.
 */
static void invl_page(void* addr) {
asm volatile("invlpg (%0)"::"r"(addr):"memory");
}
2019-05-05 13:14:14 -05:00
void* paging_new_address_space() {
void* dir=pmem_alloc(1);
pg_struct_entry* freepg=find_free_pages(1);
map_pages(freepg,dir,1,0,1);
2020-07-23 11:50:23 -05:00
for (size_t i=0;i<1024;i++) {
freepg[i]=page_directory[i];
}
pg_struct_entry* entry=&freepg[1023];
entry->pres=1;
entry->wr=1;
entry->pgno=(uint32_t)dir>>12;
unmap_pages(freepg,1);
return dir;
}
/**
 * Switches to an address space by loading its page directory into CR3.
 * \param address_space The physical address of the page directory to load.
 */
void load_address_space(void* address_space) {
    // The old asm declared address_space as an output ("=m") even though it
    // is only read, so the compiler was free to treat it as write-only and
    // pass garbage. Pass it as a register input; "memory" because changing
    // CR3 changes what every pointer refers to.
    asm volatile("movl %0, %%cr3"::"r"(address_space):"memory");
}
void unmap_pages(void* start_virt,int num_pages) {
2019-05-28 16:05:51 -05:00
uint32_t virt_addr=(uint32_t)start_virt;
int dir_entry=(virt_addr&0xFFC00000)>>22;
int table_entry=(virt_addr&0x3FF000)>>12;
2020-07-23 11:50:23 -05:00
for (int i=0;i<=num_pages;i++) {
if (page_table_map[dir_entry].pres) {
pg_struct_entry* entry=&page_table_map[table_entry+1024*dir_entry];
entry->pres=0;
invl_page(start_virt+(i*1024));
2019-05-28 16:05:51 -05:00
table_entry++;
if (table_entry==1024) {
dir_entry++;
table_entry=0;
}
}
}
}
/**
 * Builds the kernel's initial page directory and tables, then loads the
 * directory into CR3. Must run before any of the recursive-mapping helpers
 * (pagdirmap/page_table_map) are used.
 */
void paging_init() {
// Map the kernel's frames 1:1 by frame number into the kernel page tables;
// these tables are installed at directory slot 768 below, so frame i appears
// at virtual 0xC0000000 + i*4096.
for (size_t i=0;i<NUM_KERN_FRAMES;i++) {
pg_struct_entry* entry=&kern_page_tables[i];
entry->pres=1;
entry->wr=1;
entry->pgno=i;
}
// Mark every kernel-stack table entry not-present for now.
// NOTE(review): kstack_page_tables is never installed into page_directory in
// this function -- presumably wired up elsewhere; confirm.
for (size_t i=0;i<218*1024;i++) {
pg_struct_entry* entry=&kstack_page_tables[i];
entry->pres=0;
}
// Back the kmalloc heap (4 tables = 16 MiB) with freshly allocated frames.
for (size_t i=0;i<4*1024;i++) {
pg_struct_entry* entry=&kmalloc_page_tables[i];
entry->pres=1;
entry->wr=1;
entry->pgno=(uint32_t)pmem_alloc(1)>>12;
}
// Install the kernel page tables starting at directory entry 768
// (0xC0000000). Their physical address is the virtual address minus the
// higher-half offset 0xC0000000.
for (size_t i=0;i<NUM_KERN_FRAMES/1024;i++) {
uint32_t entry_virt=(uint32_t)&(kern_page_tables[i*1024]);
pg_struct_entry* entry=&page_directory[i+768];
entry->pres=1;
entry->wr=1;
entry->pgno=((uint32_t)entry_virt-0xC0000000)>>12;
}
// page_directory[985]=(uint32_t)(pmem_alloc(1024))|0x83;
// Install the kmalloc page tables at directory entries 1018-1021.
for (size_t i=0;i<4;i++) {
uint32_t entry_virt=(uint32_t)&(kmalloc_page_tables[i*1024]);
pg_struct_entry* entry=&page_directory[i+1018];
entry->pres=1;
entry->wr=1;
entry->pgno=((uint32_t)entry_virt-0xC0000000)>>12;
}
// Entry 1023 points back at the directory itself: the recursive mapping
// that makes pagdirmap (0xFFFFF000) and page_table_map (0xFFC00000) work.
pg_struct_entry* entry=&page_directory[1023];
entry->pres=1;
entry->wr=1;
entry->pgno=((uint32_t)page_directory-0xC0000000)>>12;
// CR3 takes the directory's physical address.
load_address_space((uint32_t*)((uint32_t)page_directory-0xC0000000));
}
/**
 * Reads the currently loaded address space from CR3.
 * \return The physical address of the current page directory.
 */
void* get_address_space() {
void* address_space;
asm volatile("movl %%cr3, %%eax; movl %%eax, %0;":"=m"(address_space)::"%eax");
return address_space;
}
/**
 * Frees the physical frames behind a mapped region, then unmaps it.
 * \param num_pages The number of pages to release.
 * \param addr The virtual address of the first page.
 */
void dealloc_pages(int num_pages,void* addr) {
    // Translate before unmapping -- virt_to_phys needs the mapping intact.
    uint32_t first_frame=(uint32_t)virt_to_phys(addr)>>12;
    pmem_free(first_frame,num_pages);
    unmap_pages(addr,num_pages);
}