kernel/src/gdt.rs


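//! Global Descriptor Table (GDT) and Task State Segment (TSS) setup for the
//! kernel: builds the GDT once in a lazy static, loads it into the CPU, and
//! lets the kernel update the TSS's privilege-0 stack.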
use core::ptr::addr_of;
use spin::Lazy;
use x86_64::{
    instructions::tables::load_tss,
    registers::segmentation::{Segment, SegmentSelector, CS, SS},
    structures::gdt::{Descriptor, GlobalDescriptorTable},
    structures::tss::TaskStateSegment,
    VirtAddr,
};
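
/// Segment selectors produced while building the GDT, kept around so that
/// `init` can later load them into `CS`/`SS` and the task register.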
struct Selectors {
    code: SegmentSelector,
    data: SegmentSelector,
    tss: SegmentSelector,
}
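
/// Bundles the GDT together with its selectors so both can be produced by a
/// single lazily-initialized static.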
struct GDTAndSelectors {
    gdt: GlobalDescriptorTable,
    selectors: Selectors,
}
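
/// The task state segment. It is `static mut` so that `set_tss_stack` can
/// update the privilege-0 stack pointer after the GDT has been built.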
static mut TSS: TaskStateSegment = TaskStateSegment::new();
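
/// The GDT and its selectors, built on first access and loaded by `init`.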
static GDT: Lazy<GDTAndSelectors> = Lazy::new(|| {
    let mut gdt = GlobalDescriptorTable::new();
    let code_sel = gdt.append(Descriptor::kernel_code_segment());
    let data_sel = gdt.append(Descriptor::kernel_data_segment());
    // SAFETY: The TSS is a static, so a pointer to it is always valid.
    let tss_sel = gdt.append(unsafe { Descriptor::tss_segment_unchecked(addr_of!(TSS)) });
    // User data is appended immediately before user code; this ordering is
    // presumably chosen to match the selector layout `sysret` derives from
    // the STAR MSR (SS = base + 8, CS = base + 16).
    gdt.append(Descriptor::user_data_segment());
    gdt.append(Descriptor::user_code_segment());
    let selectors = Selectors {
        code: code_sel,
        data: data_sel,
        tss: tss_sel,
    };
    GDTAndSelectors { gdt, selectors }
});
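
/// Loads the GDT, reloads the `CS` and `SS` segment registers, and loads the
/// TSS into the task register.
///
/// Meant to be called once during early boot, before anything relies on the
/// new segments. A hypothetical call site (the real boot order depends on
/// the rest of the kernel):
///
/// ```ignore
/// gdt::init();
/// ```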
pub fn init() {
    GDT.gdt.load();
    // SAFETY: The selectors are always valid because they come from the
    // currently loaded GDT.
    unsafe {
        CS::set_reg(GDT.selectors.code);
        SS::set_reg(GDT.selectors.data);
        load_tss(GDT.selectors.tss);
    }
}
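
/// Sets the stack the CPU switches to on a transition from user mode to
/// kernel mode (the TSS's privilege-0 stack, `RSP0`).
///
/// A hypothetical caller (not part of this file; `kernel_stack_top` is an
/// assumed `u64`) would point it at a kernel stack before entering ring 3:
///
/// ```ignore
/// gdt::set_tss_stack(VirtAddr::new(kernel_stack_top));
/// ```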
pub fn set_tss_stack(addr: VirtAddr) {
    // SAFETY: This is safe because there is no way to write to the TSS
    // except via this function, and the CPU only reads it during a switch
    // to kernel mode. Also, since the kernel is single-threaded, the TSS
    // can never be accessed by multiple threads at the same time
    // (multi-core uses a different TSS for each core due to differing
    // kernel stacks).
    unsafe { TSS.privilege_stack_table[0] = addr };
}