Initial commit

pjht 2022-11-01 07:24:50 -05:00
commit 1995cf9337
30 changed files with 2122 additions and 0 deletions

10
.cargo/config.toml Normal file

@@ -0,0 +1,10 @@
[unstable]
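# build-std recompiles core, alloc, and compiler_builtins from source for the
# custom JSON target below; the compiler-builtins-mem feature supplies
# memcpy/memset/memcmp, which a freestanding target has no libc to provide.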
build-std-features = ["compiler-builtins-mem"]
build-std = ["core", "compiler_builtins", "alloc"]
[build]
target = "x86_64-unknown-none.json"
rustflags = ["-C", "link-args=--image-base 0xffff800000000000", "-C", "force-unwind-tables", "-C", "link-arg=/home/pjht/projects/os-rust/kernel/eh_frame.ld"]
[target.'cfg(target_os = "none")']
runner = "./run.sh"

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
target

323
Cargo.lock generated Normal file

@@ -0,0 +1,323 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "ahash"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
dependencies = [
"getrandom",
"once_cell",
"version_check",
]
[[package]]
name = "arrayvec"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bit_field"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bootloader"
version = "0.10.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24e13520aa8580a2850fc9f5390dc6753f1062fb66f90e5a61bd5c72b55df731"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crossbeam-queue"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac"
dependencies = [
"cfg-if",
]
[[package]]
name = "elfloader"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a249d6a9d50f3bf5a3cb7bfd75e84989cad89c6c77b5996c8b084e844146ff04"
dependencies = [
"bitflags",
"log",
"xmas-elf",
]
[[package]]
name = "getrandom"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "gimli"
version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d"
[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
"ahash",
]
[[package]]
name = "intrusive-collections"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfe531a7789d7120f3e17d4f3f2cd95f54418ba7354f60b7b622b6644a07888a"
dependencies = [
"memoffset",
]
[[package]]
name = "kernel"
version = "0.1.0"
dependencies = [
"bootloader",
"crossbeam-queue",
"elfloader",
"hashbrown",
"intrusive-collections",
"linked_list_allocator",
"pic8259",
"replace_with",
"slab",
"spin",
"static_assertions",
"tap",
"tar-no-std",
"uart_16550",
"unwinding",
"x86_64",
]
[[package]]
name = "libc"
version = "0.2.137"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
[[package]]
name = "linked_list_allocator"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "549ce1740e46b291953c4340adcd74c59bcf4308f4cac050fd33ba91b7168f4a"
dependencies = [
"spinning_top",
]
[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memoffset"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
[[package]]
name = "pic8259"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24ec21f514e2e16e94649f1d041ca4a7069b512c037ac156360652a775e6229d"
dependencies = [
"x86_64",
]
[[package]]
name = "replace_with"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3a8614ee435691de62bcffcf4a66d91b3594bf1428a5722e79103249a095690"
[[package]]
name = "rustversion"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8"
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "slab"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef"
dependencies = [
"autocfg",
]
[[package]]
name = "spin"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09"
dependencies = [
"lock_api",
]
[[package]]
name = "spinning_top"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75adad84ee84b521fb2cca2d4fd0f1dab1d8d026bda3c5bea4ca63b5f9f9293c"
dependencies = [
"lock_api",
]
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "tap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "tar-no-std"
version = "0.1.7"
dependencies = [
"arrayvec",
"bitflags",
"log",
]
[[package]]
name = "uart_16550"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b074eb9300ad949edd74c529c0e8d451625af71bb948e6b65fe69f72dc1363d9"
dependencies = [
"bitflags",
"rustversion",
"x86_64",
]
[[package]]
name = "unwinding"
version = "0.1.4"
dependencies = [
"gimli",
]
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "volatile"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3ca98349dda8a60ae74e04fd90c7fb4d6a4fbe01e6d3be095478aa0b76f6c0c"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "x86_64"
version = "0.14.10"
source = "git+https://github.com/pjht/x86_64#5d941e68fa70779e07576d42a9f49bc89afdb9ed"
dependencies = [
"bit_field",
"bitflags",
"rustversion",
"volatile",
]
[[package]]
name = "xmas-elf"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d29b4d8e7beaceb4e77447ba941a7600d23d0319ab52da0461abea214832d5a"
dependencies = [
"zero",
]
[[package]]
name = "zero"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f1bc8a6b2005884962297587045002d8cfb8dcec9db332f4ca216ddc5de82c5"

46
Cargo.toml Normal file

@@ -0,0 +1,46 @@
[package]
name = "kernel"
version = "0.1.0"
edition = "2021"
[dependencies]
x86_64 = { git = "https://github.com/pjht/x86_64", features = ["experimental"] }
tar-no-std = { path = "../tar-no-std" }
unwinding = { path = "../unwinding", default-features = false, features = ["personality", "panic", "unwinder", "fde-static", "hide-trace"] }
uart_16550 = "0.2.15"
spin = "0.9.2"
linked_list_allocator = "0.9.1"
elfloader = "0.14.0"
tap = "1.0.1"
replace_with = { version = "0.1.7", default-features = false, features = ["nightly"] }
hashbrown = "0.12.0"
pic8259 = "0.10.2"
bootloader = "0.10.13"
static_assertions = "1.1.0"
crossbeam-queue = { version = "0.3.4", default-features = false, features = ["alloc"] }
slab = { version = "0.4.6", default-features = false }
intrusive-collections = "0.9.4"
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
strip = true
[features]
[package.metadata.bootimage]
run-args = ["-nographic", "-m", "4G", "-device", "isa-debug-exit,iobase=0xf4,iosize=0x04", "--no-reboot", "-hdb", "../ext2.img"]
[package.metadata.bootloader]
map-physical-memory = true
dynamic-range-start = "0xFFFF_8000_0000_0000"
# physical-memory-offset = "0xFFFFFF8000000000"
# kernel-stack-address = "0xFFFFFF7FFFE00000"
# boot-info-address = "0xFFFFFF7FFFC00000"
# kernel-stack-size = 2093056 # 511 pages
[patch.crates-io]
x86_64 = { git = "https://github.com/pjht/x86_64" }

18
build.rs Normal file

@@ -0,0 +1,18 @@
use std::process::Command;
fn main() {
println!("cargo:rerun-if-changed=sysroot");
println!("cargo:rerun-if-changed=build.rs");
println!(
"{}",
String::from_utf8(
Command::new("sh")
.arg("-c")
.arg("cd sysroot; tar cvf ../initrd.tar *")
.output()
.expect("failed to execute process")
.stdout
)
.expect("command output not valid utf8")
);
}

2
eh_frame.ld Normal file

@@ -0,0 +1,2 @@
__eh_frame = ADDR(.eh_frame);
__etext = ADDR(.text) + SIZEOF(.text);

BIN
initrd.tar Normal file

Binary file not shown.

3
run.sh Executable file

@@ -0,0 +1,3 @@
#!/bin/bash
cd ../simple_boot
cargo run -- "$@"

2
rust-toolchain.toml Normal file

@@ -0,0 +1,2 @@
[toolchain]
channel = "nightly"

1
rustfmt.toml Normal file

@@ -0,0 +1 @@
use_small_heuristics = "Max"

23
src/bootinfo.rs Normal file

@@ -0,0 +1,23 @@
use bootloader::boot_info::BootInfo;
use core::ops::Deref;
use spin::Once;
unsafe impl Sync for BootInfoHolder {}
pub struct BootInfoHolder(Once<&'static BootInfo>);
impl BootInfoHolder {
pub fn set(&self, bootinfo: &'static BootInfo) {
assert!(!self.0.is_completed(), "Boot info can only be set once!");
self.0.call_once(|| bootinfo);
}
}
impl Deref for BootInfoHolder {
type Target = &'static BootInfo;
fn deref(&self) -> &Self::Target {
self.0.get().expect("Boot info used before initialization!")
}
}
pub static BOOTINFO: BootInfoHolder = BootInfoHolder(Once::new());
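// Usage sketch: the entry point stores the bootloader's reference exactly once
// (see src/start.rs), after which any module can read boot information through
// the static, e.g.:
//
//     BOOTINFO.set(bootinfo);                 // once, at startup
//     let regions = &BOOTINFO.memory_regions; // afterwards, anywhere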

61
src/gdt.rs Normal file

@@ -0,0 +1,61 @@
use x86_64::{
instructions::tables::load_tss,
registers::segmentation::{Segment, SegmentSelector, CS, SS},
structures::gdt::{Descriptor, GlobalDescriptorTable},
structures::tss::TaskStateSegment,
VirtAddr,
};
use spin::Lazy;
#[allow(unused)]
struct Selectors {
code_sel: SegmentSelector,
data_sel: SegmentSelector,
user_data_sel: SegmentSelector,
user_code_sel: SegmentSelector,
tss_sel: SegmentSelector,
}
struct GDTAndSelectors {
gdt: GlobalDescriptorTable,
selectors: Selectors,
}
static mut TSS: TaskStateSegment = TaskStateSegment::new();
static GDT: Lazy<GDTAndSelectors> = Lazy::new(|| {
let mut gdt = GlobalDescriptorTable::new();
let selectors = Selectors {
code_sel: gdt.add_entry(Descriptor::kernel_code_segment()),
data_sel: gdt.add_entry(Descriptor::kernel_data_segment()),
// SAFETY: This might actually be unsafe: we pass in a shared reference
// to the TSS even though it can be mutated via `set_tss_stack`. However,
// since the descriptor immediately turns the ref into a pointer, and the
// CPU never reads the TSS outside of ring switches, it should be safe.
tss_sel: gdt.add_entry(Descriptor::tss_segment(unsafe { &TSS })),
user_data_sel: gdt.add_entry(Descriptor::user_data_segment()),
user_code_sel: gdt.add_entry(Descriptor::user_code_segment()),
};
GDTAndSelectors { gdt, selectors }
});
pub fn init() {
GDT.gdt.load();
// SAFETY: The selectors are always valid due to coming
// from the currently loaded GDT
unsafe {
CS::set_reg(GDT.selectors.code_sel);
SS::set_reg(GDT.selectors.data_sel);
load_tss(GDT.selectors.tss_sel);
};
}
#[allow(unused)]
pub fn set_tss_stack(addr: VirtAddr) {
// SAFETY: This is safe as there is no other way to write to
// the TSS except via this function, and the CPU only reads it
// during a switch to kernel mode. Also, since the kernel is single-threaded,
// the TSS can never be accessed by multiple threads at the same time (multi-core
// would use a different TSS for each core due to differing kernel stacks).
unsafe { TSS.privilege_stack_table[0] = addr };
}

397
src/interrupts.rs Normal file

@@ -0,0 +1,397 @@
use crate::{
print, println,
virtual_memory::{ASpaceMutex, AddressSpace, ACTIVE_SPACE, KERNEL_SPACE},
INITRD, TASKING,
};
use alloc::{boxed::Box, vec::Vec};
use core::{arch::asm, ptr::addr_of, str};
use hashbrown::HashMap;
use pic8259::ChainedPics;
use spin::{Lazy, Mutex, RwLock};
use tap::Tap;
use x86_64::{
registers::control::Cr2,
set_general_handler,
structures::{
idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode},
paging::{mapper::TranslateResult, Page, PageTableFlags, Translate},
},
PrivilegeLevel, VirtAddr,
};
const IRQ_BASE: u8 = 32;
const INTERRUPT_NAMES: [&str; 32] = [
"Divide By Zero",
"Debug",
"NMI",
"Breakpoint",
"Overflow",
"Bound Range Exceeded",
"Invalid Opcode",
"FPU Disabled",
"Double Fault",
"",
"Invalid TSS",
"Segment Not Present",
"Stack Segment Fault",
"General Protection Fault",
"Page Fault",
"",
"x87 FPU Exception",
"Alignment Check",
"Machine Check",
"SIMD FPU Exception",
"Virtualization Exception",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"Security Exception",
"",
];
static IDT: Lazy<InterruptDescriptorTable> = Lazy::new(|| {
let mut idt = InterruptDescriptorTable::new();
set_general_handler!(&mut idt, general_handler);
set_general_handler!(&mut idt, exc_handler, 0..32);
set_general_handler!(&mut idt, irq_handler, 32..48);
idt[0x80].set_handler_fn(syscall_handler_header).set_privilege_level(PrivilegeLevel::Ring3);
idt.page_fault.set_handler_fn(page_fault_handler);
idt
});
static PICS: Mutex<ChainedPics> = Mutex::new(unsafe { ChainedPics::new(IRQ_BASE, IRQ_BASE + 8) });
static IRQ_HANDLERS: RwLock<[Option<IrqHandler>; 16]> = RwLock::new([None; 16]);
pub type IrqHandler = fn(irq_num: u8, eoi_guard: EoiGuard);
#[derive(Debug)]
pub struct InvalidIrq;
pub fn init() {
IDT.load();
unsafe { PICS.lock().initialize() };
x86_64::instructions::interrupts::enable();
}
extern "x86-interrupt" fn page_fault_handler(
stack_frame: InterruptStackFrame,
error_code: PageFaultErrorCode,
) {
if error_code.contains(PageFaultErrorCode::PROTECTION_VIOLATION) {
panic!(
"Got Page Fault {error_code:#?} at {:#x}\nEntry flags: {:#?}\n{stack_frame:#?}",
Cr2::read(),
match ACTIVE_SPACE.lock().translate(Cr2::read()) {
TranslateResult::Mapped { flags, .. } => flags,
_ => {
panic!();
}
},
);
} else {
panic!("Got Page Fault {error_code:#?} at {:#x}\n{stack_frame:#?}", Cr2::read(),);
}
}
fn general_handler(stack_frame: InterruptStackFrame, index: u8, _error_code: Option<u64>) {
println!("Other interrupt {index}\n{stack_frame:#?}");
}
fn exc_handler(stack_frame: InterruptStackFrame, index: u8, error_code: Option<u64>) {
if let Some(error_code) = error_code {
panic!(
"Got exception {} with error code {error_code}\n{stack_frame:#?}",
INTERRUPT_NAMES[index as usize]
);
} else {
panic!("Got exception {}\n{stack_frame:#?}", INTERRUPT_NAMES[index as usize]);
};
}
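/// Guard that sends the PIC end-of-interrupt for the wrapped interrupt index
/// when dropped, letting handlers hold off the EOI until they are finished.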
pub struct EoiGuard(u8);
impl Drop for EoiGuard {
fn drop(&mut self) {
unsafe {
PICS.lock().notify_end_of_interrupt(self.0);
}
}
}
fn irq_handler(_stack_frame: InterruptStackFrame, index: u8, _error_code: Option<u64>) {
let irq_num = index - IRQ_BASE;
let eoi_guard = EoiGuard(index);
if let Some(handler) = IRQ_HANDLERS.read()[irq_num as usize] {
handler(irq_num, eoi_guard);
};
}
#[repr(C)]
#[derive(Debug)]
struct SyscallRegs {
rax: u64,
rbx: u64,
rcx: u64,
rdx: u64,
rsi: u64,
rdi: u64,
rsp: u64,
rbp: u64,
r8: u64,
r9: u64,
r10: u64,
r11: u64,
r12: u64,
r13: u64,
r14: u64,
r15: u64,
}
#[no_mangle]
static mut SYSCALL_REGS: SyscallRegs = SyscallRegs {
rax: 0,
rbx: 0,
rcx: 0,
rdx: 0,
rsi: 0,
rdi: 0,
rsp: 0,
rbp: 0,
r8: 0,
r9: 0,
r10: 0,
r11: 0,
r12: 0,
r13: 0,
r14: 0,
r15: 0,
};
#[naked]
extern "x86-interrupt" fn syscall_handler_header(stack_frame: InterruptStackFrame) {
unsafe {
asm!(
"mov [rip+SYSCALL_REGS], rax",
"mov [rip+SYSCALL_REGS+8], rbx",
"mov [rip+SYSCALL_REGS+16], rcx",
"mov [rip+SYSCALL_REGS+24], rdx",
"mov [rip+SYSCALL_REGS+32], rsi",
"mov [rip+SYSCALL_REGS+40], rdi",
"mov [rip+SYSCALL_REGS+48], rsp",
"mov [rip+SYSCALL_REGS+56], rbp",
"mov [rip+SYSCALL_REGS+64], r8",
"mov [rip+SYSCALL_REGS+72], r9",
"mov [rip+SYSCALL_REGS+80], r10",
"mov [rip+SYSCALL_REGS+88], r11",
"mov [rip+SYSCALL_REGS+96], r12",
"mov [rip+SYSCALL_REGS+104], r13",
"mov [rip+SYSCALL_REGS+112], r14",
"mov [rip+SYSCALL_REGS+120], r15",
"jmp syscall_handler",
options(noreturn)
)
}
}
fn get_buffer(id: u64) -> Option<Box<[u8], &'static ASpaceMutex>> {
TASKING
.lock()
.data_buffers_mut()
.try_remove(id as usize)
.map(|buf| unsafe { Box::from_raw_in(buf, &*KERNEL_SPACE) })
}
static REGISTERED_PIDS: Lazy<RwLock<HashMap<u64, u64>>> = Lazy::new(|| RwLock::new(HashMap::new()));
#[no_mangle]
#[allow(clippy::unit_arg)]
extern "C" fn syscall_handler() {
let regs = unsafe { &SYSCALL_REGS };
let mut retval = 0;
let mut retval2 = regs.rcx;
let mut retval3 = regs.rdx;
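// Syscall dispatch: the number is in rax, arguments in rcx/rdx/rsi/rdi, and
// results are returned in rax/rcx/rdx. Numbers as implemented below:
// 0 print char, 1 exit, 2 map free pages, 3 copy initrd into active space,
// 4 create address space, 5 drop address space, 6 map at fixed address,
// 7 copy buffer into an address space, 8 spawn process, 9 register current
// pid under a key, 10 look up a registered pid, 11 send message, 12 receive
// message, 13 get current pid, 15 free buffer, 16 allocate user buffer
// (14 is unassigned).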
match regs.rax {
0 => {
retval = if let Some(chr) = char::from_u32(regs.rcx as u32) {
print!("{}", chr);
0
} else {
1
}
}
1 => TASKING.lock().exit(),
2 => {
retval = if regs.rcx == 0 {
ACTIVE_SPACE
.lock()
.map_free(regs.rdx as usize, PageTableFlags::from_bits_truncate(regs.rsi))
.map_or(0, |x| x as u64)
} else if let Some(space) =
TASKING.lock().address_spaces_mut().get_mut(regs.rcx as usize)
{
space
.map_free(regs.rdx as usize, PageTableFlags::from_bits_truncate(regs.rsi))
.map_or(0, |x| x as u64)
} else {
0
}
}
3 => {
let initrd = Box::leak(
Vec::with_capacity_in(INITRD.len(), &*ACTIVE_SPACE)
.tap_mut(|v| v.extend_from_slice(INITRD))
.into_boxed_slice(),
);
retval = addr_of!(initrd[0]) as u64;
retval2 = initrd.len() as u64;
}
4 => {
retval =
TASKING.lock().address_spaces_mut().insert(AddressSpace::new().unwrap()) as u64;
}
5 => {
TASKING.lock().address_spaces_mut().remove(regs.rcx as usize);
}
6 => {
let page = Page::from_start_address(VirtAddr::new(regs.rdx)).unwrap();
let num_pages = regs.rsi as usize;
let flags = PageTableFlags::from_bits_truncate(regs.rdi);
retval = if regs.rcx == 0 {
ACTIVE_SPACE.lock().map_assert_unused(page, num_pages, flags)
} else {
TASKING
.lock()
.address_spaces_mut()
.get_mut(regs.rcx as usize)
.unwrap()
.map_assert_unused(page, num_pages, flags)
}
.is_err() as u64;
}
7 => {
if let Some(buffer) = get_buffer(regs.rdx) {
let len = regs.rdi;
assert!(len <= buffer.len() as u64);
// let aligned_address = regs.rsi & !0xFFF;
// let aligned_len = len as u64 + (regs.rsi - aligned_address);
let mut tasking = TASKING.lock();
let space = tasking.address_spaces_mut().get_mut(regs.rcx as usize).unwrap();
// space
// .map_assert_unused(
// Page::from_start_address(VirtAddr::new(aligned_address)).unwrap(),
// aligned_len as usize / 4096 + if aligned_len % 4096 > 0 { 1 } else { 0 },
// PageTableFlags::USER_ACCESSIBLE | PageTableFlags::WRITABLE,
// )
// .unwrap();
space.run(|| unsafe { (regs.rsi as *mut u8).copy_from(&buffer[0], len as usize) });
retval = 0;
} else {
retval = 1;
}
}
8 => {
let space = TASKING.lock().address_spaces_mut().remove(regs.rdx as usize);
let res = TASKING.lock().new_process(regs.rcx as *const _, space);
if let Ok(pid) = res {
retval = 0;
retval2 = pid as u64;
} else {
retval = 1;
}
}
9 => {
REGISTERED_PIDS.write().insert(regs.rcx, TASKING.lock().current_pid().unwrap() as u64);
}
10 => {
let id = REGISTERED_PIDS.read().get(&regs.rcx).copied();
if let Some(id) = id {
retval = 0;
retval2 = id;
} else {
retval = 1;
}
}
11 => {
if let Some(buffer) = get_buffer(regs.rdx) {
assert!(regs.rsi as usize <= buffer.len());
let mut tasking = TASKING.lock();
if let Some(_queue) = tasking.message_queue_mut(regs.rcx as usize) {
let len = regs.rsi as usize;
let buffer = Box::into_raw(buffer);
let new_buffer_key =
tasking.proc_data_buffers_mut(regs.rcx as usize).insert(buffer);
let queue = tasking.message_queue_mut(regs.rcx as usize).unwrap();
queue.push((new_buffer_key, len));
retval = 0;
} else {
retval = 1;
}
} else {
retval = 1;
}
}
12 => {
let mut tasking = TASKING.lock();
if let Some(msg) = tasking.current_message_queue_mut().pop() {
retval2 = msg.1 as u64;
retval = *tasking.data_buffers_mut().get(msg.0).unwrap() as *const u8 as u64;
retval3 = msg.0 as u64;
} else {
retval = 0;
}
}
13 => {
retval = TASKING.lock().current_pid().unwrap() as u64;
}
14 => {
// AVAILABLE
}
15 => {
get_buffer(regs.rcx);
}
16 => {
let size = regs.rcx as usize;
let rounded_size = size.next_multiple_of(4096);
KERNEL_SPACE.lock().alloc_force_user = true;
let buffer = Box::into_raw(
Vec::with_capacity_in(rounded_size, &*KERNEL_SPACE)
.tap_mut(|v| v.resize(rounded_size, 0))
.into_boxed_slice(),
);
KERNEL_SPACE.lock().alloc_force_user = false;
retval = TASKING.lock().data_buffers_mut().insert(buffer) as u64;
retval2 = buffer as *mut u8 as u64;
retval3 = rounded_size as u64;
}
_ => (),
};
unsafe {
asm!(
"mov rbx, [rip+SYSCALL_REGS+8]",
"mov rsi, [rip+SYSCALL_REGS+32]",
"mov rdi, [rip+SYSCALL_REGS+40]",
"mov rsp, [rip+SYSCALL_REGS+48]",
"mov rbp, [rip+SYSCALL_REGS+56]",
"mov r8, [rip+SYSCALL_REGS+64]",
"mov r9, [rip+SYSCALL_REGS+72]",
"mov r10, [rip+SYSCALL_REGS+80]",
"mov r11, [rip+SYSCALL_REGS+88]",
"mov r12, [rip+SYSCALL_REGS+96]",
"mov r13, [rip+SYSCALL_REGS+104]",
"mov r14, [rip+SYSCALL_REGS+112]",
"mov r15, [rip+SYSCALL_REGS+120]",
"iretq",
in("rax") retval, in("rcx") retval2, in("rdx") retval3, options(noreturn)
)
}
}
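// Hedged sketch (no userspace code is part of this commit): a ring-3 process
// would reach the handler above through the int 0x80 gate, roughly:
//
//     let result: u64;
//     unsafe {
//         core::arch::asm!(
//             "int 0x80",
//             inout("rax") 0u64 => result,      // syscall 0: print char
//             inout("rcx") u64::from('A') => _, // arg; clobbered by retval2
//             out("rdx") _,                     // clobbered by retval3
//         );
//     }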
pub fn register_handler(irq: u8, handler: IrqHandler) -> Result<(), InvalidIrq> {
*(IRQ_HANDLERS.write().get_mut(irq as usize).ok_or(InvalidIrq)?) = Some(handler);
Ok(())
}

15
src/kernel_heap.rs Normal file

@@ -0,0 +1,15 @@
use crate::virtual_memory::KERNEL_SPACE;
use alloc::{alloc::Layout, boxed::Box};
use linked_list_allocator::LockedHeap;
#[global_allocator]
static HEAP: LockedHeap = LockedHeap::empty();
pub fn init() {
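// Back the global heap with 512 KiB (131_072 * 4 bytes) of freshly mapped
// kernel pages; leaking the box keeps the backing memory alive forever, which
// is what a 'static heap needs.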
HEAP.lock().init_from_slice(Box::leak(Box::new_uninit_slice_in(131_072 * 4, &*KERNEL_SPACE)));
}
#[alloc_error_handler]
fn alloc_error_handler(layout: Layout) -> ! {
panic!("allocation error: {:?}", layout)
}

60
src/main.rs Normal file

@@ -0,0 +1,60 @@
#![no_std]
#![no_main]
#![feature(abi_x86_interrupt)]
#![feature(alloc_error_handler)]
#![feature(allocator_api)]
#![feature(naked_functions)]
#![feature(int_roundings)]
#![feature(type_alias_impl_trait)]
#![deny(unsafe_op_in_unsafe_fn)]
extern crate alloc;
mod bootinfo;
mod gdt;
mod interrupts;
mod kernel_heap;
mod panic_handler;
mod physical_memory;
mod pit;
mod qemu_exit;
mod serial;
mod simple_loader;
mod start;
mod tasking;
mod virtual_memory;
use simple_loader::SimpleLoader;
use tasking::TASKING;
pub static INITRD: &[u8] = include_bytes!("../initrd.tar");
use elfloader::ElfBinary;
use tar_no_std::TarArchiveRef;
use x86_64::registers::rflags::{self, RFlags};
pub fn main() {
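// Set IOPL to 3 in RFLAGS so that ring-3 tasks may execute port I/O
// instructions directly.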
let mut rflags_data = rflags::read();
rflags_data |= RFlags::IOPL_HIGH | RFlags::IOPL_LOW;
unsafe {
rflags::write(rflags_data);
}
gdt::init();
interrupts::init();
kernel_heap::init();
pit::init(100);
let initrd = TarArchiveRef::new(INITRD);
let init = ElfBinary::new(
initrd
.entries()
.find(|x| x.filename() == *"bin/init")
.expect("Could not find init in initrd")
.data(),
)
.expect("Init not an ELF binary");
TASKING
.lock()
.new_process(
init.entry_point() as _,
SimpleLoader::load(&init).expect("Failed to load init"),
)
.expect("Failed to create init process");
}

37
src/panic_handler.rs Normal file

@@ -0,0 +1,37 @@
#[cfg(debug_assertions)]
fn print_backtrace() {
use core::ffi::c_void;
use unwinding::abi::{UnwindContext, UnwindReasonCode, _Unwind_Backtrace, _Unwind_GetIP};
extern "C" fn callback(
unwind_ctx: &mut UnwindContext<'_>,
_arg: *mut c_void,
) -> UnwindReasonCode {
println!("{:#x}", _Unwind_GetIP(unwind_ctx));
UnwindReasonCode::NO_REASON
}
_Unwind_Backtrace(callback, core::ptr::null_mut());
}
use crate::{print, println, qemu_exit::exit_qemu, TASKING};
use core::panic::PanicInfo;
#[panic_handler]
fn panic(info: &PanicInfo<'_>) -> ! {
if let Some(tasking) = TASKING.try_lock() {
if let Some(pid) = tasking.current_pid() {
print!("PID {}", pid);
} else {
print!("Kernel Init");
}
} else {
print!("PID Unknown");
}
println!(" {info}");
#[cfg(debug_assertions)]
print_backtrace();
if let Some(mut tasking) = TASKING.try_lock() {
tasking.exit();
} else {
exit_qemu();
}
}

75
src/physical_memory.rs Normal file

@@ -0,0 +1,75 @@
use crate::{bootinfo::BOOTINFO, println, virtual_memory::AsVirt};
use bootloader::boot_info::MemoryRegionKind;
use core::mem;
use spin::{Lazy, Mutex};
use tap::Tap;
use x86_64::{
structures::paging::{FrameAllocator, FrameDeallocator, PhysFrame, Size4KiB},
PhysAddr,
};
type FrameIterator = impl Iterator<Item = PhysFrame>;
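// Intrusive free list: each freed frame stores the link to the next free frame
// inside its own memory (reached through the physical-memory direct map), so
// the stack itself needs no allocation.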
#[derive(Default)]
struct FrameStack(Option<(&'static mut FrameStack, PhysFrame)>);
pub struct FrameManager {
stack: FrameStack,
iter: FrameIterator,
}
unsafe impl FrameAllocator<Size4KiB> for FrameManager {
fn allocate_frame(&mut self) -> Option<PhysFrame> {
self.iter.next().or_else(|| self.stack.pop())
}
}
impl FrameStack {
/// # Safety
/// The frame must be unique
unsafe fn push(&mut self, frame: PhysFrame) {
self.0 = Some((
unsafe { &mut *frame.as_virt_ptr::<Self>().tap_mut(|x| x.write(mem::take(self))) },
frame,
));
}
fn pop(&mut self) -> Option<PhysFrame> {
let (head, frame) = self.0.take()?;
*self = mem::take(head);
Some(frame)
}
}
impl FrameDeallocator<Size4KiB> for FrameManager {
unsafe fn deallocate_frame(&mut self, frame: PhysFrame) {
unsafe { self.stack.push(frame) }
}
}
pub static PHYSICAL_MEMORY: Lazy<Mutex<FrameManager>> = Lazy::new(|| {
let region_iter =
BOOTINFO.memory_regions.iter().filter(|region| region.kind == MemoryRegionKind::Usable);
let frame_iter = region_iter
.clone()
.flat_map(|region| {
((region.start >> 12) + if region.start & 0xFFF > 0 { 1 } else { 0 })
..(region.end >> 12)
})
.map(|num| PhysFrame::from_start_address(PhysAddr::new(num << 12)).unwrap());
#[allow(clippy::cast_precision_loss)]
let mut mem_size = region_iter
.map(|region| {
4096 * ((region.end >> 12)
- ((region.start >> 12) + if region.start & 0xFFF > 0 { 1 } else { 0 }))
})
.sum::<u64>() as f64;
let mut prefix = 0;
while mem_size >= 1024.0 {
mem_size /= 1024.0;
prefix += 1;
}
let prefix = ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei"][prefix];
println!("[PMM] Initialized, found {:.2} {}B of memory", mem_size, prefix);
Mutex::new(FrameManager { stack: FrameStack(None), iter: frame_iter })
});

42
src/pit.rs Normal file

@@ -0,0 +1,42 @@
use crate::{
interrupts::{self, EoiGuard},
println, TASKING,
};
use spin::Mutex;
use x86_64::instructions::port::{Port, PortWriteOnly};
static DATA: Mutex<Port<u8>> = Mutex::new(Port::new(0x40));
static CMD: Mutex<PortWriteOnly<u8>> = Mutex::new(PortWriteOnly::new(0x43));
const MAX_FREQ: u32 = 1_193_180;
pub fn init(mut freq: u32) {
interrupts::register_handler(0, handler).unwrap();
let mut div = MAX_FREQ / freq;
if div > 65535 {
println!("[PIT] Frequency of {}Hz too slow, min freq is 18 Hz", freq);
div = 65535;
freq = 18;
} else if div == 0 {
println!("[PIT] Frequency of {}Hz too fast, max freq is {} Hz", freq, MAX_FREQ);
div = 1;
freq = MAX_FREQ;
}
println!("[PIT] Setting PIT to {}Hz with divisor of {}", freq, div);
// Command breakdown (MSB to LSB):
// 00 - Channel 0
// 11 - lobyte/hibyte access mode - set both bytes in one command
// 011 - Mode 3, square wave generator at frequency MAX_FREQ/div, generates interrupts at that
// frequency
// 0 - binary mode, always used
unsafe { CMD.lock().write(0b0011_0110_u8) };
unsafe { DATA.lock().write((div & 0xFF) as u8) };
unsafe { DATA.lock().write(((div >> 8) & 0xFF) as u8) };
}
fn handler(_irq: u8, eoi_guard: EoiGuard) {
drop(eoi_guard);
if let Some(mut tasking) = TASKING.try_lock() {
tasking.task_yield();
}
}

8
src/qemu_exit.rs Normal file

@@ -0,0 +1,8 @@
use x86_64::instructions::port::Port;
pub fn exit_qemu() -> ! {
unsafe {
Port::new(0xf4).write(0u32);
}
unreachable!();
}

57
src/serial.rs Normal file

@@ -0,0 +1,57 @@
use core::{fmt, fmt::Write};
use spin::{Lazy, Mutex};
use uart_16550::SerialPort;
pub static PORT: Lazy<Wrapper> = Lazy::new(|| {
// SAFETY:
// 0x3f8 is the defined address for the first serial port on x86_64,
// so we know it is safe to use that as the port's base address.
let mut port = unsafe { SerialPort::new(0x3f8) };
port.init();
Wrapper(Mutex::new(port))
});
pub struct Wrapper(Mutex<SerialPort>);
impl fmt::Write for &Wrapper {
fn write_str(&mut self, s: &str) -> fmt::Result {
self.0.lock().write_str(s)
}
}
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ($crate::serial::_print(format_args!($($arg)*)));
}
#[macro_export]
macro_rules! println {
() => ($crate::print!("\n"));
($($arg:tt)*) => ({
$crate::serial::_print(format_args!($($arg)*));
$crate::print!("\n");
})
}
#[macro_export]
macro_rules! dbg {
() => {
$crate::println!("[{}:{}]", file!(), line!())
};
($val:expr $(,)?) => {
match $val {
tmp => {
$crate::println!("[{}:{}] {} = {:#?}",
file!(), line!(), stringify!($val), &tmp);
tmp
}
}
};
($($val:expr),+ $(,)?) => {
($($crate::dbg!($val)),+,)
};
}
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
(&*PORT).write_fmt(args).unwrap();
}

62
src/simple_loader.rs Normal file

@@ -0,0 +1,62 @@
use crate::virtual_memory::{AddressSpace, PagingError};
use core::slice;
use elfloader::{ElfBinary, ElfLoader, ElfLoaderErr, Flags, LoadableHeaders, Rela, VAddr, P64};
use x86_64::{
structures::paging::{Page, PageTableFlags},
VirtAddr,
};
#[derive(Debug)]
pub enum LoadError {
Paging(PagingError),
ElfLoader(ElfLoaderErr),
}
impl From<PagingError> for LoadError {
fn from(err: PagingError) -> Self {
Self::Paging(err)
}
}
impl From<ElfLoaderErr> for LoadError {
fn from(err: ElfLoaderErr) -> Self {
Self::ElfLoader(err)
}
}
pub struct SimpleLoader(AddressSpace);
impl SimpleLoader {
pub fn load(binary: &ElfBinary) -> Result<AddressSpace, LoadError> {
let mut loader = Self(AddressSpace::new()?);
binary.load(&mut loader)?;
Ok(loader.0)
}
}
impl ElfLoader for SimpleLoader {
fn allocate(&mut self, load_headers: LoadableHeaders) -> Result<(), ElfLoaderErr> {
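// Map enough pages to cover each LOAD segment: containing_address rounds the
// start down to a page boundary, so the in-page offset (virtual_addr % 4096)
// is added to mem_size before the page count is rounded up.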
for header in load_headers {
let start_page = Page::containing_address(VirtAddr::new(header.virtual_addr()));
#[allow(clippy::cast_possible_truncation)]
self.0
.map_assert_unused(
start_page,
((header.mem_size() + (header.virtual_addr() % 4096)).div_ceil(4096)) as usize,
PageTableFlags::USER_ACCESSIBLE,
)
.expect("Unable to map region");
}
Ok(())
}
fn relocate(&mut self, _entry: &Rela<P64>) -> Result<(), ElfLoaderErr> {
Err(ElfLoaderErr::UnsupportedRelocationEntry)
}
fn load(&mut self, _flags: Flags, base: VAddr, region: &[u8]) -> Result<(), ElfLoaderErr> {
self.0.run(|| unsafe {
slice::from_raw_parts_mut(base as *mut u8, region.len()).copy_from_slice(region);
});
Ok(())
}
}

12
src/start.rs Normal file

@@ -0,0 +1,12 @@
use crate::{bootinfo::BOOTINFO, main, tasking::TASKING};
use bootloader::{boot_info::BootInfo, entry_point};
entry_point!(start);
#[allow(clippy::missing_panics_doc)]
#[allow(clippy::missing_errors_doc)]
fn start(bootinfo: &'static mut BootInfo) -> ! {
BOOTINFO.set(bootinfo);
main();
TASKING.lock().exit();
}

205
src/tasking.rs Normal file

@@ -0,0 +1,205 @@
use crate::{
gdt, println, qemu_exit,
virtual_memory::{ASpaceMutex, AddressSpace, PagingError, KERNEL_SPACE},
};
use alloc::{boxed::Box, vec::Vec};
use core::{arch::asm, ptr::addr_of};
use crossbeam_queue::SegQueue;
use slab::Slab;
use spin::{Lazy, Mutex};
use x86_64::{instructions::interrupts, structures::paging::PageTableFlags, VirtAddr};
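// Cooperative context switch: push the callee-saved registers onto the current
// kernel stack, save its rsp through `current_stack`, switch rsp to
// `next_stack`, and pop the next task's callee-saved registers; `ret` then
// resumes wherever that task last called switch_to_asm.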
#[naked]
extern "C" fn switch_to_asm(current_stack: *mut *mut usize, next_stack: *mut usize) {
unsafe {
asm!(
"push rbp",
"push rbx",
"push r12",
"push r13",
"push r14",
"push r15",
"mov [rdi], rsp",
"mov rsp, rsi",
"pop r15",
"pop r14",
"pop r13",
"pop r12",
"pop rbx",
"pop rbp",
"ret",
options(noreturn)
);
}
}
#[naked]
extern "C" fn switch_to_asm_exit(next_stack: *mut usize) {
unsafe {
asm!(
"mov rsp, rdi",
"pop r15",
"pop r14",
"pop r13",
"pop r12",
"pop rbx",
"pop rbp",
"ret",
options(noreturn)
);
}
}
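// First entry into a new task: pops the user stack pointer and entry point
// that new_process placed on the kernel stack, then builds an iretq frame for
// ring 3. 43 is the user data selector (GDT index 5, RPL 3) used for SS and
// 51 is the user code selector (GDT index 6, RPL 3) used for CS; or-ing 0x200
// into the saved RFLAGS enables interrupts in the new context.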
#[naked]
extern "C" fn task_init() {
unsafe {
asm!(
"pop rcx",
"pop rbx",
"push 43",
"push rcx",
"pushfq",
"pop rax",
"or rax, 0x200",
"push rax",
"push 51",
"push rbx",
"iretq",
options(noreturn)
)
}
}
extern "C" fn task_force_unlock() {
// SAFETY: This is safe because this function is only ever used once for new tasks, to replace
// the end of the task_yield function that normally unlocks the mutex.
unsafe { TASKING.force_unlock() }
interrupts::enable();
}
#[derive(Debug)]
#[repr(C)]
struct Process {
kernel_stack: Box<[usize], &'static ASpaceMutex>,
kernel_esp: *mut usize,
kernel_esp_top: VirtAddr,
address_space: Option<AddressSpace>,
address_spaces: Slab<AddressSpace>,
data_buffers: Slab<*mut [u8]>,
message_queue: SegQueue<(usize, usize)>,
}
unsafe impl Send for Process {}
pub static TASKING: Lazy<Mutex<Tasking>> = Lazy::new(|| {
Mutex::new(Tasking {
processes: Slab::new(),
ready_to_run: Vec::new(),
current_process: None,
freeable_kstacks: Vec::new(),
})
});
#[derive(Debug)]
pub struct Tasking {
processes: Slab<Process>,
ready_to_run: Vec<usize>,
current_process: Option<usize>,
freeable_kstacks: Vec<Box<[usize], &'static ASpaceMutex>>,
}
impl Tasking {
pub fn new_process(
&mut self,
entry_point: *const extern "C" fn() -> !,
mut address_space: AddressSpace,
) -> Result<usize, PagingError> {
let mut kernel_stack = Vec::new_in(&*KERNEL_SPACE);
kernel_stack.resize(0x1_0000, 0);
let mut kernel_stack = kernel_stack.into_boxed_slice();
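// Seed the top of the kernel stack so the first switch into this task unwinds
// correctly: switch_to_asm(_exit) pops six zeroed callee-saved registers
// (indices 0xFFF6..=0xFFFB) and `ret`s into task_force_unlock (0xFFFC), which
// returns into task_init (0xFFFD); task_init then pops the user stack top
// (0xFFFE) and the entry point (0xFFFF).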
kernel_stack[0xFFFF] = entry_point as usize;
kernel_stack[0xFFFE] =
address_space.map_free(16, PageTableFlags::USER_ACCESSIBLE)? as usize + 0x10000;
kernel_stack[0xFFFD] = task_init as usize;
kernel_stack[0xFFFC] = task_force_unlock as usize;
let pid = self.processes.insert(Process {
kernel_esp: &mut kernel_stack[0xFFF6],
kernel_esp_top: VirtAddr::from_ptr(addr_of!(kernel_stack[0xFFFF]).wrapping_add(1)),
kernel_stack,
address_space: Some(address_space),
address_spaces: Slab::new(),
data_buffers: Slab::new(),
message_queue: SegQueue::new(),
});
self.ready_to_run.push(pid);
Ok(pid)
}
pub fn task_yield(&mut self) {
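// Free kernel stacks of processes that exited earlier; this is safe now
// because the current task is guaranteed not to be running on any of them.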
self.freeable_kstacks.clear();
let current_process = match self.current_process {
Some(x) => x,
None => return,
};
if let Some(next_process_pid) = self.ready_to_run.pop() {
let current_address_space = self.processes[next_process_pid]
.address_space
.take()
.expect("Non-current process has active page table")
.activate();
self.processes[current_process].address_space = Some(current_address_space);
let next_process = &self.processes[next_process_pid];
gdt::set_tss_stack(next_process.kernel_esp_top);
self.ready_to_run.push(current_process);
let kernel_esp = next_process.kernel_esp;
let previous_process = self.current_process.replace(next_process_pid).unwrap();
switch_to_asm(&mut (self.processes[previous_process].kernel_esp), kernel_esp);
}
}
pub fn current_pid(&self) -> Option<usize> {
self.current_process
}
pub fn exit(&mut self) -> ! {
if let Some(next_process_pid) = self.ready_to_run.pop() {
if let Some(current_process) = self.current_process {
self.freeable_kstacks.push(self.processes.remove(current_process).kernel_stack);
}
let next_process = &mut self.processes[next_process_pid];
next_process
.address_space
.take()
.expect("Non-current process has active page table")
.activate();
gdt::set_tss_stack(next_process.kernel_esp_top);
let kernel_esp = next_process.kernel_esp;
self.current_process = Some(next_process_pid);
switch_to_asm_exit(kernel_esp);
unreachable!()
} else {
println!("Last process exited, exiting QEMU");
qemu_exit::exit_qemu();
}
}
pub fn address_spaces_mut(&mut self) -> &mut Slab<AddressSpace> {
&mut self.processes[self.current_process.unwrap()].address_spaces
}
pub fn data_buffers_mut(&mut self) -> &mut Slab<*mut [u8]> {
&mut self.processes[self.current_process.unwrap()].data_buffers
}
pub fn proc_data_buffers_mut(&mut self, pid: usize) -> &mut Slab<*mut [u8]> {
&mut self.processes[pid].data_buffers
}
pub fn current_message_queue_mut(&mut self) -> &mut SegQueue<(usize, usize)> {
&mut self.processes[self.current_process.unwrap()].message_queue
}
pub fn message_queue_mut(&mut self, pid: usize) -> Option<&mut SegQueue<(usize, usize)>> {
Some(&mut self.processes.get_mut(pid)?.message_queue)
}
}

481
src/virtual_memory.rs Normal file

@@ -0,0 +1,481 @@
mod holes;
use crate::{bootinfo::BOOTINFO, physical_memory::PHYSICAL_MEMORY};
use alloc::alloc::{AllocError, Allocator, Layout};
use core::{fmt, ops::Deref, ptr::NonNull, slice};
use replace_with::replace_with_or_abort;
use spin::{Lazy, Mutex};
use x86_64::{
registers::control::Cr3,
structures::paging::{
frame::PhysFrameRange,
mapper::{MapToError, TranslateResult, UnmapError},
page::PageRange,
FrameAllocator, FrameDeallocator, Mapper, OffsetPageTable, Page, PageTable, PageTableFlags,
PhysFrame, Size4KiB, Translate,
},
PhysAddr, VirtAddr,
};
pub struct AddressSpace {
is_kernel: bool,
/// This field is used by the IPC code to force the
/// kernel allocator to allocate user accessible pages
pub alloc_force_user: bool,
pub mapper: OffsetPageTable<'static>,
}
impl fmt::Debug for AddressSpace {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AddressSpace")
.field("is_kernel", &self.is_kernel)
.field("alloc_force_user", &self.alloc_force_user)
.field("level_4_table", &(self.mapper.level_4_table_immut() as *const PageTable))
.finish()
}
}
#[derive(Debug)]
pub enum PagingError {
RequestInvalid,
ParentEntryHugePage,
PageAllocationFailed,
FrameAllocationFailed,
PageAlreadyMapped,
PagesAlreadyMapped,
PageNotMapped,
InvalidFrameAddress(PhysAddr),
}
impl From<MapToError<Size4KiB>> for PagingError {
fn from(err: MapToError<Size4KiB>) -> Self {
match err {
MapToError::FrameAllocationFailed => Self::FrameAllocationFailed,
MapToError::ParentEntryHugePage => Self::ParentEntryHugePage,
MapToError::PageAlreadyMapped(_) => Self::PageAlreadyMapped,
}
}
}
impl From<UnmapError> for PagingError {
fn from(err: UnmapError) -> Self {
match err {
UnmapError::ParentEntryHugePage => Self::ParentEntryHugePage,
UnmapError::PageNotMapped => Self::PageNotMapped,
UnmapError::InvalidFrameAddress(x) => Self::InvalidFrameAddress(x),
}
}
}
// SAFETY: from_start_address_unchecked requires that the given address is page (4 KiB) aligned,
// which means that the lower 12 bits are zero. We shift the page number left 12 bits, so the lower
// 12 bits will always be zero.
const USER_PAGE_RANGE: PageRange = unsafe {
PageRange {
start: Page::from_start_address_unchecked(VirtAddr::new_truncate(1 << 12)),
end: Page::from_start_address_unchecked(VirtAddr::new_truncate(0x8_0000_0000 << 12)),
}
};
const KERNEL_PAGE_RANGE: PageRange = unsafe {
PageRange {
start: Page::from_start_address_unchecked(VirtAddr::new_truncate(0x8_0000_0000 << 12)),
end: Page::from_start_address_unchecked(VirtAddr::new_truncate(0xF_FFFF_FFFF << 12)),
}
};
pub trait AsVirt {
fn as_virt_ptr<T>(&self) -> *mut T;
unsafe fn as_virt_ref<'a, T>(&self) -> &'a T {
unsafe { &*self.as_virt_ptr() }
}
unsafe fn as_virt_mut<'a, T>(&self) -> &'a mut T {
unsafe { &mut *self.as_virt_ptr() }
}
}
impl AsVirt for PhysFrame {
fn as_virt_ptr<T>(&self) -> *mut T {
self.start_address().as_virt_ptr()
}
}
impl AsVirt for PhysAddr {
fn as_virt_ptr<T>(&self) -> *mut T {
(*PHYS_OFFSET + self.as_u64()).as_mut_ptr()
}
}
static PHYS_OFFSET: Lazy<VirtAddr> =
Lazy::new(|| VirtAddr::new(BOOTINFO.physical_memory_offset.into_option().unwrap()));
pub struct ASpaceMutex(Mutex<AddressSpace>);
impl ASpaceMutex {
pub fn new(space: AddressSpace) -> Self {
Self(Mutex::new(space))
}
}
impl Deref for ASpaceMutex {
type Target = Mutex<AddressSpace>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub static KERNEL_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
// SAFETY: Cr3 must point to a valid table otherwise the system would triple fault, so
// we know it is safe to turn the pointer to it into a reference.
let table = unsafe { Cr3::read().0.as_virt_mut::<PageTable>() };
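// Clear the user half of the level 4 table and give every kernel-half slot a
// child table (allocating empty ones where needed). Later address spaces copy
// this table, so pre-populating the kernel half means kernel mappings made
// afterwards show up in every address space through the shared child tables.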
for i in 0..256 {
table[i].set_addr(PhysAddr::new(0), PageTableFlags::empty());
}
for i in 256..512 {
if table[i].flags().contains(PageTableFlags::PRESENT) {
let new_flags = table[i].flags() & !PageTableFlags::USER_ACCESSIBLE;
table[i].set_flags(new_flags);
} else {
// SAFETY: We initialize the newly allocated table before we make a reference to it, so
// the reference cannot point to uninitialized data.
table[i].set_addr(
unsafe {
let (new_child, new_child_phys) =
alloc_pt().expect("Could not allocate new kernel entry");
new_child.write(PageTable::new());
new_child_phys
},
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
);
}
}
let mut kernel_space = AddressSpace::new_with_addr(table);
kernel_space.is_kernel = true;
kernel_space.mapper.activate();
ASpaceMutex::new(kernel_space)
});
pub static ACTIVE_SPACE: Lazy<ASpaceMutex> = Lazy::new(|| {
let new_space = AddressSpace::new().expect("Could not allocate new user table");
new_space.mapper.activate();
ASpaceMutex::new(new_space)
});
fn alloc_pt() -> Result<(*mut PageTable, PhysAddr), PagingError> {
let frame = PHYSICAL_MEMORY.lock().allocate_frame().ok_or(MapToError::FrameAllocationFailed)?;
Ok((frame.as_virt_ptr(), frame.start_address()))
}
impl AddressSpace {
pub fn new() -> Result<Self, PagingError> {
// SAFETY: We copy the kernel space mappings to the newly allocated table before we make a reference to it, so
// the reference cannot point to uninitialized data.
let new_table = unsafe {
let new_table = alloc_pt()?.0;
new_table.copy_from(KERNEL_SPACE.lock().mapper.level_4_table_immut(), 1);
&mut *new_table
};
Ok(Self::new_with_addr(new_table))
}
fn new_with_addr(table: &'static mut PageTable) -> Self {
// SAFETY: The physical offset is passed by the bootloader, so unless the
// bootloader is buggy (in which case I can't do anything about it), the offset
// is ok. The requirement about a "valid page table hierarchy" doesn't make
// much sense, as the only issue I can see, duplicate mappings for a physical
// frame, is unsafe to make in the first place and so shouldn't have to be
// guaranteed by us or the direct caller.
Self {
mapper: unsafe { OffsetPageTable::new(table, *PHYS_OFFSET) },
alloc_force_user: false,
is_kernel: false,
}
}
#[allow(unused)]
pub fn activate(self) -> Self {
// This assert should never fire, as the only address space with the is_kernel
// bool set is stored in the KERNEL_SPACE static, which is a Mutex, and you can
// never get ownership of a value in a mutex unless you own the mutex, and no
// function owns a static.
assert!(!self.is_kernel);
Lazy::force(&ACTIVE_SPACE);
self.mapper.activate();
core::mem::replace(&mut *ACTIVE_SPACE.lock(), self)
}
fn check_request_valid(&self, start: Page, length: usize) -> Result<(), PagingError> {
// A kernel-space request must lie entirely in the kernel range; a user-space
// request is invalid if any part of it reaches into the kernel range.
if (self.is_kernel && start < KERNEL_PAGE_RANGE.start)
|| (!self.is_kernel
&& ((start >= KERNEL_PAGE_RANGE.start)
|| ((start + (length as u64)) > KERNEL_PAGE_RANGE.start)))
{
Err(PagingError::RequestInvalid)
} else {
Ok(())
}
}
#[allow(unused)]
fn check_request_unmapped(&self, start: Page, length: usize) -> Result<(), PagingError> {
for page in Page::range(start, start + length as u64) {
if self.translate_addr(page.start_address()).is_some() {
return Err(PagingError::PagesAlreadyMapped);
}
}
Ok(())
}
/// Runs a provided closure in this address space.
///
/// The address space is temporarily moved out of `self` (via `replace_with`)
/// while the closure runs and moved back afterwards. This is necessary to
/// prevent the closure from having access to the address space through both
/// the local variable it was in and the `ACTIVE_SPACE` static, which would
/// alias mutable references.
///
/// ## Examples
/// ```
/// let mut address_space = AddressSpace::new()?;
/// address_space.run(|| {/* your closure here */});
/// ```
#[allow(unused)]
pub fn run<F: FnOnce()>(&mut self, func: F) {
replace_with_or_abort(self, |cl_self| {
let old_space = cl_self.activate();
func();
old_space.activate()
});
}
/// Maps virtual pages and returns the starting address.
///
/// ## Safety
///
/// Creating page table mappings is a fundamentally unsafe operation because
/// there are various ways to break memory safety through it. For example,
/// re-mapping an in-use page to a different frame changes and invalidates
/// all values stored in that page, resulting in undefined behavior on the
/// next use.
///
/// The caller must ensure that no undefined behavior or memory safety
/// violations can occur through the new mapping. Among other things, the
/// caller must prevent the following:
///
/// - Aliasing of `&mut` references, i.e. two `&mut` references that point to
/// the same physical address. This is undefined behavior in Rust.
/// - This can be ensured by making sure all frames in the provided range
/// are not mapped anywhere else.
/// - Creating uninitialized or invalid values: Rust requires that all values
/// have a correct memory layout. For example, a `bool` must be either a 0
/// or a 1 in memory, but not a 3 or 4. An exception is the `MaybeUninit`
/// wrapper type, which abstracts over possibly uninitialized memory.
/// - This is only a problem when re-mapping pages to different physical
/// frames. Mapping pages that are not in use yet is fine.
#[allow(dead_code)]
unsafe fn map_to(
&mut self,
page: Page,
phys_frame: PhysFrame,
num_pages: usize,
flags: PageTableFlags,
) -> Result<*mut u8, PagingError> {
self.check_request_valid(page, num_pages)?;
unsafe {
self.mapper
.map_to_range_with_table_flags(
PageRange { start: page, end: page + num_pages as u64 },
PhysFrameRange { start: phys_frame, end: phys_frame + num_pages as u64 },
flags | PageTableFlags::PRESENT,
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
&mut *PHYSICAL_MEMORY.lock(),
)
.map_err(|(err, flush_range)| {
flush_range.flush_range();
err
})?
.flush_range();
}
Ok(page.start_address().as_mut_ptr())
}
/// Maps virtual pages to newly allocated physical memory and returns the starting address.
/// The newly allocated physical memory contains garbage data, so the mapping will always be writable,
/// as it doesn't make sense to map garbage data read-only.
///
/// ## Safety
///
/// Creating page table mappings is a fundamentally unsafe operation because
/// there are various ways to break memory safety through it. For example,
/// re-mapping an in-use page to a different frame changes and invalidates
/// all values stored in that page, resulting in undefined behavior on the
/// next use.
///
/// The caller must ensure that no undefined behavior or memory safety
/// violations can occur through the new mapping by, among other things, preventing
/// creating uninitialized or invalid values. Rust requires that all values
/// have a correct memory layout. For example, a `bool` must be either a 0
/// or a 1 in memory, but not a 3 or 4. An exception is the `MaybeUninit`
/// wrapper type, which abstracts over possibly uninitialized memory.
/// Note: You only have to worry about this when re-mapping pages to
/// different physical frames. Mapping pages that are not in use yet is fine.
unsafe fn map(
&mut self,
page: Page,
num_pages: usize,
flags: PageTableFlags,
) -> Result<*mut u8, PagingError> {
self.check_request_valid(page, num_pages)?;
unsafe {
self.mapper
.map_range_with_table_flags(
PageRange { start: page, end: page + num_pages as u64 },
flags | PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
&mut *PHYSICAL_MEMORY.lock(),
)
.map_err(|(err, flush_range)| {
flush_range.flush_range();
err
})?
.flush_range();
}
Ok(page.start_address().as_mut_ptr())
}
/// Maps new virtual pages and returns the starting address.
///
/// ## Safety
///
/// The caller must ensure that no undefined behavior or memory safety
/// violations can occur through the new mapping by, among other things, preventing
/// aliasing of `&mut` references, i.e. two `&mut` references that point to the same
/// physical address. This is undefined behavior in Rust. Aliasing can be prevented
/// by making sure all frames in the provided range are not mapped anywhere else.
#[allow(dead_code)]
pub unsafe fn map_free_to(
&mut self,
phys_frame: PhysFrame,
num_pages: usize,
flags: PageTableFlags,
) -> Result<*mut u8, PagingError> {
unsafe { self.map_to(self.find_free_pages(num_pages)?, phys_frame, num_pages, flags) }
}
/// Maps new virtual pages to new physical memory and returns the starting address.
/// The newly allocated physical memory contains garbage data, so the mapping will always be writable,
/// as it doesn't make sense to map garbage data read-only.
pub fn map_free(
&mut self,
num_pages: usize,
flags: PageTableFlags,
) -> Result<*mut u8, PagingError> {
// SAFETY: &mut aliasing is prevented by using free physical frames, and uninitialized
// values are prevented by using free virtual pages.
unsafe { self.map(self.find_free_pages(num_pages)?, num_pages, flags) }
}
/// Same behavior as `map`, but first checks that the requested virtual page
/// range is unmapped, which makes it safe.
#[allow(unused)]
pub fn map_assert_unused(
&mut self,
page: Page,
num_pages: usize,
flags: PageTableFlags,
) -> Result<*mut u8, PagingError> {
self.check_request_unmapped(page, num_pages)?;
unsafe { self.map(page, num_pages, flags) }
}
/// Finds a range of free pages and returns the starting page
fn find_free_pages(&self, num_pages: usize) -> Result<Page, PagingError> {
let mut remaining_pages = num_pages;
let range = if self.is_kernel { KERNEL_PAGE_RANGE } else { USER_PAGE_RANGE };
for page in range {
if self.translate_addr(page.start_address()).is_none() {
remaining_pages -= 1;
if remaining_pages == 0 {
return Ok(page + 1 - (num_pages as u64));
}
} else {
remaining_pages = num_pages;
};
}
Err(PagingError::PageAllocationFailed)
}
}
impl Drop for AddressSpace {
fn drop(&mut self) {
drop_table(self.mapper.level_4_table_immut(), 4);
}
}
fn drop_table(table: &PageTable, level: u8) {
for (i, entry) in table.iter().enumerate() {
if level == 4 && i >= 256 {
continue;
}
if entry.flags().contains(PageTableFlags::PRESENT)
&& !entry.flags().contains(PageTableFlags::HUGE_PAGE)
{
if level > 2 {
// SAFETY: The present flag is set on the entry, which means the child frame must
// contain a valid page table, so making a reference to it must be ok.
// Unwrap: from_start_address requires its input to be 4KiB aligned (have none
// of its lower 12 bits set). The addr method only returns a 4KiB-aligned
// address if the HUGE_PAGE flag is not set. In addition, the returned address
// is only valid if the PRESENT flag is set. The if statement above has ensured
// that if we get here, HUGE_PAGE is not set and PRESENT is set.
drop_table(
unsafe { PhysFrame::from_start_address(entry.addr()).unwrap().as_virt_ref() },
level - 1,
);
}
// Deallocate the child table's frame; entry.addr() holds the child's
// physical address.
unsafe {
PHYSICAL_MEMORY
.lock()
.deallocate_frame(PhysFrame::from_start_address(entry.addr()).unwrap());
};
}
}
}
impl Translate for AddressSpace {
fn translate(&self, addr: VirtAddr) -> TranslateResult {
self.mapper.translate(addr)
}
}
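// Implementing Allocator lets standard containers draw whole pages straight
// from an address space, e.g. kernel_heap.rs backs the global heap with
// Box::new_uninit_slice_in(len, &*KERNEL_SPACE), and tasking.rs allocates
// kernel stacks with Vec::new_in(&*KERNEL_SPACE).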
unsafe impl Allocator for ASpaceMutex {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
if layout.align() > 4096 {
return Err(AllocError);
}
let size = layout.size().next_multiple_of(4096);
let mut space = self.0.lock();
let flags = if !space.is_kernel || space.alloc_force_user {
PageTableFlags::USER_ACCESSIBLE
} else {
PageTableFlags::empty()
};
let start = space.map_free(size / 4096, flags).map_err(|_| AllocError)?;
Ok(unsafe { slice::from_raw_parts_mut(start.cast::<u8>(), size) }.into())
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
let start_page = Page::from_start_address(VirtAddr::new(ptr.as_ptr() as u64)).unwrap();
unsafe {
self.0.lock().mapper.unmap_range(
Page::range(start_page, start_page + (layout.size().div_ceil(4096) - 1) as u64),
&mut *PHYSICAL_MEMORY.lock(),
)
}
.unwrap()
.flush_all();
}
}

163
src/virtual_memory/holes.rs Normal file

@@ -0,0 +1,163 @@
#![allow(unused)]
use core::{
cell::UnsafeCell,
marker::PhantomData,
mem,
ops::{Deref, DerefMut},
ptr::{addr_of, NonNull},
sync::atomic::{AtomicU16, AtomicU8, Ordering},
};
use intrusive_collections::{
container_of, offset_of, Adapter, DefaultLinkOps, LinkOps, LinkedListLink, PointerOps,
};
use static_assertions::const_assert;
struct Hole {
start: u64,
size: u64,
link: LinkedListLink,
}
unsafe impl Sync for HolePage {}
#[repr(align(4096))]
struct HolePage {
holes: UnsafeCell<[Hole; 127]>,
ptr_bitmap: [AtomicU8; 16],
ptr_counter: AtomicU16,
}
impl Drop for HolePage {
fn drop(&mut self) {
assert!(
self.ptr_counter.load(Ordering::Relaxed) == 0,
"HolePage cannot be dropped when HolePtrs to it exist!"
);
}
}
const_assert!(mem::size_of::<HolePage>() <= 4096);
impl HolePage {
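// ptr_bitmap tracks which of the 127 holes currently have a live HolePtr (one
// bit per slot); ptr_counter counts live pointers so the page can eventually
// be reclaimed once it drops to zero.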
fn get_ptr(&self, idx: u8) -> HolePtr {
let byte = idx / 8;
let bit = idx % 8;
let mask = 1 << bit;
assert!(
self.ptr_bitmap[byte as usize].load(Ordering::Relaxed) & mask == 0,
"Attempted to get a HolePtr to a Hole with an existing pointer"
);
self.ptr_bitmap[byte as usize].fetch_or(mask, Ordering::Relaxed);
self.ptr_counter.fetch_add(1, Ordering::Relaxed);
HolePtr { page: self as *const _, idx }
}
fn get_raw_ptr(&self, idx: u8) -> *const Hole {
assert!(idx < 127);
unsafe { (self.holes.get().cast::<Hole>()).add(idx as usize) }
}
fn get_mut_raw_ptr(&self, idx: u8) -> *mut Hole {
assert!(idx < 127);
unsafe { (self.holes.get().cast::<Hole>()).add(idx as usize) }
}
}
struct HolePtr {
page: *const HolePage,
idx: u8,
}
impl Deref for HolePtr {
type Target = Hole;
fn deref(&self) -> &Self::Target {
unsafe { &*((*self.page).get_raw_ptr(self.idx)) }
}
}
impl DerefMut for HolePtr {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *((*self.page).get_mut_raw_ptr(self.idx)) }
}
}
impl Drop for HolePtr {
fn drop(&mut self) {
let byte = self.idx / 8;
let bit = self.idx % 8;
let mask = !(1 << bit);
let page = unsafe { &*self.page };
page.ptr_bitmap[byte as usize].fetch_and(mask, Ordering::Relaxed);
let old_ctr = page.ptr_counter.fetch_sub(1, Ordering::Relaxed);
if old_ctr - 1 == 0 {
// TODO: Deallocate page
}
}
}
struct HolePtrOps(PhantomData<HolePtr>);
unsafe impl PointerOps for HolePtrOps {
type Value = Hole;
type Pointer = HolePtr;
unsafe fn from_raw(&self, value: *const Self::Value) -> Self::Pointer {
// Holes are stored in a page-aligned HolePage, so masking off the low 12
// bits of a hole's address recovers its containing page.
let page = (value as usize & !0xFFFusize) as *const HolePage;
let holes = unsafe { addr_of!((*page).holes) }.cast::<Hole>();
let idx = unsafe { value.offset_from(holes) };
HolePtr { page, idx: idx as u8 }
}
fn into_raw(&self, ptr: Self::Pointer) -> *const Self::Value {
unsafe { &*ptr.page }.get_raw_ptr(ptr.idx)
}
}
struct HoleAdapter {
link_ops: <LinkedListLink as DefaultLinkOps>::Ops,
ptr_ops: HolePtrOps,
}
impl HoleAdapter {
fn new() -> Self {
Self { link_ops: <LinkedListLink as DefaultLinkOps>::NEW, ptr_ops: HolePtrOps(PhantomData) }
}
}
unsafe impl Adapter for HoleAdapter {
type LinkOps = <LinkedListLink as DefaultLinkOps>::Ops;
type PointerOps = HolePtrOps;
unsafe fn get_value(
&self,
link: <Self::LinkOps as LinkOps>::LinkPtr,
) -> *const <Self::PointerOps as PointerOps>::Value {
container_of!(link.as_ptr(), Hole, link)
}
unsafe fn get_link(
&self,
value: *const <Self::PointerOps as PointerOps>::Value,
) -> <Self::LinkOps as LinkOps>::LinkPtr {
// We need to do this instead of just accessing the field directly
// to strictly follow the stacked borrows rules.
unsafe {
NonNull::new_unchecked((value.cast::<u8>()).add(offset_of!(Hole, link)) as *mut _)
}
}
fn link_ops(&self) -> &Self::LinkOps {
&self.link_ops
}
fn link_ops_mut(&mut self) -> &mut Self::LinkOps {
&mut self.link_ops
}
fn pointer_ops(&self) -> &Self::PointerOps {
&self.ptr_ops
}
}

3
sysroot/.crates.toml Normal file

@@ -0,0 +1,3 @@
[v1]
"init 0.1.0 (path+file:///home/pjht/projects/os-rust/init)" = ["init"]
"test_proc 0.1.0 (path+file:///home/pjht/projects/os-rust/test_proc)" = ["test_proc"]

1
sysroot/.crates2.json Normal file

@@ -0,0 +1 @@
{"installs":{"init 0.1.0 (path+file:///home/pjht/projects/os-rust/init)":{"version_req":null,"bins":["init"],"features":[],"all_features":false,"no_default_features":false,"profile":"release","target":"x86_64-unknown-none","rustc":"rustc 1.65.0-nightly (0b79f758c 2022-08-18)\nbinary: rustc\ncommit-hash: 0b79f758c9aa6646606662a6d623a0752286cd17\ncommit-date: 2022-08-18\nhost: x86_64-unknown-linux-gnu\nrelease: 1.65.0-nightly\nLLVM version: 15.0.0\n"},"test_proc 0.1.0 (path+file:///home/pjht/projects/os-rust/test_proc)":{"version_req":null,"bins":["test_proc"],"features":[],"all_features":false,"no_default_features":false,"profile":"release","target":"x86_64-unknown-none","rustc":"rustc 1.65.0-nightly (0b79f758c 2022-08-18)\nbinary: rustc\ncommit-hash: 0b79f758c9aa6646606662a6d623a0752286cd17\ncommit-date: 2022-08-18\nhost: x86_64-unknown-linux-gnu\nrelease: 1.65.0-nightly\nLLVM version: 15.0.0\n"}}}

BIN
sysroot/bin/init Executable file

Binary file not shown.

BIN
sysroot/bin/test_proc Executable file

Binary file not shown.

14
x86_64-unknown-none.json Normal file

@@ -0,0 +1,14 @@
{
"llvm-target": "x86_64-unknown-none",
"data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
"arch": "x86_64",
"target-endian": "little",
"target-pointer-width": "64",
"target-c-int-width": "32",
"os": "none",
"executables": true,
"linker-flavor": "ld.lld",
"linker": "rust-lld",
"panic-strategy": "abort",
"features": "-mmx,-sse,+soft-float"
}