From 92e9bda9bde223fac326aea4d71daa194b14fbd4 Mon Sep 17 00:00:00 2001 From: pjht Date: Tue, 19 Mar 2024 09:23:45 -0500 Subject: [PATCH] Initial commit --- Tupfile | 7 + Tuprules.tup | 6 + cards.68k | 39 ++++ cards.i | 6 + elf.i | 237 +++++++++++++++++++++ initrd.68k | 154 ++++++++++++++ initrd.i | 17 ++ kernel.ld | 35 ++++ libstd_override.68k | 11 + main.68k | 146 +++++++++++++ pmem.68k | 150 ++++++++++++++ pmem.i | 11 + start.68k | 86 ++++++++ start.i | 5 + syscall.i | 6 + syscalls.68k | 66 ++++++ tasking.68k | 234 +++++++++++++++++++++ tasking.i | 13 ++ term.68k | 35 ++++ term.i | 9 + traps.68k | 271 ++++++++++++++++++++++++ traps.i | 5 + vmem.68k | 491 ++++++++++++++++++++++++++++++++++++++++++++ vmem.i | 51 +++++ 24 files changed, 2091 insertions(+) create mode 100644 Tupfile create mode 100644 Tuprules.tup create mode 100644 cards.68k create mode 100644 cards.i create mode 100644 elf.i create mode 100644 initrd.68k create mode 100644 initrd.i create mode 100644 kernel.ld create mode 100644 libstd_override.68k create mode 100644 main.68k create mode 100644 pmem.68k create mode 100644 pmem.i create mode 100644 start.68k create mode 100644 start.i create mode 100644 syscall.i create mode 100644 syscalls.68k create mode 100644 tasking.68k create mode 100644 tasking.i create mode 100644 term.68k create mode 100644 term.i create mode 100644 traps.68k create mode 100644 traps.i create mode 100644 vmem.68k create mode 100644 vmem.i diff --git a/Tupfile b/Tupfile new file mode 100644 index 0000000..2191fa0 --- /dev/null +++ b/Tupfile @@ -0,0 +1,7 @@ +include_rules + +LDFLAGS = -z max-page-size=4096 --orphan-handling=error -T kernel.ld +ASFLAGS = -m68010 -spaces -Felf -ldots -align -quiet -x -nowarn=62 + +: foreach *.68k | ../ |> vasmm68k_mot $(ASFLAGS) -I../sysroot/usr/include -o %o %f |> %B.o +: *.o | kernel.ld ../ |> m68k-elf-ld $(LDFLAGS) -o %o %f '%' |> kernel diff --git a/Tuprules.tup b/Tuprules.tup new file mode 100644 index 0000000..0461dea --- /dev/null +++ b/Tuprules.tup @@ -0,0 +1,6 @@ +.gitignore +LDFLAGS = -z max-page-size=4096 --orphan-handling=error +ASFLAGS = -m68010 -spaces -Felf -ldots -align -quiet -x -nowarn=62 + +!as = |> vasmm68k_mot $(ASFLAGS) -o %o %f |> %B.o +!ld = |> m68k-elf-ld $(LDFLAGS) -o %o %f $(LDLIBS) |> diff --git a/cards.68k b/cards.68k new file mode 100644 index 0000000..b9c430e --- /dev/null +++ b/cards.68k @@ -0,0 +1,39 @@ + section .text,text +; Finds the first card with the type in d0.b, and returns it's IO base address in a0, or 0 if not found +; Clobbers d1 +; Warning to developers: This function must only use PC-relative addressing as it is called from early boot before the kernel is properly mapped. 
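[Editor's sketch] The header comment above describes the card-probe scheme that find_first_card (just below) implements: each expansion slot gets a 256-byte I/O window starting at $FF0000, a 16-bit type word sits at offset $FE of the window, and a type of 0 marks an empty slot. The C below is only a host-side illustration of that walk — the slot_type[] array stands in for the real memory-mapped windows, and the "return 0 at an empty slot" behaviour follows the routine's stated contract.

    /* Host-side C sketch of the probe loop implemented by find_first_card.
     * Constants come from the assembly (0x100-byte windows from 0xFF0000,
     * type word at offset 0xFE, type 0 = empty slot); slot_type[] is a
     * stand-in so the sketch compiles and runs anywhere. */
    #include <stdint.h>
    #include <stdio.h>

    #define CARD_IO_BASE 0xFF0000u   /* probe starts here                */
    #define CARD_STRIDE  0x100u      /* one 256-byte window per slot     */
    #define CARD_LAST    0xFFFF00u   /* last window get_all_cards scans  */

    static uint16_t slot_type[(CARD_LAST - CARD_IO_BASE) / CARD_STRIDE];

    /* Returns the I/O base of the first card with the given type, or 0 if
     * an empty slot (type 0) is reached first -- the contract stated in the
     * routine's header comment. */
    static uint32_t find_first_card(uint8_t type)
    {
        for (uint32_t base = CARD_IO_BASE + CARD_STRIDE; base <= CARD_LAST;
             base += CARD_STRIDE) {
            uint16_t t = slot_type[(base - CARD_IO_BASE) / CARD_STRIDE - 1];
            if (t == 0)
                return 0;            /* empty slot: end of card list      */
            if ((uint8_t)t == type)
                return base;         /* match: return this I/O window     */
        }
        return 0;
    }

    int main(void)
    {
        slot_type[0] = 1;  /* ROM card  */
        slot_type[1] = 3;  /* terminal  */
        slot_type[2] = 5;  /* MMU       */
        printf("MMU card I/O base: 0x%06X\n", find_first_card(5));
        return 0;
    }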
+ public find_first_card +find_first_card: + move.l #$ff0000, a0 ; a0 holds the address of the current card +ffc_loop: + lea ($100,a0), a0 ; Move to the next card + move.w ($fe,a0), d1 ; Load the type of the card into d1 + beq.b ffc_done ; If the type is 0 (empty slot), we have scanned all cards, so exit the loop + cmp.b d0, d1 ; If the card is the type we want, return with the address in a0 + beq.b ffc_done + bra.b ffc_loop ; Loop back and check the next card +ffc_done: + rts + +get_all_cards: +; Gets the indexes of the cards with the specified type +; a0 is a pointer to the buffer to store the results in +; d0 is the length of the buffer in 4 byte units +; d1 is the type of card to find + move.l #$ff0000, a1 ; a0 holds the address of the current card +gac_loop: + lea ($100,a1), a1 ; Move to the next card + move.w ($fe,a1), d2 ; Load the type of the card into d1 + cmp.b d1, d2 ; Check whether the type of the current card is the type we want + bne.b gac_next ; Skip the card if it is not + move.l a1, (a0) ; Put the card base into the buffer + lea ($4,a0), a0 ; Move to the next buffer location + subq.w #1, d0 ; Decement the count of available buffer locations +gac_next: + cmpa.l #$ffff00, a1 ; Check if we have gone through all the cards + beq.b gac_done ; If so, return + cmpi.b #0, d0 ; Check if we have filled the buffer + beq.b gac_done ; If so, returm + bra.b gac_loop +gac_done: + rts diff --git a/cards.i b/cards.i new file mode 100644 index 0000000..6edfe40 --- /dev/null +++ b/cards.i @@ -0,0 +1,6 @@ + ifnd CARDS_I +CARDS_I equ 1 +; Finds the first card with the type in d0.b, and returns it's IO base address in a0, or 0 if not found +; Clobbers d1 + xref find_first_card + endif diff --git a/elf.i b/elf.i new file mode 100644 index 0000000..f9857bb --- /dev/null +++ b/elf.i @@ -0,0 +1,237 @@ + ifnd ELF_I +ELF_I equ 1 + clrso +Elf_Ehdr.ei_mag: so.b 4 +Elf_Ehdr.ei_class: so.b 1 +Elf_Ehdr.ei_data: so.b 1 +Elf_Ehdr.ei_version so.b 1 +Elf_Ehdr.ei_osabi: so.b 1 +Elf_Ehdr.ei_padd: so.b 8 +Elf_Ehdr.e_type: so.w 1 +Elf_Ehdr.e_machine: so.w 1 +Elf_Ehdr.e_version: so.l 1 +Elf_Ehdr.sizeof equ __SO +Elf32_Ehdr.e_entry: so.l 1 +Elf32_Ehdr.e_phoff: so.l 1 +Elf32_Ehdr.e_shoff: so.l 1 +Elf32_Ehdr.e_flags: so.l 1 +Elf32_Ehdr.e_ehsize: so.w 1 +Elf32_Ehdr.e_phentsize: so.w 1 +Elf32_Ehdr.e_phnum: so.w 1 +Elf32_Ehdr.e_shentsize: so.w 1 +Elf32_Ehdr.e_shnum: so.w 1 +Elf32_Ehdr.e_shstrndx: so.w 1 +Elf32_Ehdr.sizeof equ __SO + + +ELFMAG0 equ $7f ; EI_MAG +ELFMAG1 equ 'E' +ELFMAG2 equ 'L' +ELFMAG3 equ 'F' + +ELFCLASSNONE equ 0 ; EI_CLASS +ELFCLASS32 equ 1 +ELFCLASS64 equ 2 +ELFCLASSNUM equ 3 + +ELFDATANONE equ 0 ; EI_DATA +ELFDATA2LSB equ 1 +ELFDATA2MSB equ 2 +ELFDATANUM equ 3 + +ET_NONE equ 0 ; e_type +ET_REL equ 1 +ET_EXEC equ 2 +ET_DYN equ 3 +ET_CORE equ 4 +ET_NUM equ 5 +ET_LOOS equ $fe00 ; OS specific range +ET_LOSUNW equ $feff +ET_SUNWPSEUDO equ $feff +ET_HISUNW equ $feff +ET_HIOS equ $feff +ET_LOPROC equ $ff00 ; processor specific range +ET_HIPROC equ $ffff + +EM_NONE equ 0 ; e_machine +EM_M32 equ 1 ; AT&T WE 32100 +EM_SPARC equ 2 ; Sun SPARC +EM_386 equ 3 ; Intel 80386 +EM_68K equ 4 ; Motorola 68000 +EM_88K equ 5 ; Motorola 88000 +EM_486 equ 6 ; Intel 80486 +EM_860 equ 7 ; Intel i860 +EM_MIPS equ 8 ; MIPS RS3000 Big-Endian +EM_S370 equ 9 ; IBM System/370 Processor +EM_MIPS_RS3_LE equ 10 ; MIPS RS3000 Little-Endian +EM_RS6000 equ 11 ; RS6000 +EM_UNKNOWN12 equ 12 +EM_UNKNOWN13 equ 13 +EM_UNKNOWN14 equ 14 +EM_PA_RISC equ 15 ; PA-RISC +EM_nCUBE equ 16 ; nCUBE +EM_VPP500 equ 17 ; Fujitsu VPP500 +EM_SPARC32PLUS equ 
18 ; Sun SPARC 32+ +EM_960 equ 19 ; Intel 80960 +EM_PPC equ 20 ; PowerPC +EM_PPC64 equ 21 ; 64-bit PowerPC +EM_UNKNOWN22 equ 22 +EM_UNKNOWN23 equ 23 +EM_UNKNOWN24 equ 24 +EM_UNKNOWN25 equ 25 +EM_UNKNOWN26 equ 26 +EM_UNKNOWN27 equ 27 +EM_UNKNOWN28 equ 28 +EM_UNKNOWN29 equ 29 +EM_UNKNOWN30 equ 30 +EM_UNKNOWN31 equ 31 +EM_UNKNOWN32 equ 32 +EM_UNKNOWN33 equ 33 +EM_UNKNOWN34 equ 34 +EM_UNKNOWN35 equ 35 +EM_V800 equ 36 ; NEX V800 +EM_FR20 equ 37 ; Fujitsu FR20 +EM_RH32 equ 38 ; TRW RH-32 +EM_RCE equ 39 ; Motorola RCE +EM_ARM equ 40 ; Advanced RISC Marchines ARM +EM_ALPHA equ 41 ; Digital Alpha +EM_SH equ 42 ; Hitachi SH +EM_SPARCV9 equ 43 ; Sun SPARC V9 (64-bit) +EM_TRICORE equ 44 ; Siemens Tricore embedded processor +EM_ARC equ 45 ; Argonaut RISC Core, + ; Argonaut Technologies Inc. +EM_H8_300 equ 46 ; Hitachi H8/300 +EM_H8_300H equ 47 ; Hitachi H8/300H +EM_H8S equ 48 ; Hitachi H8S +EM_H8_500 equ 49 ; Hitachi H8/500 +EM_IA_64 equ 50 ; Intel IA64 +EM_MIPS_X equ 51 ; Stanford MIPS-X +EM_COLDFIRE equ 52 ; Motorola ColdFire +EM_68HC12 equ 53 ; Motorola M68HC12 +EM_MMA equ 54 ; Fujitsu MMA Mulimedia Accelerator +EM_PCP equ 55 ; Siemens PCP +EM_NCPU equ 56 ; Sony nCPU embedded RISC processor +EM_NDR1 equ 57 ; Denso NDR1 microprocessor +EM_STARCORE equ 58 ; Motorola Star*Core processor +EM_ME16 equ 59 ; Toyota ME16 processor +EM_ST100 equ 60 ; STMicroelectronics ST100 processor +EM_TINYJ equ 61 ; Advanced Logic Corp. TinyJ + ; embedded processor family +EM_AMD64 equ 62 ; AMDs x86-64 architecture +EM_X86_64 equ EM_AMD64 ; (compatibility) + +EM_PDSP equ 63 ; Sony DSP Processor +EM_UNKNOWN64 equ 64 +EM_UNKNOWN65 equ 65 +EM_FX66 equ 66 ; Siemens FX66 microcontroller +EM_ST9PLUS equ 67 ; STMicroelectronics ST9+8/16 bit +; microcontroller +EM_ST7 equ 68 ; STMicroelectronics ST7 8-bit +; microcontroller +EM_68HC16 equ 69 ; Motorola MC68HC16 Microcontroller +EM_68HC11 equ 70 ; Motorola MC68HC11 Microcontroller +EM_68HC08 equ 71 ; Motorola MC68HC08 Microcontroller +EM_68HC05 equ 72 ; Motorola MC68HC05 Microcontroller +EM_SVX equ 73 ; Silicon Graphics SVx +EM_ST19 equ 74 ; STMicroelectronics ST19 8-bit +; microcontroller +EM_VAX equ 75 ; Digital VAX +EM_CRIS equ 76 ; Axis Communications 32-bit + ; embedded processor +EM_JAVELIN equ 77 ; Infineon Technologies 32-bit + ; embedded processor +EM_FIREPATH equ 78 ; Element 14 64-bit DSP Processor +EM_ZSP equ 79 ; LSI Logic 16-bit DSP Processor +EM_MMIX equ 80 ; Donald Knuth's educational + ; 64-bit processor +EM_HUANY equ 81 ; Harvard University +; machine-independent + ; object files +EM_PRISM equ 82 ; SiTera Prism +EM_AVR equ 83 ; Atmel AVR 8-bit microcontroller +EM_FR30 equ 84 ; Fujitsu FR30 +EM_D10V equ 85 ; Mitsubishi D10V +EM_D30V equ 86 ; Mitsubishi D30V +EM_V850 equ 87 ; NEC v850 +EM_M32R equ 88 ; Mitsubishi M32R +EM_MN10300 equ 89 ; Matsushita MN10300 +EM_MN10200 equ 90 ; Matsushita MN10200 +EM_PJ equ 91 ; picoJava +EM_OPENRISC equ 92 ; OpenRISC 32-bit embedded processor +EM_ARC_A5 equ 93 ; ARC Cores Tangent-A5 +EM_XTENSA equ 94 ; Tensilica Xtensa architecture +EM_NUM equ 95 + +EV_NONE equ 0 ; e_version, EI_VERSION +EV_CURRENT equ 1 +EV_NUM equ 2 + + +ELFOSABI_NONE equ 0 ; No extensions or unspecified +ELFOSABI_HPUX equ 1 ; Hewlett-Packard HP-UX +ELFOSABI_NETBSD equ 2 ; NetBSD +ELFOSABI_LINUX equ 3 ; Linux +ELFOSABI_UNKNOWN4 equ 4 +ELFOSABI_UNKNOWN5 equ 5 +ELFOSABI_SOLARIS equ 6 ; Sun Solaris +ELFOSABI_AIX equ 7 ; AIX +ELFOSABI_IRIX equ 8 ; IRIX +ELFOSABI_FREEBSD equ 9 ; FreeBSD +ELFOSABI_TRU64 equ 10 ; Compaq TRU64 UNIX +ELFOSABI_MODESTO equ 11 ; Novell 
Modesto +ELFOSABI_OPENBSD equ 12 ; Open BSD + + clrso +Elf32_Phdr.p_type: so.l 1 +Elf32_Phdr.p_offset: so.l 1 +Elf32_Phdr.p_vaddr: so.l 1 +Elf32_Phdr.p_paddr: so.l 1 +Elf32_Phdr.p_filesz: so.l 1 +Elf32_Phdr.p_memsz: so.l 1 +Elf32_Phdr.p_flags: so.l 1 +Elf32_Phdr.p_align: so.l 1 +Elf32_Phdr.sizeof equ __SO + +PT_NULL equ 0 ; p_type +PT_LOAD equ 1 +PT_DYNAMIC equ 2 +PT_INTERP equ 3 +PT_NOTE equ 4 +PT_SHLIB equ 5 +PT_PHDR equ 6 +PT_TLS equ 7 +PT_NUM equ 8 + +PT_LOOS equ $60000000 ; OS specific range + +; +; Note: The amd64 psABI defines that the UNWIND program header +; should reside in the OS specific range of the program +; headers. +; +PT_SUNW_UNWIND equ $6464e550 ; amd64 UNWIND program header +PT_GNU_EH_FRAME equ PT_SUNW_UNWIND + + +PT_LOSUNW equ $6ffffffa +PT_SUNWBSS equ $6ffffffa ; Sun Specific segment +PT_SUNWSTACK equ $6ffffffb ; describes the stack segment +PT_SUNWDTRACE equ $6ffffffc ; private +PT_SUNWCAP equ $6ffffffd ; hard/soft capabilities segment +PT_HISUNW equ $6fffffff +PT_HIOS equ $6fffffff + +PT_LOPROC equ $70000000 ; processor specific range +PT_HIPROC equ $7fffffff + +PF_R equ $4 ; p_flags +PF_W equ $2 +PF_X equ $1 + +PF_MASKOS equ $0ff00000 ; OS specific values +PF_MASKPROC equ $f0000000 ; processor specific values + +PF_SUNW_FAILURE equ $00100000 ; mapping absent due to failure + +PN_XNUM equ $ffff ; extended program header index + endif diff --git a/initrd.68k b/initrd.68k new file mode 100644 index 0000000..3a78b62 --- /dev/null +++ b/initrd.68k @@ -0,0 +1,154 @@ +STORAGE_SEC equ $0 +STORAGE_CNT equ $4 +STORAGE_CMD equ $8 +STORAGE_DMADR equ $C + + include cards.i + include string.i + include memory.i + section .text,text +; Initialize the initrd reader + public initrd_init +initrd_init: + move.w #$4, d0 ; Get the pointer to the MMU card + jsr find_first_card + move.l a0, storage_card_base + rts + +; Get the start byte for a specific file in the initrd +; Pointer to name in a0 +; Number returned in d0, or 0 if not found + public initrd_find_file +initrd_find_file: + movem.l d2/a2, -(a7) + move.l a0, a2 ; Save the name pointer in a2 + move.l #512, d0 ; Allocate a buffer to read sectors into + jsr malloc + move.l a0, a1 ; Save the buffer pointer in a1 + move.l #1, d0 ; Set the first sector to read +iff_loop: + movem.l d0/a1, -(a7) + move.l storage_card_base, a0 ; Read a file header sector + move.l #1, d1 + bsr.b read_sectors + movem.l (a7)+, d0/a1 + move.l (a1), d2 ; Abort with a 0 return if we have reached the end of the initrd + bne.b .1 + move.l #0, d0 + bra.b iff_done +.1: + movem.l d0/a1, -(a7) + move.l a2, a0 ; Compare the passed in file name with the one in the header sector + adda.l #12, a1 + jsr strcmp + movem.l (a7)+, d0/a1 + bne.b .2 ; Continue to the next file if the name doesn't match + addi.l #1, d0 ; Add one to the current sector number to get the start sector of the file + lsl.l #8, d0 ; Shift the sector number left by 9 to get the start byte of the file + lsl.l #1, d0 + movem.l (a7)+, d2/a2 + rts +.2: + ; Add the # of sectors of file data + 1 to the sector number to advance to the next header sector + addi.l #1, d0 + move.l (4,a1), d2 + add.l d2, d0 + bra.b iff_loop +iff_done: + movem.l (a7)+, d2/a2 + rts + +; Reads bytes of a file from the initrd +; Start byte of file in d0 +; Offset in d1 +; Length in d2 +; Buffer in a0 + public initrd_read_bytes +initrd_read_bytes: + add.l d1, d0 + move.l d2, d1 + move.l a0, a1 + move.l storage_card_base, a0 + bra.b read_bytes + +; Reads sectors from a storage card +; Card base in a0 +; Destination in a1 +; Start 
sector in d0.l +; Sector count in d1.l +read_sectors: + cmpi.l #0, d1 ; Do nothing if asked to read 0 sectors + beq.b read_sectors_done + move.l d0, (STORAGE_SEC,a0) ; Set the sector number + move.l d1, (STORAGE_CNT,a0) ; Set the sector count + move.l a1, (STORAGE_DMADR,a0) ; Set the destination address + move.w #$1, (STORAGE_CMD,a0) ; Issue a DMA read command +read_sectors_done: + rts + +; Reads bytes off a storage card +; Card base in a0 +; Destination in a1 +; Start byte in d0.l +; Byte count in d1.l +read_bytes: + movem.l d2-d5/a2/a3,-(a7) ; Save callee preserved registers + move.l d0, d4 ; Save start byte in d4 + move.l d1, d5 ; Save byte count in d5 + move.l a1, a3 ; Save destination in a6 + lsr.l #8, d0 ; Divide start byte by 512 to compute starting sector + lsr.l #1, d0 + move.l d0, d3 ; Save the starting sector in d3 + move.l #1, d1 ; Read the starting sector into the sector buffer + move.l #sec_buf, a1 + bsr.b read_sectors + move.l a3, a2 ; Load the destination into a2 + move.l d4, d0 ; Load the start byte into d0 + andi.l #$1FF, d0 ; Modulus start byte by 512 to compute sector data offset + adda.l d0, a1 ; Add the offset to the start of the sector buffer + move.l #$200, d1 ; Compute the number of bytes to transfer by subtracting the offset from 512 + sub.l d0, d1 + cmp d5, d1 ; Compare the number of bytes to transfer with the byte count + ble.b count_ok ; If it was less than the byte count, do not adjust the bytes to transfer + move.l d5, d1 ; Otherwise, cap the transfer count to the total byte count +count_ok: + move.l d1, d4 ; Save the number of bytes in d4 + subi.l #1, d1 ; Subtract 1 from the number of bytes to account for the extra loop done by dbra +start_sec_loop: ; Transfer the required bytes from the start sector to the destination buffer + move.b (a1)+, (a2)+ + dbra d1, start_sec_loop + move.l d3, d0 ; Load the starting sector into d0 + addi #1, d0 ; Compute the start of the middle sectors by adding 1 to the starting sector + move.l d4, d2 ; Load the number of bytes transferred into d2 + move.l d5, d1 ; Load the byte count into d1 + sub.l d2, d1 ; Compute the number of remaining bytes by subtracting the number of transferred bytes from the byte count + cmpi.l #0, d1 ; If there are no more bytes to read, end early + beq.b read_bytes_done + move.l d1, d4 ; Save the number of remaining bytes in d4 + lsr.l #8, d1 ; Divide remaining bytes by 512 to compute the number of middle sectors + lsr.l #1, d1 + move.l a2, a1 ; Transfer the sector data to the end of the start sector bytes + bsr.b read_sectors ; Read the middle sectors + move.l d1, d3 ; Save the number of middle sectors in d3 + lsl.l #8, d1 ; Multiply the number of middle sectors by 512 to compute the number of bytes transferred + lsl.l #1, d1 + sub.l d1, d4 ; Subtract the number of transferred bytes from the number of remaining bytes + cmpi.l #0, d4 ; If there are no more bytes to read, end early + beq.b read_bytes_done + adda.l d1, a2 ; Add the number of bytes transferred to a2 + add.l d3, d0 ; Compute the end sector number by adding the start and count of the middle sectors + move.l #1, d1 ; Set the number of sectors to read to 1 + move.l #sec_buf, a1 ; Set the read address of the sector to the sector buffer + bsr.w read_sectors ; Read the end sector + move.l d4, d1 ; Load the number of remaining bytes into d1 +end_sec_loop: ; Transfer the required bytes from the start sector to the destination buffer + move.b (a1)+, (a2)+ + dbra d1, end_sec_loop +read_bytes_done: + movem.l (a7)+, d2-d5/a2/a3 ; Restore callee 
preserved registers + rts + + + section .bss,bss +storage_card_base: ds.b 4 +sec_buf: ds.b 512 diff --git a/initrd.i b/initrd.i new file mode 100644 index 0000000..b9379db --- /dev/null +++ b/initrd.i @@ -0,0 +1,17 @@ + ifnd INITRD_I +INITRD_I equ 1 +; Initialize the initrd reader + xref initrd_init + +; Get the start byte for a specific file in the initrd +; Pointer to name in a0 +; Number returned in d0, or 0 if not found + xref initrd_find_file + +; Reads bytes of a file from the initrd +; Start byte of file in d0 +; Offset in d1 +; Length in d2 +; Buffer in a0 + xref initrd_read_bytes + endif diff --git a/kernel.ld b/kernel.ld new file mode 100644 index 0000000..ddc63d5 --- /dev/null +++ b/kernel.ld @@ -0,0 +1,35 @@ +ENTRY (_start) + +SECTIONS +{ + . = 0x1000; + .early.text ALIGN (4K) : { + *(.early.text) + } + .early.rodata ALIGN (4K) : { + *(.early.rodata) + } + .early.data ALIGN (4K) : { + *(.early.data) + } + .early.bss ALIGN (4K) : { + *(.early.bss) + } + + . += 0xC00000; + .text ALIGN (4K) : AT (ADDR (.text) - 0xC00000) { + *(.text) + } + .rodata ALIGN (4K) : AT (ADDR (.rodata) - 0xC00000) { + *(.rodata) + } + .data ALIGN (4K) : AT (ADDR (.data) - 0xC00000) { + *(.data) + } + .bss ALIGN (4K) : AT (ADDR (.bss) - 0xC00000) { + *(COMMON) + *(.bss) + } + + _kernel_end_page = ALIGN(4K); +} diff --git a/libstd_override.68k b/libstd_override.68k new file mode 100644 index 0000000..c9431ca --- /dev/null +++ b/libstd_override.68k @@ -0,0 +1,11 @@ + include vmem.i + section .text,text + public alloc_pages +alloc_pages: + move.l d2, -(a7) + move.l #0, d1 + move.l #$2, d2 + jsr vmem_map_free +alloc_pages_done: + move.l (a7)+, d2 + rts diff --git a/main.68k b/main.68k new file mode 100644 index 0000000..dcdb5ad --- /dev/null +++ b/main.68k @@ -0,0 +1,146 @@ + include elf.i + include term.i + include traps.i + include vmem.i + include pmem.i + include initrd.i + include tasking.i + include start.i + include memory.i + section .text,text + public main +main: + move.l #inital_stack, a7 ; Load the initial stack pointer + jsr term_init + jsr traps_init + jsr vmem_init + jsr pmem_init + jsr initrd_init + jsr pmem_pop_frame + move.l #0, d2 + move.l #$FEF000, a0 + move.l #1, d1 + move.l #$2, d3 + move.l d0, -(a7) + jsr vmem_map_to + move.l (a7)+, d0 + move.l #($FEF000+$1000), a7 + clrfo +main.elf_header: fo.b Elf32_Ehdr.sizeof + link a6, #__FO ; Create a stack frame + move.l d0, a0 + jsr tasking_init + ; Activate the address space for init + move.l #init_addr_space, a0 + jsr vmem_activate_addr_space + ; Get the offset of init in the initrd + move.l #init_name, a0 + jsr initrd_find_file + move.l d0, d5 + ; Read the ELF header + lea.l (main.elf_header,a6), a0 + move.l #0, d1 + move.l #Elf32_Ehdr.sizeof, d2 + jsr initrd_read_bytes + ; Allocate elf_header.e_phnum * 32 bytes to read the program headers into + clr.l d0 + move.w (main.elf_header+Elf32_Ehdr.e_phnum,a6), d0 + move.l d0, d4 + lsl.l #5, d0 + move.l d0, d2 + jsr malloc + ; Read the program headers + move.l (main.elf_header+Elf32_Ehdr.e_phoff,a6), d1 + move.l a0, -(a7) + move.l d5, d0 + jsr initrd_read_bytes + move.l (a7)+, a2 + ; Loop through the program headers + subi.l #1, d4 +phead_loop: + ; If the program header's type is not LOAD, skip it + move.l (Elf32_Phdr.p_type,a2), d0 + cmpi.l #PT_LOAD, d0 + bne.w skip_pheader + ; Get the memory size of the pheader into d0 and round it to the next multiple of 4096 + ; round_size = p_memsz & 0xFFF > 0 ? 
p_memsz & ~(0xFFF) + 1 : p_memsz & ~(0xFF) + move.l (Elf32_Phdr.p_memsz,a2), d0 + andi.l #$FFF, d0 + beq.b .1 + move.l #$1000, d1 + bra.b .2 +.1: + move.l #0, d1 +.2: + move.l (Elf32_Phdr.p_memsz,a2), d0 + andi.l #(~$FFF), d0 + add.l d1, d0 + ; Shift the rounded size right by 12 to get the number of pages the pheader takes up + lsr.l #8, d0 + lsr.l #4, d0 + ; Map the segment to free memory + move.l (Elf32_Phdr.p_vaddr,a2), a0 + move.l #0, d1 + move.l #6, d2 + jsr vmem_map + ; Zero the segment's memory + move.l (Elf32_Phdr.p_vaddr,a2), a0 + move.l (Elf32_Phdr.p_memsz,a2), d0 + subi.l #1, d0 +.3: + move.b #0, (a0)+ + dbra d0, .3 + ; Read the segment's data off disk + move.l d5, d0 + move.l (Elf32_Phdr.p_offset,a2), d1 + move.l (Elf32_Phdr.p_filesz,a2), d2 + move.l (Elf32_Phdr.p_vaddr,a2), a0 + jsr initrd_read_bytes + ; Get the memory size of the pheader into d0 and round it to the next multiple of 4096 + ; Same as abobe + move.l (Elf32_Phdr.p_memsz,a2), d0 + andi.l #$FFF, d0 + beq.b .4 + move.l #$1000, d1 + bra.b .5 +.4: + move.l #0, d1 +.5: + move.l (Elf32_Phdr.p_memsz,a2), d0 + andi.l #(~$FFF), d0 + add.l d1, d0 + ; Shift the rounded size right by 12 to get the number of pages the pheader takes up + lsr.l #8, d0 + lsr.l #4, d0 + ; Get the pheader's flags and make them into the page mapping flags, put the + ; argumemts into the right registers, and set the flags for the pheader's memory + move.l d0, d1 + move.l (Elf32_Phdr.p_vaddr,a2), a0 + move.l (Elf32_Phdr.p_flags,a2), d0 + andi.l #PF_W, d0 + move.l (Elf32_Phdr.p_flags,a2), d2 + andi.l #PF_X, d2 + lsl.l #3, d2 + or.l d2, d0 + ori.l #$4, d0 + move.l #$0, d2 + jsr vmem_set_flags +skip_pheader: + ; Advance to the next pheader and loop back if there are more + lea.l (Elf32_Phdr.sizeof,a2), a2 + dbra d4, phead_loop + ; Create the init process + move.l #init_addr_space, a0 + move.l (main.elf_header+Elf32_Ehdr.e_entry,a6), a1 + jsr tasking_new_process + unlk a6 ; Tear down the stack frame + jsr tasking_exit + + section .bss,bss + ds.b 4096 +inital_stack: + + section .data,data +init_name: dc.b "init",0 + align 1 +init_addr_space: dc.l $0, $0, $0, kernel_map - $6000 diff --git a/pmem.68k b/pmem.68k new file mode 100644 index 0000000..475508c --- /dev/null +++ b/pmem.68k @@ -0,0 +1,150 @@ + include vmem.i + include term.i + include cards.i + include string.i + xref _kernel_end_page + section .text, text + public pmem_init +; Initialize the physical memory manager +pmem_init: + movem.l d2/d3/d4, -(a7) ; Save old values of d2, d3, and d4 (callee preserved) + clrfo +pmem_init.buf: fo.b 12 + link a6, #__FO ; Create a 12-byte stack frame to use as a buffer for hex_to_ascii + move.w #$1, d0 ; Get the pointer to the ROM card + jsr find_first_card + move.w #$C000, ($F0,a0) ; Enable the card at physical address $C0000000 + move.b #$1, ($F3,a0) + move.l #0, d2 ; Map the beginning of the ROM card's RAM into virtual memory + move.l #$C0008000, d0 + move.l #$1, d1 + move.l #$2, d3 + jsr vmem_map_free_to + move.l a0, -(a7) ; Save a0 + move.l a0, a1 + move.l #$1, d0 + jsr vmem_get_free_kernel_pages ; Alocate the stack page + move.l a0, stack_page_addr + move.l a0, a1 + move.l #kernel_address_space, a0 + move.l (a7)+, a0 ; Restore a0 + adda.l #($100-$4), a0 + move.l #1, d0 ; d0 holds the physical base address of the RAM card (+ 1 for the enable flag) + move.l #_kernel_end_page, d4 ; Put the physical address of the last kernel page into d4 + sub.l #$C01000, d4 +ramcard_map_loop: + lea ($4,a0), a0 ; Move to the next RAM card + move.l (a0), a1 ; Load the IO base 
pointer into a1 + cmpa.l #0, a1 + beq.w ramcard_map_loop_done ; If the pointer is 0, we have reached the end of the list, so exit the loop + move.l d0, (a1) ; Map the card to the base address in d0 + move.l d0, d3 ; Save the base address in d3 for later use + move.l ($4,a1), d1 ; Advance d0 by the size of the card + move.l d1, d2 ; Save the card size in d2 for later use + add.l d1, d0 + movem.l d0/a0, -(a7) ; Save base address and RAM card pointer + move.l #pmem_log_name, a0 ; Log the mapped RAM card + jsr term_print + lea.l (pmem_init.buf,a6), a0 ; Print the card size in bytes + move.l d2, d0 + jsr hex_to_ascii + lea.l (pmem_init.buf,a6), a0 + jsr term_print + move.l #ramcard_log_msg, a0 ; Print " byte RAM card at " + jsr term_print + lea.l (pmem_init.buf,a6), a0 ; Print the card base address + move.l d3, d0 + subi.l #1, d0 + jsr hex_to_ascii + lea.l (-12,a6), a0 + jsr term_println + subi.l #1, d3 +rc_map_page_push_loop: + cmp.l d4, d3 ; If the current physical page is used by the kernel binary, skip it + bhi.b .1 + add.l #$1000, d3 + sub.l #$1000, d2 + bra.b rc_map_page_push_loop +.1: + move.l d3, d0 ; Push the frame in d3 and move to the next one + jsr pmem_push_frame + add.l #$1000, d3 + sub.l #$1000, d2 + bne.b rc_map_page_push_loop ; Loop back if this card has more pages to push +rc_map_pages_pushed: + movem.l (a7)+, d0/a0 ; Restore base address and RAM card pointer + bra.w ramcard_map_loop ; Loop back and map the next card +ramcard_map_loop_done: + move.l a0, d0 + andi.l #(~$FFF), d0 + move.l d0, a0 + move.l #0, d0 + jsr vmem_unmap_page + unlk a6 ; Tear down the stack frame + movem.l (a7)+, d2/d3/d4 ; Restore d2, d3, and d4 (callee preserved) + rts + + public pmem_push_frame +; Pushes a frame onto the stack +; Frame to push in d0 +pmem_push_frame: + move.l d0, -(a7) + move.l #0, d0 + move.l stack_page_addr, a0 + jsr vmem_get_map_ptr + move.l (a7)+, d0 + move.l (a0), d1 ; Read the mapping entry into d1 + andi.l #(~$FFF), d1 ; Clear the entry's flags to get the pointer to its physical page + movem.l d1/d2/d3, -(a7) + ; Map the stack page to the frame to push + move.l #0, d2 + move.l stack_page_addr, a0 + move.l #1, d1 + move.l #$2, d3 + jsr vmem_map_to + move.l stack_page_addr, a0 ; Clear the TLB entry for the mapping page + jsr vmem_clear_tlb_entry + movem.l (a7)+, d1/d2/d3 + move.l stack_page_addr, a0 ; Load the address of the stack page into a0 + move.l d1, (a0) ; Write the address of the old top frame to the next pointer of the new top + rts + + public pmem_pop_frame +; Pops a frame off the stack +; Returns frame address in d0 +pmem_pop_frame: + move.l #0, d0 ; Read the mapping entry into d0 + move.l stack_page_addr, a0 + jsr vmem_get_map_ptr + move.l (a0), d0 + andi.l #(~$FFF), d0 ; Clear the entry's flags to get the pointer to its physical frame + move.l stack_page_addr, a1 ; Load the address of the stack page into a1 + move.l (a1), d1 ; Get the address of the frame pointed to by the top of the stack + beq.b pop_no_page ; If the pointed-to frame is the null frame, OOM + movem.l d0/d2/d3, -(a7) + ; Map the stack page to the pointed-to-frame + move.l d1, d0 + move.l #0, d2 + move.l stack_page_addr, a0 + move.l #1, d1 + move.l #$2, d3 + jsr vmem_map_to + move.l stack_page_addr, a0 ; Clear the TLB entry for the mapping page + jsr vmem_clear_tlb_entry + movem.l (a7)+, d0/d2/d3 + rts +pop_no_page: + move.l #oom_error_str, a0 + jsr term_println + stop #$2700 + + section .data,data +pmem_log_name: dc.b "[PMEM] ",0 +ramcard_log_msg: dc.b " byte RAM card at ",0 +oom_error_str: dc.b "Out 
of physical memory",0 + + + section .bss,bss + align 1 +stack_page_addr: + ds.b 4 diff --git a/pmem.i b/pmem.i new file mode 100644 index 0000000..5715173 --- /dev/null +++ b/pmem.i @@ -0,0 +1,11 @@ + ifnd PMEM_I +PMEM_I equ 1 +; Initialize the physical memory manager + xref pmem_init +; Pushes a frame onto the stack +; Frame to push in d0 + xref pmem_push_frame +; Pops a frame off the stack +; Returns frame address in d0 + xref pmem_pop_frame + endif diff --git a/start.68k b/start.68k new file mode 100644 index 0000000..43771fd --- /dev/null +++ b/start.68k @@ -0,0 +1,86 @@ + include cards.i + xref main + section .early.text, text + public _start +; Receives pointer to program headers in a0 and number of program headers in d0 +_start: + move.l d0, d2 ; Move the number of program headers to d2 + move.l a0, a1 ; Move the program headaer pointer to a1 + move.w #$5, d0 ; Get the pointer to the MMU card + jsr (find_first_card - $C00000) + cmpa.l #0, a0 ; Abort if there is no MMU card + beq.w no_mmu + subq.w #$1, d2 ; Adjust the number of program headers for dbra + phead_loop: + move.l (a1), d1 ; If the type of the program header isn't 1 (LOAD), skip the header + cmpi.l #$1, d1 + bne.b next_phead + move.b (9,a1), d1 ; If the segment isn't for the high quarter, skip it + cmpi.b #$c0, d1 + bgt.b next_phead + move.l (20,a1), d4 ; Put the memory size in d4 + move.l d4, d3 ; Copy the size to d3 + lsr #8, d3 ; Shift d3 right 12 bits to get the number of full pages the segment takes up + lsr #4, d3 + andi.l #$FFF, d4 ; If the segment takes up a partial page, add 1 to the number of required pages in d3 + cmp.l #$0, d4 + beq.b even_page + addq.l #1, d3 +even_page: + subq.b #1, d3 ; Adjust the number of pages to map for dbra + move.l (12,a1), d1 ; Get the starting physical page in d3 + move.l (24,a1), d4 ; Get the permission flags in d4 + andi.l #$2, d4 ; Isolate the writable flag + or.l d4, d1 ; Copy the writable flag to the page entry + move.l (24,a1), d4 ; Get the permission flags in d4 + andi.l #$1, d4 ; Isolate the executable flag + lsl.l #3, d4 ; Copy the executable flag to the page entry + or.l d4, d1 + ori.l #$1, d1 ; Set the present flag on the entry + move.l (8,a1), d0 ; Get the starting virtual page number in the quarter in d0 + lsr.l #8, d0 + lsr.l #4, d0 + andi.l #$3ff, d0 + lsl #2, d0 ; Get the pointer to the entry for the page in a2 + move.l #(kernel_map - $C00000), a2 + adda d0, a2 +map_loop: + move.l d1, (a2)+ ; Write the entry to the mapping page and advance the entry pointer + addi.l #$1000, d1 ; Advance the entry to the next physical page + dbra d3, map_loop ; Loop back if there are more pages to map +next_phead: + lea ($20,a1), a1 ; Advance a1 to point to the next program header + dbra d2, phead_loop ; If there are more program headers, loop back +io_map: + move.l #15, d0 ; Put the number of IO pages in d0, minus one to adjust for dbra + move.l #(kernel_map - $C00000 + $3f0 * 4), a1 ; Put the poiner to the mapping for virtual page $FF0000 in a1 + move.l #$ffff0003, d1 ; Put the initial map entry in d1 +io_map_loop: + move.l d1, (a1)+ ; Write the entry to the mapping page and advance the entry pointer + addi.l #$1000, d1 ; Advance the entry to the next physical page + dbra d0, io_map_loop ; Loop back if there are more pages to map +map_done: + move.l #(kernel_map - $C00000), d0 ; Load the pointer to the mapping page into d0 + ori.l #$1, d0 ; Set the map page present flag + move.l d0, ($0,a0) ; Write the mapping page to both the lower and upper quarter map registers + move.l d0, 
($C,a0) + jmp (higher_bridge - $C00000) ; Jump to the lower-half equivalent of the bridging function +no_mmu: + stop #$2700 ; If there was no MMU card, halt the CPU + + section .text,text +higher_bridge: + move.w #1, ($14,a0) ; Enable the MMU + jmp in_higher.l ; Jump to the higher half (THis function has been called with the PC in the + ; lower half, so this seemingly no-op jump instruction actually switches to the higher half) +in_higher: + move.w #0, $ff0000 ; Disable the IO space at the top of the lower 16MB of memory + move.l #0, ($0,a0) ; Disable the mapping in the lower quarter + jmp main ; Jump to the kernel's main function + + section .bss,bss + public kernel_map +kernel_map: + align 12 + ds.b 4096 + diff --git a/start.i b/start.i new file mode 100644 index 0000000..ea86f71 --- /dev/null +++ b/start.i @@ -0,0 +1,5 @@ + ifnd START_I +START_I equ 1 + xref __start + xref kernel_map + endif diff --git a/syscall.i b/syscall.i new file mode 100644 index 0000000..c606dec --- /dev/null +++ b/syscall.i @@ -0,0 +1,6 @@ + ifnd SYSCALL_I +SYSCALL_I equ 1 +; Handle a syscall +; Syscall number in d0 + xref handle_syscall + endif diff --git a/syscalls.68k b/syscalls.68k new file mode 100644 index 0000000..5528e64 --- /dev/null +++ b/syscalls.68k @@ -0,0 +1,66 @@ + include tasking.i + include vmem.i + include term.i + section .text,text +; Handle a syscall +; Syscall number in d0 + public handle_syscall +handle_syscall: + ; Get the d0th entry in the syscall table + move.l #syscall_table, a6 + lsl.l #2, d0 + adda.l d0, a6 + move.l (a6), a6 + ; Call the entry + jsr (a6) + rts + +syscall_exit: + jmp tasking_exit + +syscall_yield: + jmp tasking_yield + +syscall_print: + jmp term_print + +syscall_println: + jmp term_println + +; Maps the range of pages starting at address a0 with length d1 to free physical frames +; Permission flags in d2 +syscall_vmem_map: + move.l d1, d0 + move.l #0, d1 + ori.l #$4, d2 + jmp vmem_map + +; Map a free range of pages with length d1 to free physical frames +; Returns the range start in a0 +; Permission flags in d2 +syscall_vmem_map_free: + move.l d1, d0 + move.l #2, d1 + ori.l #$4, d2 + jmp vmem_map_free + +; Maps a free range of virtual pages with length d2 to the range of physical frames starting at d1 +; Returns the range start in a0 +; Permission flags in d3 +syscall_vmem_map_free_to: + move.l d1, d0 + move.l d2, d1 + move.l #2, d2 + ori.l #$4, d3 + jmp vmem_map_free_to + + section .data,data +syscall_table: + align 1 + dc.l syscall_exit + dc.l syscall_yield + dc.l syscall_print + dc.l syscall_println + dc.l syscall_vmem_map + dc.l syscall_vmem_map_free + dc.l syscall_vmem_map_free_to diff --git a/tasking.68k b/tasking.68k new file mode 100644 index 0000000..b01d704 --- /dev/null +++ b/tasking.68k @@ -0,0 +1,234 @@ + clrso +task.pid: so.w 2 +task.stack_frame: so.l 1 +task.stack_ptr: so.l 1 +task.next_ptr: so.l 1 +task.address_space: so.l 4 +task.sizeof equ __SO + + include term.i + include vmem.i + include pmem.i + include memory.i + section .text,text +; Initializes tasking, takes the kernel stack frame in a0 + public tasking_init +tasking_init: + move.l a0, -(a7) + move.l #task.sizeof, d0 ; Allocate space for the task data + jsr malloc +ti_malloc_done: + move.l (a7)+, a1 + move.w #0, (task.pid,a0) + move.l a1, (task.stack_frame,a0) + move.l #0, (task.stack_ptr,a0) + move.l #0, (task.next_ptr,a0) + move.l a0, current_process + move.l #15, d1 ; Put the address space size - 1 in d1 + lea.l (task.address_space,a0), a1 ; Put the address of the task address space in a1 + 
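[Editor's sketch] handle_syscall in syscalls.68k above dispatches by scaling the syscall number in d0 by four and jumping through syscall_table. A minimal C rendering of the same table-driven dispatch follows; the handler names mirror the table's order, their bodies are placeholders of my own, and the bounds check exists only to keep the host sketch safe (the assembly performs none).

    /* Minimal C sketch of the table-driven dispatch done by handle_syscall. */
    #include <stddef.h>
    #include <stdio.h>

    typedef void (*syscall_fn)(void);

    static void sys_exit(void)             { puts("exit"); }
    static void sys_yield(void)            { puts("yield"); }
    static void sys_print(void)            { puts("print"); }
    static void sys_println(void)          { puts("println"); }
    static void sys_vmem_map(void)         { puts("vmem_map"); }
    static void sys_vmem_map_free(void)    { puts("vmem_map_free"); }
    static void sys_vmem_map_free_to(void) { puts("vmem_map_free_to"); }

    /* Same ordering as syscall_table in syscalls.68k. */
    static const syscall_fn syscall_table[] = {
        sys_exit, sys_yield, sys_print, sys_println,
        sys_vmem_map, sys_vmem_map_free, sys_vmem_map_free_to,
    };

    /* The assembly scales d0 by 4 (the size of a table entry) and jumps
     * through the table; in C that is an array index. */
    static void handle_syscall(size_t num)
    {
        if (num < sizeof syscall_table / sizeof syscall_table[0])
            syscall_table[num]();
    }

    int main(void) { handle_syscall(1); return 0; }   /* prints "yield" */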
move.l #kernel_address_space, a0 ; Put the address of the kernel address space in a0 +.1: + move.b (a0)+,(a1)+ + dbra d1, .1 ; Loop back if there is more to copy + move.w #1, next_pid + rts + +; Creates a new process +; Pointer to address space in a0 +; Start address in a1 + public tasking_new_process +tasking_new_process: + movem.l a2/a3, -(a7) + movem.l a0/a1, -(a7) + move.l #32, d0 ; Allocate space for the task data + jsr malloc +tnp_malloc_done: + movem.l (a7)+, a1/a2 + move.w next_pid, d0 ; Get the next free PID + move.w d0, (task.pid,a0) + addi.w #1, d0 ; Increment the counter by 1 + move.w d0, next_pid + movem.l a0/a1, -(a7) + jsr pmem_pop_frame ; Get a frame for the process' kernel stack + movem.l (a7)+, a0/a1 + move.l d0, (task.stack_frame,a0) + ; Map the new process' stack into memory + movem.l a0/a1/d2/d3, -(a7) + move.l #1, d1 + move.l #0, d2 + move.l #$2, d3 + jsr vmem_map_free_to + move.l a0, a3 + movem.l (a7)+, a0/a1/d2/d3 + adda.l #$1000, a3 + ; Push start address onto new process stack + move.l a2, -(a3) + ; Push address of user mode switch shim onto process stack + move.l #tasking_um_switch_shim, -(a3) + ; Push dummy register values onto new process stack + move.l #0, -(a3) + move.l #0, -(a3) + move.l #0, -(a3) + move.l #0, -(a3) + + move.l #0, -(a3) + move.l #0, -(a3) + move.l #0, -(a3) + move.l #0, -(a3) + + move.l #0, -(a3) + move.l #0, -(a3) + move.l #0, -(a3) + ; Set the page number of the new process's kernel stack address to $FEF + move.l a3, d0 + andi.l #$FFF, d0 + ori.l #$FEF000, d0 + move.l d0, (task.stack_ptr,a0) + move.l #0, (task.next_ptr,a0) + ; Unmap the temporary stack mapping + move.l a0, -(a7) + move.l a3, d0 ; Put the page number of the temporary page into a0 + and.l #~($FFF), d0 + move.l d0, a0 + move.l #1, d0 + move.l #0, d1 + jsr vmem_unmap + move.l (a7)+, a0 + move.l #15, d1 ; Put the address space size - 1 in d1 + lea.l (task.address_space,a0), a2 ; Put the address of the task address space in a2 +.1: + move.b (a1)+, (a2)+ + dbra d1, .1 ; Loop back if there is more to copy + jsr rtr_push_head + movem.l (a7)+, a2/a3 + rts + +tasking_um_switch_shim: + move.w sr, d0 ; Push the SR with the supervisor bit cleared to make a "fake" exception frame + andi.w #(~$2000), d0 + move.w d0, -(a7) + rte ; Switch the CPU into usermode and start executing the new process + +; Yields to the next process + public tasking_yield +tasking_yield: + movem.l d2-d7/a2-a6, -(a7) + ; Save the current kernel stack pointer + move.l current_process, a0 + move.l a7, (task.stack_ptr,a0) + jsr rtr_pop ; Get the next ready process + ; Return if there is none + cmp.l #0, a0 + bne.b .1 + movem.l (a7)+, d2-d7/a2-a6 + rts +.1: + move.l a0, a2 ; Save the next process in a2 + ; Push the current process on the ready to run list + move.l current_process, a0 + jsr rtr_push_tail + ; Update the current process to be the next process to run + move.l a2, current_process + ; Activate the next process' address space + lea.l (task.address_space,a2), a0 + jsr vmem_activate_addr_space + ; Set the kernel stack frame to the next process' stack frame + move.l #0, d0 + move.l #$FEF000, a0 + jsr vmem_get_map_ptr + move.l (task.stack_frame,a2), d0 + ori.l #$3, d0 + move.l d0, (a0) + move.l vmem_mmu_base_addr, a1 + move.l #$FEF000, ($10,a1) + move.l (task.stack_ptr,a2), a7 + movem.l (a7)+, d2-d7/a2-a6 + rts + +; Exits the current process + public tasking_exit +tasking_exit: + movem.l d2-d7/a2-a6, -(a7) + jsr rtr_pop ; Get the next ready process + ; Return if there is none + cmp.l #0, a0 + bne.b .1 + move.l 
#last_proc_exited_str, a0 + jsr term_println + stop #$2700 +.1: + move.l a0, a2 ; Save the next process in a2 + ; Update the current process to be the next process to run + move.l a2, current_process + ; Activate the next process' address space + lea.l (task.address_space,a2), a0 + jsr vmem_activate_addr_space + ; Set the kernel stack frame to the next process' stack frame + move.l #0, d0 + move.l #$FEF000, a0 + jsr vmem_get_map_ptr + move.l (task.stack_frame,a2), d0 + ori.l #$3, d0 + move.l d0, (a0) + move.l vmem_mmu_base_addr, a1 + move.l #$FEF000, ($10,a1) + move.l (task.stack_ptr,a2), a7 + movem.l (a7)+, d2-d7/a2-a6 + rts + +; Push the process pointed to by a0 onto the head of the ready to run list +rtr_push_head: + cmp.l #0, ready_to_run_head + bne.b .1 + ; Set the ready to run list to the passed-in process if it is empty + move.l a0, ready_to_run_head + move.l a0, ready_to_run_tail + rts +.1: + move.l ready_to_run_head, (task.next_ptr,a0) ; Set the next pointer of the passed-in process to the current head + move.l a0, ready_to_run_head ; Set the head to the passed-in process + rts + +; Push the process pointed to by a0 onto the tail of the ready to run list +rtr_push_tail: + cmp.l #0, ready_to_run_head + bne.b .1 + ; Set the ready to run list to the passed-in process if it is empty + move.l a0, ready_to_run_head + move.l a0, ready_to_run_tail + rts +.1: + ; Set the next pointer of the tail to the passed-in process + move.l ready_to_run_tail, a1 + move.l a0, (task.next_ptr,a1) + move.l #0, (task.next_ptr,a0) ; Set the next pointer of the passed-in process to NULL + move.l a0, ready_to_run_tail ; Set the tail to the passed-in process + rts + +; Pop a process of the head of the ready to run list +; Address returned in a0, or 0 if no process +rtr_pop: + ; Return 0 if the list is empty + cmp.l #0, ready_to_run_head + bne.b .1 + move.l #0, a0 + rts +.1: + ; Set the head to the current head's next pointer + move.l ready_to_run_head, a0 + move.l (task.next_ptr,a0), ready_to_run_head + ; If the new head is NULL, set the tail to NULL + cmp.l #0, ready_to_run_head + bne.b .2 + move.l #0, ready_to_run_tail +.2: + rts + + section .bss,bss +ready_to_run_head: ds.b 4 +ready_to_run_tail: ds.b 4 +current_process: ds.b 4 +next_pid: ds.b 2 + + section .data,data +last_proc_exited_str: dc.b "Last process exited, halting",0 diff --git a/tasking.i b/tasking.i new file mode 100644 index 0000000..f0373aa --- /dev/null +++ b/tasking.i @@ -0,0 +1,13 @@ + ifnd TASKING_I +TASKING_I equ 1 +; Initializes tasking, takes the kernel stack frame in a0 + xref tasking_init +; Creates a new process +; Pointer to address space in a0 +; Start address in a1 + xref tasking_new_process +; Yields to the next process + xref tasking_yield +; Exits the current process + xref tasking_exit + endif diff --git a/term.68k b/term.68k new file mode 100644 index 0000000..64553d5 --- /dev/null +++ b/term.68k @@ -0,0 +1,35 @@ + include cards.i + section .text,text +; Initializes the terminal card driver + public term_init +term_init: + move.b #$3, d0 ; Get the pointer to the terminal card + jsr find_first_card + move.l a0, term_io_base ; Save the pointer for later use + rts + +; Prints the string pointed to by a0 + public term_print +term_print: + move.l term_io_base, a1 + term_print_loop: + move.b (a0)+, d0 ; Get the next character of the string + cmpi.b #0, d0 ; If NULL, end of string; return + beq.b term_print_done + move.b d0, (a1) ; Send the character to the terminal + bra.b term_print +term_print_done: + rts + +; Prints the string 
pointed to by a0 followed by a newline + public term_println +term_println: + bsr.b term_print + ; take advantage of the known register state after term_print and + ; write the newline w/o loading the address from memory + move.b #$A, (a1) + rts + + section .bss,bss +term_io_base: + ds.b 4 diff --git a/term.i b/term.i new file mode 100644 index 0000000..293547d --- /dev/null +++ b/term.i @@ -0,0 +1,9 @@ + ifnd TERM_I +TERM_I equ 1 +; Initializes the terminal card driver + xref term_init +; Prints the string pointed to by a0 + xref term_print +; Prints the string pointed to by a0 followed by a newline + xref term_println + endif diff --git a/traps.68k b/traps.68k new file mode 100644 index 0000000..4b5fa63 --- /dev/null +++ b/traps.68k @@ -0,0 +1,271 @@ + include syscall.i + include term.i + include string.i + section .text,text + public traps_init +; Initialize trap handling +traps_init: + move.l #trap_table, a0 + movec a0, VBR + rts + +generic_handler: + ; Read the format/vector word and isolate the interrupt vector + move.w ($6,a7), d0 + andi.l #$FF, d0 + cmpi.l #$20, d0 + ; Only the first 32 traps are named, print generic message for the rest + bge.b unk_trap + ; Get the pointer to the trap name in a0 + lsl.w #2, d0 + move.l #trap_name_table, a0 + move.l (a0,d0), a0 + jsr term_print ; Print it + ; Print " Trap" + move.l #trap_string, a0 + jsr term_print + ; Print where the PC was when the trap occured + move.l ($2,a7), a0 + jsr print_pc + ; Halt + stop #$2700 +unk_trap: + ; Print the unknown trap message + move.l #unk_trap_msg, a0 + jsr term_print + ; Convert the trap number to a hex string + move.w ($6,a7), d0 + andi.l #$FF, d0 + move.l #string_buffer, a0 + jsr hex_to_ascii + ; Print it + move.l #string_buffer, a0 + jsr term_print + ; Print where the PC was when the trap occured + move.l ($2,a7), a0 + jsr print_pc + ; Halt + stop #$2700 + +berr_handler: + ; Select the correct prefix based on whether the error was in supervisor or user mode + move.w ($8,a7), d0 + andi.w #$4, d0 + cmpi.w #0, d0 + beq.b .1 + move.l #berr_supervisor_pfx, a0 + bra.b .2 +.1: + move.l #berr_user_pfx, a0 +.2: + jsr term_print ; Print the prefi + ; Print " bus error " + move.l #berr_msg, a0 + jsr term_print + ; Get the error kind out of the special status word (end up with ins flag, word flag, and write flg in that order, shifted left by 2 and put in d0) + move.l #0, d1 + move.w ($8,a7), d0 + andi.w #$100, d0 + cmpi.w #0, d0 + beq.b .3 + move.l #$4, d1 +.3: + move.w ($8,a7), d0 + andi.w #$200, d0 + cmpi.w #0, d0 + beq.b .4 + ori.l #$8, d1 +.4: + move.w ($8,a7), d0 + andi.w #$2000, d0 + cmpi.w #0, d0 + beq.b .5 + ori.l #$10, d1 +.5: + ; Get the pointer to the correct message for this kind of bus error in a0 + move.l #berr_table, a0 + adda.l d1, a0 + move.l (a0), a0 + jsr term_print ; Print it + ; Convert the triggering address to a hex string + move.l ($a,a7), d0 + move.l #string_buffer, a0 + jsr hex_to_ascii + ; Print it + move.l #string_buffer, a0 + jsr term_print + ; Print where the PC was when the trap occured + move.l ($2,a7), a0 + jsr print_pc + stop #$2700 + +int_vector_handler: + movem.l d0-d7/a0-a6,-(a7) + lea.l (60,a7), a6 + ; Print "Interrupt Vector " + move.l #int_vector_msg, a0 + jsr term_print + ; Get the interrupt vector offset + move.w ($6,a6), d0 + andi.l #$FF, d0 + subi.l #$40, d0 + ; Convert it to a hex string + move.l #string_buffer, a0 + jsr hex_to_ascii + ; Print it + move.l #string_buffer, a0 + jsr term_println + movem.l (a7)+, d0-d7/a0-a6 + rte + +user_trap_handler: + movem.l 
d2-d7/a2-a6,-(a7) + jsr handle_syscall + movem.l (a7)+, d2-d7/a2-a6 + rte + + +; Print " (PC: {a0})\n" +; Clobbers d0, a0, a1, a2 +print_pc: + ; Print the message up to the first variable + move.l a0, a2 + move.l #pc_string_start, a0 + jsr term_print + ; Convert the PC value to a hex string + move.l a2, d0 + move.l #string_buffer, a0 + jsr hex_to_ascii + ; Print it + move.l #string_buffer, a0 + jsr term_print + ; Print the rest of the message + move.l #pc_string_end, a0 + jsr term_println + rts + + + section .bss,bss +string_buffer: + ds.b 11 + + section .data,data +unk_trap_msg: dc.b "Unknown trap ",0 +trap_string: dc.b " Trap",0 +berr_msg: dc.b "bus error ",0 +pc_string_start: dc.b " (PC: ",0 +pc_string_end: dc.b ")",0 +int_vector_msg: dc.b "Interrupt Vector ",0 +user_trap_msg: dc.b "User Trap ",0 +berr_supervisor_pfx: dc.b "Supervsior ",0 +berr_user_pfx: dc.b "User ",0 + align 1 +berr_table: + dc.l berr_rdw_msg + dc.l berr_wdw_msg + dc.l berr_rdb_msg + dc.l berr_wdb_msg + dc.l berr_riw_msg + dc.l berr_wiw_msg + dc.l berr_rib_msg + dc.l berr_wib_msg +berr_rdw_msg: dc.b "reading word from ",0 +berr_wdw_msg: dc.b "writing word to ",0 +berr_rdb_msg: dc.b "reading byte from ",0 +berr_wdb_msg: dc.b "writing byte to ",0 +berr_riw_msg: dc.b "reading instruction word from ",0 +berr_wiw_msg: dc.b "writing instruction word to ",0 +berr_rib_msg: dc.b "reading instruction byte from ",0 +berr_wib_msg: dc.b "writing instruction byte to ",0 + align 1 +trap_name_table: + dc.l $0 + dc.l $0 + dc.l berr_msg + dc.l address_trap_name + dc.l ill_ins_trap_name + dc.l zero_div_trap_name + dc.l chk_ins_trap_name + dc.l trapv_trap_name + dc.l priv_viol_trap_name + dc.l trace_trap_name + dc.l line_1010_emu_trap_name + dc.l line_1111_emu_trap_name + dc.l reserved_trap_name + dc.l reserved_trap_name + dc.l format_error_trap_name + dc.l uninit_int_vect_trap_name + dc.l reserved_trap_name + dc.l reserved_trap_name + dc.l reserved_trap_name + dc.l reserved_trap_name + dc.l reserved_trap_name + dc.l reserved_trap_name + dc.l reserved_trap_name + dc.l reserved_trap_name + dc.l spur_interrupt_trap_name + dc.l lev_1_autovec_trap_name + dc.l lev_2_autovec_trap_name + dc.l lev_3_autovec_trap_name + dc.l lev_4_autovec_trap_name + dc.l lev_5_autovec_trap_name + dc.l lev_6_autovec_trap_name + dc.l lev_7_autovec_trap_name +address_trap_name: dc.b "Address Error",0 +ill_ins_trap_name: dc.b "Illegal Instruction",0 +zero_div_trap_name: dc.b "Zero Divide",0 +chk_ins_trap_name: dc.b "CHK Instruction",0 +trapv_trap_name: dc.b "TRAPV Instruction",0 +priv_viol_trap_name: dc.b "Privilege Violation",0 +trace_trap_name: dc.b "Trace",0 +line_1010_emu_trap_name: dc.b "Line 1010 Emulator",0 +line_1111_emu_trap_name: dc.b "Line 1111 Emulator",0 +reserved_trap_name: dc.b "Reserved",0 +format_error_trap_name: dc.b "Format Error",0 +uninit_int_vect_trap_name: dc.b "Uninitialized Interrupt Vector",0 +spur_interrupt_trap_name: dc.b "Spurious Interrupt",0 +lev_1_autovec_trap_name: dc.b "Level 1 Interrupt Autovector",0 +lev_2_autovec_trap_name: dc.b "Level 2 Interrupt Autovector",0 +lev_3_autovec_trap_name: dc.b "Level 3 Interrupt Autovector",0 +lev_4_autovec_trap_name: dc.b "Level 4 Interrupt Autovector",0 +lev_5_autovec_trap_name: dc.b "Level 5 Interrupt Autovector",0 +lev_6_autovec_trap_name: dc.b "Level 6 Interrupt Autovector",0 +lev_7_autovec_trap_name: dc.b "Level 7 Interrupt Autovector",0 + align 10 +trap_table: + dc.l generic_handler ; Reset initial SSP, should never be called + dc.l generic_handler ; Reset initial PC, should never be called 
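[Editor's sketch] berr_handler above classifies a bus error by packing three bits of the special status word into a byte offset into berr_table, an eight-entry message table. The C below reproduces only that table arithmetic; the bit positions ($100, $200, $2000) and the message order are taken from the handler itself, not asserted to match any external 68010 reference, and the helper name berr_message is my own.

    /* C sketch of the message selection in berr_handler. */
    #include <stdint.h>
    #include <stdio.h>

    static const char *const berr_table[8] = {
        "reading word from",             "writing word to",
        "reading byte from",             "writing byte to",
        "reading instruction word from", "writing instruction word to",
        "reading instruction byte from", "writing instruction byte to",
    };

    static const char *berr_message(uint16_t ssw)
    {
        unsigned idx = 0;
        if (ssw & 0x0100) idx |= 1;  /* handler ORs in $4  -> table entry bit 0 */
        if (ssw & 0x0200) idx |= 2;  /* handler ORs in $8  -> table entry bit 1 */
        if (ssw & 0x2000) idx |= 4;  /* handler ORs in $10 -> table entry bit 2 */
        return berr_table[idx];
    }

    int main(void)
    {
        /* An SSW with bit 8 set selects the "writing word to" message. */
        printf("Supervisor bus error %s 0x00C00000\n", berr_message(0x0100));
        return 0;
    }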
+ dc.l berr_handler ; Bus Error + dc.l generic_handler ; Address Error + dc.l generic_handler ; Illegal Instruction + dc.l generic_handler ; Zero Divide + dc.l generic_handler ; CHK Instruction + dc.l generic_handler ; TRAPV Instruction + dc.l generic_handler ; Privilege Violation + dc.l generic_handler ; Trace + dc.l generic_handler ; Line 1010 Emulator + dc.l generic_handler ; Line 1111 Emulator + dc.l generic_handler ; Reserved + dc.l generic_handler ; Reserved + dc.l generic_handler ; Format Error + dc.l generic_handler ; Uninitialized Interrupt Vector + rept 8 + dc.l generic_handler ; Reserved + endr + dc.l generic_handler ; Spurious Interrupt + dc.l generic_handler ; Level 1 Interrupt Autovector + dc.l generic_handler ; Level 2 Interrupt Autovector + dc.l generic_handler ; Level 3 Interrupt Autovector + dc.l generic_handler ; Level 4 Interrupt Autovector + dc.l generic_handler ; Level 5 Interrupt Autovector + dc.l generic_handler ; Level 6 Interrupt Autovector + dc.l generic_handler ; Level 7 Interrupt Autovector + rept 16 + dc.l user_trap_handler + endr + rept 16 + dc.l generic_handler ; Reserved + endr + rept 192 + dc.l int_vector_handler + endr diff --git a/traps.i b/traps.i new file mode 100644 index 0000000..5241cbe --- /dev/null +++ b/traps.i @@ -0,0 +1,5 @@ + ifnd TRAPS_I +TRAPS_I equ 1 +; Initialize trap handling + xref traps_init + endif diff --git a/vmem.68k b/vmem.68k new file mode 100644 index 0000000..056f2e3 --- /dev/null +++ b/vmem.68k @@ -0,0 +1,491 @@ + include pmem.i + include cards.i + include start.i + section .text,text +; Initialize the virtual memory manager + public vmem_init +vmem_init: + movem.l d2/d3, -(a7) + move.w #$5, d0 ; Get the pointer to the MMU card + jsr find_first_card + move.l a0, vmem_mmu_base_addr ; Save it for later use + ; Map 3 pages to dummy physical frames for use in accessing user page mappings + move.l #$FFFFC000, d0 + move.l #3, d1 + move.l #0, d2 + move.l #$2, d3 + jsr vmem_map_free_to + move.l a0, vmem_active_space_mapping_ptr ; Save the pointer for later use + ; Same as above for the secondary address space + move.l #$FFFFC000, d0 + move.l #3, d1 + move.l #0, d2 + move.l #$2, d3 + jsr vmem_map_free_to + move.l a0, vmem_secondary_space_mapping_ptr ; Save the pointer for later use + movem.l (a7)+, d2/d3 + ; Activate the kernel address space + move.l #kernel_address_space, a0 + jsr vmem_activate_addr_space + rts + +; Clears the TLB entry of the page pointed to by a0 + public vmem_clear_tlb_entry +vmem_clear_tlb_entry: + ; Write the address to the TLB clear register of the MMU card + move.l vmem_mmu_base_addr, a1 + move.l a0, ($10,a1) + rts + +; Activates the address space pointed to by a0 + public vmem_activate_addr_space +vmem_activate_addr_space: + movem.l a2, -(a7) + move.l a0, vmem_active_space_ptr ; Set the pointer to the current address space + move.l vmem_mmu_base_addr, a1 ; Get the MMU card base into a1 + move.l vmem_active_space_mapping_ptr, a2 ; Load the pointer to the active space mapping pages into a2 + move.l #2, d0 ; Loop 3 times, count -1 due to dbra looping n+1 times +vaas_loop: + move.l (a0)+, d1 ; Read the next mapping frame into d1 + movem.l d0/d1/d2/d3/a0/a1, -(a7) + ; Map the mapping page pointed to by a2 to the mapping frame just read into d1 + move.l a2, a0 + move.l d1, d0 + move.l #1, d1 + move.l #0, d2 + move.l #$2, d3 + jsr vmem_map_to + movem.l (a7)+, d0/d1/d2/d3/a0/a1 + cmp.l #0, d1 ; If the mapping frame isn't 0, mark it as active + beq.b .1 + ori.l #3, d1 +.1: + move.l d1, (a1)+ ; Write the frame to the 
next quarter mapping register in the MMU + adda.l #$1000, a2 ; Advance to the next mapping page + dbra d0, vaas_loop ; Loop back if there are more frames to read + movem.l (a7)+, a2 + rts + +; Sets the secondary address space + public vmem_set_secondary_addr_space +vmem_set_secondary_addr_space: + move.l a0, vmem_secondary_space_ptr ; Set the pointer to the secondary address space + move.l vmem_secondary_space_mapping_ptr, a1 ; Load the pointer to the secondary space mapping pages into a1 + move.l #2, d0 ; Loop 3 times, count -1 due to dbra looping n+1 times +vssas_loop: + move.l (a0)+, d1 ; Read the next mapping frame into d1 + movem.l d0/d1/d2/d3/a0, -(a7) + ; Map the mapping page pointed to by a1 to the mapping frame just read into d1 + move.l a1, a0 + move.l d1, d0 + move.l #1, d1 + move.l #0, d2 + move.l #$2, d3 + jsr vmem_map_to + movem.l (a7)+, d0/d1/d2/d3/a0 + adda.l #$1000, a1 ; Advance to the next mapping page + dbra d0, vssas_loop ; Loop back if there are more frames to read + rts + +; Get the pointer to the mapping entry for the page in a0 +; Pointer returned in a0 +; Address space number in d0 + public vmem_get_map_ptr +vmem_get_map_ptr: + move.l d2, -(a7) + ; Save the space # in d2 + move.l d0, d2 + ; Get the quarter number * 4096 in d0 (offset of mapping page), and the page number in the quarter * 4 in d1 + ; qnum_x4k = (addr >> 10 & 0x3000) + ; q_pnum_x4 = (addr >> 10 & 0xFFC) + move.l a0, d0 + lsr.l #8, d0 + lsr.l #2, d0 + move.l d0, d1 + andi.l #$3000, d0 + andi.l #$FFC, d1 + ; If the quarter is the kernel quarter, the mapping page is fixed + ; and we can compute the pointer w/o using the user mapping pages + cmp.l #$3000, d0 + bne.b .1 + move.l #kernel_map, a0 + bra.b .5 +.1: + ; Get the correct pointers for the space #. Active space for 0, secondary space for 1 + cmp.l #0, d2 + bne.b .2 + move.l vmem_active_space_mapping_ptr, a0 + move.l vmem_active_space_ptr, a1 + bra.b .4 +.2: + cmp.l #1, d2 + bne.b .3 + move.l vmem_secondary_space_mapping_ptr, a0 + move.l vmem_secondary_space_ptr, a1 + bra.b .4 +.3: + ; Halt if invalid space # passed + stop #2700 +.4: + ; Add the mapping page offset to the base of the mapping pages for the space + adda.l d0, a0 + ; Shift the mapping page offset to get the quarter # * 4 in d0 + lsr.l #8, d0 + lsr.l #2, d0 + ; Get the mapping frame for that quarter + move.l (a1,d0), d0 + ; Return 0 if the frame is not present + cmp.l #0, d0 + bne.b .5 + move.l #0, a0 + move.l (a7)+, d2 + rts +.5: + ; Return the mapping page address + (page # * 4) + lea.l (a0,d1), a0 + move.l (a7)+, d2 + rts + +; Unmaps the virtual page at address a0 +; Address space number in d0 + public vmem_unmap_page +vmem_unmap_page: + movem.l d0/a0, -(a7) + ; Clear the page's TLB entry + bsr.w vmem_clear_tlb_entry + movem.l (a7)+, d0/a0 + ; Set the page entry to 0 + bsr.w vmem_get_map_ptr + move.l #0, (a0) + rts + +; Sets the mapping for the virtual page at address a0 to d0 +; Address space number in d1 +vmem_set_page_mapping: + movem.l d2/a2, -(a7) + movem.l d0/d1/a0, -(a7) + ; Clear the page's TLB entry + bsr.w vmem_clear_tlb_entry + movem.l (a7)+, d0/d1/a0 + movem.l d0/d1/a0, -(a7) + ; Get the mapping pointer for the passed-in page + move.l d1, d0 + bsr.w vmem_get_map_ptr + move.l a0, a2 ; Save it in a2 + movem.l (a7)+, d0/d1/a0 + ; If there is no pointer, a mapping frame must be allocated, else write the mapping + cmpa.l #0, a2 + bne.b .4 + move.l d0, -(a7) + movem.l d1/a0, -(a7) + jsr pmem_pop_frame ; Get a physical frame to use + movem.l (a7)+, d1/a0 + ; Get the passed-in 
page's quarter # * 4 in d2 + move.l a0, d2 + lsr.l #8, d2 + lsr.l #8, d2 + lsr.l #4, d2 + andi.l #%1100, d2 + ; Set the new physical frame as the mapping for the passed-in page's quarter and update the specified address space + cmp.l #0, d1 + bne.b .1 + movem.l d1/a0, -(a7) + move.l vmem_active_space_ptr, a0 + move.l d0, (a0,d2) + bsr.w vmem_activate_addr_space + movem.l (a7)+, d1/a0 + bra.b .3 +.1: + cmp.l #1, d1 + bne.b .2 + movem.l d1/a0, -(a7) + move.l vmem_secondary_space_ptr, a0 + move.l d0, (a0,d2) + bsr.w vmem_set_secondary_addr_space + movem.l (a7)+, d1/a0 + bra.b .3 +.2: + ; Halt if invalid space # passed + stop #2700 +.3: + ; Get the new mapping pointer + move.l d1, d0 + bsr.w vmem_get_map_ptr + move.l a0, a2 + move.l (a7)+, d0 +.4: + move.l d0, (a2) ; Write the mapping to the mapping page + movem.l (a7)+, d2/a2 + rts + +; Sets the permission flags for the virtual page at address a0 to d0 +; Address space number in d1 +vmem_set_page_flags: + movem.l d2/a2, -(a7) + movem.l d0/d1/a0, -(a7) + ; Clear the page's TLB entry + bsr.w vmem_clear_tlb_entry + movem.l (a7)+, d0/d1/a0 + movem.l d0/d1/a0, -(a7) + move.l d1, d0 + ; Get the mapping pointer for the passed-in page + bsr.w vmem_get_map_ptr + move.l a0, a2 + movem.l (a7)+, d0/d1/a1 + ; If there is no pointer, return + cmpa.l #0, a2 + beq.b .1 + move.l (a2), d1 ; Read the old entry + andi.l #(~$FFE), d1 ; Clear the permission flags on the old entry + or.l d0, d1 ; Set the new permission flags + move.l d1, (a2) ; Write the entry +.1: + movem.l (a7)+, d2/a2 + rts + +; Maps the virtual page at address a0 to the physical frame at address d0 +; d0 must have none of its lower 12 bits set +; Address space number in d1 +; Permission flags in d2 +vmem_map_page_to: + ; Set the passed-in permission flags and the present flag on the passed-in page number + or.l d2, d0 + or.l #1, d0 + bra.w vmem_set_page_mapping + +; Maps the virtual page at address a0 to a free physical frame +; Address space number in d0 +; Permission flags in d1 +vmem_map_page: + ; Separate the register pushes to allow popping them directly + ; into the right registers for vmem_map_page_to's arguments + move.l d2, -(a7) + move.l a0, -(a7) + move.l d0, -(a7) + move.l d1, -(a7) + jsr pmem_pop_frame + movem.l (a7)+, d2 + movem.l (a7)+, d1 + movem.l (a7)+, a0 + bsr.w vmem_map_page_to + move.l (a7)+, d2 + rts + +; Unmaps the range of virtual pages at address a0 with length d0 +; Address space number in d1 + public vmem_unmap +vmem_unmap: + subi.l #1, d0 ; Subtract 1 to account for the extra loop done by dbra +vmem_um_loop: + movem.l d0/d1/a0, -(a7) + move.l d1, d0 + bsr.w vmem_unmap_page + movem.l (a7)+, d0/d1/a0 + adda.l #$1000, a0 + dbra d0, vmem_um_loop + rts + +; Sets the permission flags of the range of virtual pages starting at address a0 with length d1 to d0 +; Address space number in d2 + public vmem_set_flags +vmem_set_flags: + subi.l #1, d1 ; Subtract 1 to account for the extra loop done by dbra +vmem_sf_loop: + movem.l d0/d1/a0, -(a7) + move.l d2, d1 + bsr.w vmem_set_page_flags + movem.l (a7)+, d0/d1/a0 + adda.l #$1000, a0 + dbra d1, vmem_sf_loop + rts + +; Maps the range of virtual pages starting at address a0 with length d1 to the range of physical frames starting at d0 +; d0 must have none of its lower 12 bits set +; Address space number in d2 +; Permission flags in d3 + public vmem_map_to +vmem_map_to: + subi.l #1, d1 ; Subtract 1 to account for the extra loop done by dbra +vmem_mt_loop: + movem.l d0/d1/d2/a0, -(a7) + move.l d2, d1 + move.l d3, d2 + bsr.w 
+; Maps the range of virtual pages starting at address a0 with length d1 to the range of physical frames starting at d0
+; d0 must have none of its lower 12 bits set
+; Address space number in d2
+; Permission flags in d3
+    public vmem_map_to
+vmem_map_to:
+    subi.l #1, d1 ; Subtract 1 to account for the extra loop done by dbra
+vmem_mt_loop:
+    movem.l d0/d1/d2/a0, -(a7)
+    move.l d2, d1
+    move.l d3, d2
+    bsr.w vmem_map_page_to
+    movem.l (a7)+, d0/d1/d2/a0
+    adda.l #$1000, a0
+    add.l #$1000, d0
+    dbra d1, vmem_mt_loop
+    rts
+
+; Maps the range of virtual pages starting at address a0 with length d0 to free physical frames
+; Address space number in d1
+; Permission flags in d2
+    public vmem_map
+vmem_map:
+    subi.l #1, d0 ; Subtract 1 to account for the extra loop done by dbra
+vmem_m_loop:
+    movem.l d0/d1/a0, -(a7)
+    move.l d1, d0
+    move.l d2, d1
+    bsr.w vmem_map_page
+    movem.l (a7)+, d0/d1/a0
+    adda.l #$1000, a0
+    dbra d0, vmem_m_loop
+    rts
+
+; Maps a free range of virtual pages with length d0 to free physical frames
+; Returns the range start in a0
+; Address space number in d1
+; Permission flags in d2
+    public vmem_map_free
+vmem_map_free:
+    movem.l d0/d1/d3, -(a7)
+    move.l d1, d3
+    ; Use bit 1 (mask $2) of the address space # to choose between getting free user (set)
+    ; or kernel (clear) pages, as only 0 and 1 are valid address space #s, which need only bit 0.
+    andi.l #$2, d3
+    beq.b .1
+    bsr.w vmem_get_free_user_pages
+    bra.b .2
+.1:
+    bsr.b vmem_get_free_kernel_pages
+.2:
+    movem.l (a7)+, d0/d1/d3
+    ; Clear the selector bit to make the passed-in address space # valid for the rest of the VMM code
+    andi.l #$1, d1
+    move.l a0, -(a7)
+    bsr.b vmem_map
+    move.l (a7)+, a0
+    rts
+
+; Maps a free range of virtual pages with length d1 to the range of physical frames starting at d0
+; Returns the range start in a0
+; Address space number in d2
+; Permission flags in d3
+    public vmem_map_free_to
+vmem_map_free_to:
+    movem.l d0/d1/d3, -(a7)
+    move.l d1, d0
+    move.l d2, d3
+    ; Use bit 1 (mask $2) of the address space # to choose between getting free user (set)
+    ; or kernel (clear) pages, as only 0 and 1 are valid address space #s, which need only bit 0.
+    andi.l #$2, d3
+    beq.b .1
+    bsr.w vmem_get_free_user_pages
+    bra.b .2
+.1:
+    bsr.b vmem_get_free_kernel_pages
+.2:
+    movem.l (a7)+, d0/d1/d3
+    ; Clear the selector bit to make the passed-in address space # valid for the rest of the VMM code
+    andi.l #$1, d2
+    move.l a0, -(a7)
+    bsr.w vmem_map_to
+    move.l (a7)+, a0
+    rts
+
+; Copies the range of page mappings at address a0 in the primary space with length d0 to the secondary space starting at address a1
+    public vmem_copy_to_secondary
+vmem_copy_to_secondary:
+    move.l d0, -(a7)
+    ; Get the mapping pointer for the start page in the primary address space
+    move.l #0, d0
+    bsr.w vmem_get_map_ptr
+    move.l (a7)+, d0
+    subi.l #1, d0 ; Subtract 1 to account for the extra loop done by dbra
+vmem_cts_loop:
+    ; Read the next page mapping and increment the pointer
+    move.l (a0)+, d1
+    movem.l d0/a0/a1, -(a7)
+    ; Set the same mapping in the secondary address space at address a1
+    move.l d1, d0
+    move.l #1, d1
+    move.l a1, a0
+    bsr.w vmem_set_page_mapping
+    movem.l (a7)+, d0/a0/a1
+    adda.l #$1000, a1
+    dbra d0, vmem_cts_loop
+    rts
+
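Illustrative sketch (not from the patch) of the vmem_map_free convention; the flag value $2 simply mirrors the one used for the mapping pages in vmem_set_secondary_addr_space, and its exact meaning depends on the MMU card:

        ; Illustrative only: reserve four free kernel pages in the active address space
        move.l #4, d0       ; Length of the range in pages
        move.l #0, d1       ; Active space; bit 1 clear selects the kernel quarter
        move.l #$2, d2      ; Permission flags
        jsr vmem_map_free
        ; a0 now holds the start of the newly mapped 16 KiB range
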
+; Get a range of free kernel pages with length in d0 and return its start in a0
+    public vmem_get_free_kernel_pages
+vmem_get_free_kernel_pages:
+    move.l d2, -(a7)
+    move.l d0, d1 ; Set the remaining page # count to the length of the requested range
+    move.l #$C00000, a0 ; Put the start page of the search in a0
+vmem_gfkp_loop:
+    movem.l d0/d1/a0, -(a7)
+    move.l #0, d0 ; Get the pointer for the current page
+    bsr.w vmem_get_map_ptr
+    ; If NULL, load dummy free mapping, otherwise read page mapping into d2
+    cmpa.l #0, a0
+    beq.b .1
+    move.l (a0), d2
+    bra.b .2
+.1:
+    move.l #0, d2
+.2:
+    movem.l (a7)+, d0/d1/a0
+    btst #0, d2 ; If the page is not free, skip it
+    bne.b .3
+    subi.l #1, d1
+    bne.b .4 ; If we have not found a run of pages of the right length, continue to check for free pages
+    adda.l #$1000, a0 ; Start address is (current_page + $1000 - (length * $1000)), compute this in d0 and return
+    lsl.l #8, d0
+    lsl.l #4, d0
+    suba.l d0, a0
+    move.l (a7)+, d2
+    rts
+.3:
+    move.l d0, d1 ; Page not free, reset the page # count and try again
+.4:
+    adda.l #$1000, a0 ; Move to the next page and continue the search
+    bra.b vmem_gfkp_loop
+
+; Get a range of free user pages with length in d0 and return its start in a0
+    public vmem_get_free_user_pages
+vmem_get_free_user_pages:
+    move.l d2, -(a7)
+    move.l d0, d1 ; Set the remaining page # count to the length of the requested range
+    move.l #$1000, a0 ; Put the start page of the search in a0
+vmem_gfup_loop:
+    movem.l d0/d1/a0, -(a7)
+    move.l #0, d0 ; Get the pointer for the current page
+    bsr.w vmem_get_map_ptr
+    ; If NULL, load dummy free mapping, otherwise read page mapping into d2
+    cmpa.l #0, a0
+    beq.b .1
+    move.l (a0), d2
+    bra.b .2
+.1:
+    move.l #0, d2
+.2:
+    movem.l (a7)+, d0/d1/a0
+    btst #0, d2 ; If the page is not free, skip it
+    bne.b .3
+    subi.l #1, d1
+    bne.b .4 ; If we have not found a run of pages of the right length, continue to check for free pages
+    adda.l #$1000, a0 ; Start address is (current_page + $1000 - (length * $1000)), compute this in d0 and return
+    lsl.l #8, d0
+    lsl.l #4, d0
+    suba.l d0, a0
+    move.l (a7)+, d2
+    rts
+.3:
+    move.l d0, d1 ; Page not free, reset the page # count and try again
+.4:
+    adda.l #$1000, a0 ; Move to the next page and continue the search
+    bra.b vmem_gfup_loop
+
+    section .data,data
+    public kernel_address_space
+kernel_address_space:
+    dc.l $0, $0, $0, kernel_map - $C00000
+vmem_active_space_ptr: dc.l kernel_address_space
+
+    section .bss,bss
+    public vmem_mmu_base_addr
+vmem_mmu_base_addr: ds.l 1
+vmem_secondary_space_ptr: ds.l 1
+vmem_active_space_mapping_ptr: ds.l 1
+vmem_secondary_space_mapping_ptr: ds.l 1
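A worked example of the start-address computation used by both scanners above (the concrete run is hypothetical):

        ; Suppose a 4-page run is requested (d0 = 4) and the free run found is
        ; $C02000-$C05FFF. When the remaining count in d1 reaches zero, a0 points
        ; at the last page of the run, so:
        ;   a0 = $C05000 + $1000 = $C06000
        ;   d0 = 4 << 12         = $4000
        ;   a0 = $C06000 - $4000 = $C02000   ; start of the run, returned to the caller
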
diff --git a/vmem.i b/vmem.i
new file mode 100644
index 0000000..00b63e4
--- /dev/null
+++ b/vmem.i
@@ -0,0 +1,51 @@
+    ifnd VMEM_I
+VMEM_I equ 1
+; Initialize the virtual memory manager
+    xref vmem_init
+; Clears the TLB entry of the page pointed to by a0
+    xref vmem_clear_tlb_entry
+; Activates the address space pointed to by a0
+    xref vmem_activate_addr_space
+; Sets the secondary address space
+    xref vmem_set_secondary_addr_space
+; Get the pointer to the mapping entry for the page in a0
+; Pointer returned in a0
+; Address space number in d0
+    xref vmem_get_map_ptr
+; Unmaps the virtual page at address a0
+; Address space number in d0
+    xref vmem_unmap_page
+; Unmaps the range of virtual pages at address a0 with length d0
+; Address space number in d1
+    xref vmem_unmap
+; Sets the permission flags of the range of virtual pages starting at address a0 with length d1 to d0
+; Address space number in d2
+    xref vmem_set_flags
+; Maps the range of virtual pages starting at address a0 with length d1 to the range of physical frames starting at d0
+; d0 must have none of its lower 12 bits set
+; Address space number in d2
+; Permission flags in d3
+    xref vmem_map_to
+; Maps the range of virtual pages starting at address a0 with length d0 to free physical frames
+; Address space number in d1
+; Permission flags in d2
+    xref vmem_map
+; Maps a free range of virtual pages with length d0 to free physical frames
+; Returns the range start in a0
+; Address space number in d1
+; Permission flags in d2
+    xref vmem_map_free
+; Maps a free range of virtual pages with length d1 to the range of physical frames starting at d0
+; Returns the range start in a0
+; Address space number in d2
+; Permission flags in d3
+    xref vmem_map_free_to
+; Copies the range of page mappings at address a0 in the primary space with length d0 to the secondary space starting at address a1
+    xref vmem_copy_to_secondary
+; Get a range of free kernel pages with length in d0 and return its start in a0
+    xref vmem_get_free_kernel_pages
+; Get a range of free user pages with length in d0 and return its start in a0
+    xref vmem_get_free_user_pages
+    xref kernel_address_space
+    xref vmem_mmu_base_addr
+    endif
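Finally, an illustrative sketch (not from the patch) of how another kernel module might consume this interface; the section choice and register values are assumptions:

        include "vmem.i"

        section .text,text
        ; Illustrative only: map three free user pages in the active address space
        move.l #3, d0       ; Length of the range in pages
        move.l #2, d1       ; Active space (0) with bit 1 ($2) set = search the user quarters
        move.l #$2, d2      ; Permission flags (MMU-specific; see vmem.68k)
        jsr vmem_map_free
        ; a0 now holds the start of the new user mapping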