; kernel/vmem.68k

include pmem.i
include cards.i
include start.i
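; MMU card interface as used in this file (a sketch inferred from the code
; below; the card's own documentation is authoritative):
;   base+$00, +$04, +$08  mapping-frame registers for user quarters 0-2,
;                         written by vmem_activate_addr_space
;   base+$10              TLB clear register, written by vmem_clear_tlb_entry
; The kernel quarter (quarter 3) is never written here and is treated as fixed.
; Mapping entries are one longword per 4 KiB page:
;   bit 0      present flag
;   bits 1-11  permission flags
;   bits 12+   physical frame address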
section .text,text
; Initialize the virtual memory manager
public vmem_init
vmem_init:
movem.l d2/d3, -(a7)
move.w #$5, d0 ; $5 identifies the MMU card
jsr find_first_card ; Get the pointer to the MMU card
move.l a0, vmem_mmu_base_addr ; Save it for later use
; Map 3 pages to dummy physical frames for use in accessing user page mappings
move.l #$FFFFC000, d0
move.l #3, d1
move.l #0, d2
move.l #$2, d3
jsr vmem_map_free_to
move.l a0, vmem_active_space_mapping_ptr ; Save the pointer for later use
; Same as above for the secondary address space
move.l #$FFFFC000, d0
move.l #3, d1
move.l #0, d2
move.l #$2, d3
jsr vmem_map_free_to
move.l a0, vmem_secondary_space_mapping_ptr ; Save the pointer for later use
movem.l (a7)+, d2/d3
; Activate the kernel address space
move.l #kernel_address_space, a0
jsr vmem_activate_addr_space
rts
; Clears the TLB entry of the page pointed to by a0
public vmem_clear_tlb_entry
vmem_clear_tlb_entry:
; Write the address to the TLB clear register of the MMU card
move.l vmem_mmu_base_addr, a1
move.l a0, ($10,a1)
rts
; Activates the address space pointed to by a0
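; An address space is a table of 4 longwords, one mapping frame per quarter,
; laid out like kernel_address_space in .data below.
; Example call (illustrative only; assumes a user_address_space table defined elsewhere):
;     move.l #user_address_space, a0
;     jsr vmem_activate_addr_space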
public vmem_activate_addr_space
vmem_activate_addr_space:
movem.l a2, -(a7)
move.l a0, vmem_active_space_ptr ; Set the pointer to the current address space
move.l vmem_mmu_base_addr, a1 ; Get the MMU card base into a1
move.l vmem_active_space_mapping_ptr, a2 ; Load the pointer to the active space mapping pages into a2
move.l #2, d0 ; Loop 3 times, count -1 due to dbra looping n+1 times
vaas_loop:
move.l (a0)+, d1 ; Read the next mapping frame into d1
movem.l d0/d1/d2/d3/a0/a1, -(a7)
; Map the mapping page pointed to by a2 to the mapping frame just read into d1
move.l a2, a0
move.l d1, d0
move.l #1, d1
move.l #0, d2
move.l #$2, d3
jsr vmem_map_to
movem.l (a7)+, d0/d1/d2/d3/a0/a1
cmp.l #0, d1 ; If the mapping frame isn't 0, mark it as active
beq.b .1
ori.l #3, d1
.1:
move.l d1, (a1)+ ; Write the frame to the next quarter mapping register in the MMU
adda.l #$1000, a2 ; Advance to the next mapping page
dbra d0, vaas_loop ; Loop back if there are more frames to read
movem.l (a7)+, a2
rts
; Sets the secondary address space
public vmem_set_secondary_addr_space
vmem_set_secondary_addr_space:
move.l a0, vmem_secondary_space_ptr ; Set the pointer to the secondary address space
move.l vmem_secondary_space_mapping_ptr, a1 ; Load the pointer to the secondary space mapping pages into a1
move.l #2, d0 ; Loop 3 times, count -1 due to dbra looping n+1 times
vssas_loop:
move.l (a0)+, d1 ; Read the next mapping frame into d1
movem.l d0/d1/d2/d3/a0, -(a7)
; Map the mapping page pointed to by a1 to the mapping frame just read into d1
move.l a1, a0
move.l d1, d0
move.l #1, d1
move.l #0, d2
move.l #$2, d3
jsr vmem_map_to
movem.l (a7)+, d0/d1/d2/d3/a0
adda.l #$1000, a1 ; Advance to the next mapping page
dbra d0, vssas_loop ; Loop back if there are more frames to read
rts
; Get the pointer to the mapping entry for the page in a0
; Pointer returned in a0
; Address space number in d0
public vmem_get_map_ptr
vmem_get_map_ptr:
move.l d2, -(a7)
; Save the space # in d2
move.l d0, d2
; Get the quarter number * 4096 in d0 (offset of mapping page), and the page number in the quarter * 4 in d1
; qnum_x4k = (addr >> 10 & 0x3000)
; q_pnum_x4 = (addr >> 10 & 0xFFC)
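; Worked example: for a0 = $C23000 (page $23 of the kernel quarter),
;   addr >> 10           = $308C
;   qnum_x4k  = & $3000  = $3000 (quarter 3, the kernel quarter)
;   q_pnum_x4 = & $FFC   = $8C   (page $23 within the quarter, times 4)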
move.l a0, d0
lsr.l #8, d0
lsr.l #2, d0
move.l d0, d1
andi.l #$3000, d0
andi.l #$FFC, d1
; If the quarter is the kernel quarter, the mapping page is fixed
; and we can compute the pointer w/o using the user mapping pages
cmp.l #$3000, d0
bne.b .1
move.l #kernel_map, a0
bra.b .5
.1:
; Get the correct pointers for the space #. Active space for 0, secondary space for 1
cmp.l #0, d2
bne.b .2
move.l vmem_active_space_mapping_ptr, a0
move.l vmem_active_space_ptr, a1
bra.b .4
.2:
cmp.l #1, d2
bne.b .3
move.l vmem_secondary_space_mapping_ptr, a0
move.l vmem_secondary_space_ptr, a1
bra.b .4
.3:
; Halt if invalid space # passed
stop #$2700
.4:
; Add the mapping page offset to the base of the mapping pages for the space
adda.l d0, a0
; Shift the mapping page offset to get the quarter # * 4 in d0
lsr.l #8, d0
lsr.l #2, d0
; Get the mapping frame for that quarter
move.l (a1,d0), d0
; Return 0 if the frame is not present
cmp.l #0, d0
bne.b .5
move.l #0, a0
move.l (a7)+, d2
rts
.5:
; Return the mapping page address + (page # * 4)
lea.l (a0,d1), a0
move.l (a7)+, d2
rts
; Unmaps the virtual page at address a0
; Address space number in d0
public vmem_unmap_page
vmem_unmap_page:
movem.l d0/a0, -(a7)
; Clear the page's TLB entry
bsr.w vmem_clear_tlb_entry
movem.l (a7)+, d0/a0
; Set the page entry to 0
bsr.w vmem_get_map_ptr
move.l #0, (a0)
rts
; Sets the mapping for the virtual page at address a0 to d0
; Address space number in d1
vmem_set_page_mapping:
movem.l d2/a2, -(a7)
movem.l d0/d1/a0, -(a7)
; Clear the page's TLB entry
bsr.w vmem_clear_tlb_entry
movem.l (a7)+, d0/d1/a0
movem.l d0/d1/a0, -(a7)
; Get the mapping pointer for the passed-in page
move.l d1, d0
bsr.w vmem_get_map_ptr
move.l a0, a2 ; Save it in a2
movem.l (a7)+, d0/d1/a0
; If there is no pointer, a mapping frame must be allocated, else write the mapping
cmpa.l #0, a2
bne.b .4
move.l d0, -(a7)
movem.l d1/a0, -(a7)
jsr pmem_pop_frame ; Get a physical frame to use
movem.l (a7)+, d1/a0
; Get the passed-in page's quarter # * 4 in d2
move.l a0, d2
lsr.l #8, d2
lsr.l #8, d2
lsr.l #4, d2
andi.l #%1100, d2
; Set the new physical frame as the mapping for the passed-in page's quarter and update the specified address space
cmp.l #0, d1
bne.b .1
movem.l d1/a0, -(a7)
move.l vmem_active_space_ptr, a0
move.l d0, (a0,d2)
bsr.w vmem_activate_addr_space
movem.l (a7)+, d1/a0
bra.b .3
.1:
cmp.l #1, d1
bne.b .2
movem.l d1/a0, -(a7)
move.l vmem_secondary_space_ptr, a0
move.l d0, (a0,d2)
bsr.w vmem_set_secondary_addr_space
movem.l (a7)+, d1/a0
bra.b .3
.2:
; Halt if invalid space # passed
stop #$2700
.3:
; Get the new mapping pointer
move.l d1, d0
bsr.w vmem_get_map_ptr
move.l a0, a2
move.l (a7)+, d0
.4:
move.l d0, (a2) ; Write the mapping to the mapping page
movem.l (a7)+, d2/a2
rts
; Sets the permission flags for the virtual page at address a0 to d0
; Address space number in d1
vmem_set_page_flags:
movem.l d2/a2, -(a7)
movem.l d0/d1/a0, -(a7)
; Clear the page's TLB entry
bsr.w vmem_clear_tlb_entry
movem.l (a7)+, d0/d1/a0
movem.l d0/d1/a0, -(a7)
move.l d1, d0
; Get the mapping pointer for the passed-in page
bsr.w vmem_get_map_ptr
move.l a0, a2
movem.l (a7)+, d0/d1/a0
; If there is no pointer, return
cmpa.l #0, a2
beq.b .1
move.l (a2), d1 ; Read the old entry
andi.l #(~$FFE), d1 ; Clear the permission flags on the old entry
or.l d0, d1 ; Set the new permission flags
move.l d1, (a2) ; Write the entry
.1:
movem.l (a7)+, d2/a2
rts
; Maps the virtual page at address a0 to the physical frame at address d0
; d0 must have none of its lower 12 bits set
; Address space number in d1
; Permission flags in d2
vmem_map_page_to:
; Set the passed-in permission flags and the present flag on the passed-in page number
or.l d2, d0
or.l #1, d0
bra.w vmem_set_page_mapping
; Maps the virtual page at address a0 to a free physical frame
; Address space number in d0
; Permission flags in d1
vmem_map_page:
; Separate the register pushes to allow popping them directly
; into the right registers for vmem_map_page_to's arguments
move.l d2, -(a7)
move.l a0, -(a7)
move.l d0, -(a7)
move.l d1, -(a7)
jsr pmem_pop_frame
movem.l (a7)+, d2
movem.l (a7)+, d1
movem.l (a7)+, a0
bsr.w vmem_map_page_to
move.l (a7)+, d2
rts
; Unmaps the range of virtual pages at address a0 with length d0
; Address space number in d1
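; Example call (illustrative only; the address and length are made up):
;     move.l #$10000, a0 ; Start of the range to unmap
;     move.l #4, d0 ; Number of pages
;     move.l #0, d1 ; Active address space
;     jsr vmem_unmap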
public vmem_unmap
vmem_unmap:
subi.l #1, d0 ; Subtract 1 to account for the extra loop done by dbra
vmem_um_loop:
movem.l d0/d1/a0, -(a7)
move.l d1, d0
bsr.w vmem_unmap_page
movem.l (a7)+, d0/d1/a0
adda.l #$1000, a0
dbra d0, vmem_um_loop
rts
; Sets the permission flags of the range of virtual pages starting at address a0 with length d1 to d0
; Address space number in d2
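; Example call (illustrative only; the values are made up, with $2 matching the
; flag value vmem_init passes):
;     move.l #$2, d0 ; New permission flags
;     move.l #4, d1 ; Number of pages
;     move.l #0, d2 ; Active address space
;     move.l #$10000, a0 ; Start of the range
;     jsr vmem_set_flags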
public vmem_set_flags
vmem_set_flags:
subi.l #1, d1 ; Subtract 1 to account for the extra loop done by dbra
vmem_sf_loop:
movem.l d0/d1/a0, -(a7)
move.l d2, d1
bsr.w vmem_set_page_flags
movem.l (a7)+, d0/d1/a0
adda.l #$1000, a0
dbra d1, vmem_sf_loop
rts
; Maps the range of virtual pages starting at address a0 with length d1 to the range of physical frames starting at d0
; d0 must have none of its lower 12 bits set
; Address space number in d2
; Permission flags in d3
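; Example call (illustrative only; the addresses are made up):
;     move.l #$200000, d0 ; First physical frame, lower 12 bits clear
;     move.l #4, d1 ; Number of pages
;     move.l #0, d2 ; Active address space
;     move.l #$2, d3 ; Permission flags
;     move.l #$10000, a0 ; First virtual page
;     jsr vmem_map_to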
public vmem_map_to
vmem_map_to:
subi.l #1, d1 ; Subtract 1 to account for the extra loop done by dbra
vmem_mt_loop:
movem.l d0/d1/d2/a0, -(a7)
move.l d2, d1
move.l d3, d2
bsr.w vmem_map_page_to
movem.l (a7)+, d0/d1/d2/a0
adda.l #$1000, a0
add.l #$1000, d0
dbra d1, vmem_mt_loop
rts
; Maps the range of virtual pages starting at address a0 with length d0 to free physical frames
; Address space number in d1
; Permission flags in d2
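; Example call (illustrative only; the address is made up):
;     move.l #4, d0 ; Number of pages
;     move.l #0, d1 ; Active address space
;     move.l #$2, d2 ; Permission flags
;     move.l #$10000, a0 ; First virtual page
;     jsr vmem_map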
public vmem_map
vmem_map:
subi.l #1, d0 ; Subtract 1 to account for the extra loop done by dbra
vmem_m_loop:
movem.l d0/d1/a0, -(a7)
move.l d1, d0
move.l d2, d1
bsr.w vmem_map_page
movem.l (a7)+, d0/d1/a0
adda.l #$1000, a0
dbra d0, vmem_m_loop
rts
; Maps a free range of virtual pages with length d0 to free physical frames
; Returns the range start in a0
; Address space number in d1
; Permission flags in d2
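; Example call (illustrative only): allocate 4 free kernel pages backed by free
; physical frames. Setting bit 1 ($2) of d1 would request user pages instead.
;     move.l #4, d0 ; Number of pages
;     move.l #0, d1 ; Active address space, kernel page range
;     move.l #$2, d2 ; Permission flags
;     jsr vmem_map_free
;     ; a0 now holds the start of the new range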
public vmem_map_free
vmem_map_free:
movem.l d0/d1/d3, -(a7)
move.l d1, d3
; Use bit 1 (value $2) of the address space # to choose between getting free user
; or kernel pages, as only 0 and 1 are valid address space #s, which need only bit 0.
andi.l #$2, d3
beq.b .1
bsr.w vmem_get_free_user_pages
bra.b .2
.1:
bsr.b vmem_get_free_kernel_pages
.2:
movem.l (a7)+, d0/d1/d3
; Clear bit 1, keeping only bit 0, so the address space # is valid for the rest of the VMM code
andi.l #$1, d1
move.l a0, -(a7)
bsr.b vmem_map
move.l (a7)+, a0
rts
; Maps a free range of virtual pages with length d1 to the range of physical frames starting at d0
; Returns the range start in a0
; Address space number in d2
; Permission flags in d3
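; vmem_init above gives a real example of this call: it maps 3 kernel pages to
; the dummy frames at $FFFFC000 with d0=$FFFFC000, d1=3, d2=0, d3=$2 and keeps
; the returned a0 as vmem_active_space_mapping_ptr.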
public vmem_map_free_to
vmem_map_free_to:
movem.l d0/d1/d3, -(a7)
move.l d1, d0
move.l d2, d3
; Use bit 1 (value $2) of the address space # to choose between getting free user
; or kernel pages, as only 0 and 1 are valid address space #s, which need only bit 0.
andi.l #$2, d3
beq.b .1
bsr.w vmem_get_free_user_pages
bra.b .2
.1:
bsr.b vmem_get_free_kernel_pages
.2:
movem.l (a7)+, d0/d1/d3
; Clear bit 1, keeping only bit 0, so the address space # is valid for the rest of the VMM code
andi.l #$1, d2
move.l a0, -(a7)
bsr.w vmem_map_to
move.l (a7)+, a0
rts
; Copies the range of page mappings at address a0 in the primary space with length d0 to the secondary space starting at address a1
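; Example call (illustrative only; the addresses are made up, and the secondary
; space must first be selected with vmem_set_secondary_addr_space):
;     move.l #$10000, a0 ; Source range in the active space
;     move.l #$10000, a1 ; Destination range in the secondary space
;     move.l #4, d0 ; Number of pages
;     jsr vmem_copy_to_secondary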
public vmem_copy_to_secondary
vmem_copy_to_secondary:
movem.l d0/a1, -(a7)
; Get the mapping pointer for the start page in the primary address space
move.l #0, d0
bsr.w vmem_get_map_ptr
movem.l (a7)+, d0/a1
subi.l #1, d0 ; Subtract 1 to account for the extra loop done by dbra
vmem_cts_loop:
; Read the next page mapping and increment the pointer
move.l (a0)+, d1
movem.l d0/a0/a1, -(a7)
; Set the same mapping in the secondary address space at address a1
move.l d1, d0
move.l #1, d1
move.l a1, a0
bsr.w vmem_set_page_mapping
movem.l (a7)+, d0/a0/a1
adda.l #$1000, a1
dbra d0, vmem_cts_loop
rts
; Get a range of free kernel pages with length in d0 and return its start in a0
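; Example call (illustrative only): find, but do not map, a run of 4 unmapped
; kernel pages; the search starts at $C00000 in the active address space.
;     move.l #4, d0 ; Number of pages wanted
;     jsr vmem_get_free_kernel_pages
;     ; a0 now holds the start of the free run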
public vmem_get_free_kernel_pages
vmem_get_free_kernel_pages:
move.l d2, -(a7)
move.l d0, d1 ; Set the remaining page # count to the length of the requested range
move.l #$C00000, a0 ; Put the start page of the search in a0
vmem_gfkp_loop:
movem.l d0/d1/a0, -(a7)
move.l #0, d0 ; Get the pointer for the current page
bsr.w vmem_get_map_ptr
; If NULL, load dummy free mapping, otherwise read page mapping into d2
cmpa.l #0, a0
beq.b .1
move.l (a0), d2
bra.b .2
.1:
move.l #0, d2
.2:
movem.l (a7)+, d0/d1/a0
btst #0, d2 ; If the page is not free, skip it
bne.b .3
subi.l #1, d1
bne.b .4 ; If we have not found a run of pages of the right length, continue to check for free pages
adda.l #$1000, a0 ; Start address is (current_page + $1000 - (length * $1000)), compute this in d0 and return
lsl.l #8, d0
lsl.l #4, d0
suba.l d0, a0
move.l (a7)+, d2
rts
.3:
move.l d0, d1 ; Page not free, reset the page # count and try again
.4:
adda.l #$1000, a0 ; Move to the next page and continue the search
bra.b vmem_gfkp_loop
; Get a range of free user pages with length in d0 and return its start in a0
public vmem_get_free_user_pages
vmem_get_free_user_pages:
move.l d2, -(a7)
move.l d0, d1 ; Set the remaining page # count to the length of the requested range
move.l #$1000, a0 ; Put the start page of the search in a0
vmem_gfup_loop:
movem.l d0/d1/a0, -(a7)
move.l #0, d0 ; Get the pointer for the current page
bsr.w vmem_get_map_ptr
; If NULL, load dummy free mapping, otherwise read page mapping into d2
cmpa.l #0, a0
beq.b .1
move.l (a0), d2
bra.b .2
.1:
move.l #0, d2
.2:
movem.l (a7)+, d0/d1/a0
btst #0, d2 ; If the page is not free, skip it
bne.b .3
subi.l #1, d1
bne.b .4 ; If we have not found a run of pages of the right length, continue to check for free pages
adda.l #$1000, a0 ; Start address is (current_page + $1000 - (length * $1000)), compute this in d0 and return
lsl.l #8, d0
lsl.l #4, d0
suba.l d0, a0
move.l (a7)+, d2
rts
.3:
move.l d0, d1 ; Page not free, reset the page # count and try again
.4:
adda.l #$1000, a0 ; Move to the next page and continue the search
bra.b vmem_gfup_loop
section .data,data
public kernel_address_space
kernel_address_space:
dc.l $0, $0, $0, kernel_map - $C00000
vmem_active_space_ptr: dc.l kernel_address_space
section .bss,bss
public vmem_mmu_base_addr
vmem_mmu_base_addr: ds.l 1
vmem_secondary_space_ptr: ds.l 1
vmem_active_space_mapping_ptr: ds.l 1
vmem_secondary_space_mapping_ptr: ds.l 1