#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include "vm.h"
#include "intr.h"
#include "mem.h"
#include "panic.h"


#define KMEM_START		0xc0000000
#define IDMAP_START		0xa0000

#define PGDIR_ADDR		0xfffff000
#define PGTBL_BASE		(0xffffffff - 4096 * 1024 + 1)
#define PGTBL(x)		((uint32_t*)(PGTBL_BASE + PGSIZE * (x)))

#define ATTR_PGDIR_MASK	0x3f
#define ATTR_PGTBL_MASK	0x1ff
#define ADDR_PGENT_MASK	0xfffff000

#define PAGEFAULT		14


struct page_range {
	int start, end;
	struct page_range *next;
};

/* defined in vm-asm.S */
void enable_paging(void);
void disable_paging(void);
int get_paging_status(void);
void set_pgdir_addr(uint32_t addr);
void flush_tlb(void);
void flush_tlb_addr(uint32_t addr);
#define flush_tlb_page(p)	flush_tlb_addr(PAGE_TO_ADDR(p))
uint32_t get_fault_addr(void);

static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high);
static void pgfault(int inum, uint32_t err);
static struct page_range *alloc_node(void);
static void free_node(struct page_range *node);

/* page directory */
static uint32_t *pgdir;

/* 2 lists of free ranges, for kernel memory and user memory */
static struct page_range *pglist[2];
/* list of free page_range structures to be used in the lists */
static struct page_range *node_pool;
/* the first page range for the whole kernel address space, to get things started */
static struct page_range first_node;


void init_vm(struct mboot_info *mb)
{
	uint32_t idmap_end;

	/* initialize the physical memory map and allocator */
	init_mem(mb);

	/* setup the page tables */
	pgdir = (uint32_t*)alloc_phys_page();
	memset(pgdir, 0, PGSIZE);
	set_pgdir_addr((uint32_t)pgdir);

	/* map the video memory and kernel code 1-1 */
	get_kernel_mem_range(0, &idmap_end);
	map_mem_range(IDMAP_START, idmap_end - IDMAP_START, IDMAP_START, 0);

	/* make the last page directory entry point to the page directory */
	pgdir[1023] = ((uint32_t)pgdir & ADDR_PGENT_MASK) | PG_PRESENT;
	pgdir = (uint32_t*)PGDIR_ADDR;

	/* set the page fault handler */
	interrupt(PAGEFAULT, pgfault);

	/* we can enable paging now */
	enable_paging();

	/* initialize the virtual page allocator */
	node_pool = 0;

	first_node.start = ADDR_TO_PAGE(KMEM_START);
	first_node.end = PAGE_COUNT;
	first_node.next = 0;
	pglist[MEM_KERNEL] = &first_node;

	pglist[MEM_USER] = alloc_node();
	pglist[MEM_USER]->start = 0;
	pglist[MEM_USER]->end = ADDR_TO_PAGE(KMEM_START);
	pglist[MEM_USER]->next = 0;
}

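/* Note on page table access: init_vm points the last page directory entry back
 * at the page directory itself. Once paging is enabled, this self-mapping makes
 * the directory visible at PGDIR_ADDR and page table x visible at PGTBL(x), so
 * map_page can reach the tables without knowing their physical addresses.
 */
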
/* if ppage == -1, we allocate a physical page by calling alloc_phys_page */
int map_page(int vpage, int ppage, unsigned int attr)
{
	uint32_t *pgtbl;
	int diridx, pgidx, pgon, intr_state;

	intr_state = get_intr_state();
	disable_intr();

	pgon = get_paging_status();

	if(ppage < 0) {
		uint32_t addr = alloc_phys_page();
		if(!addr) {
			set_intr_state(intr_state);
			return -1;
		}
		ppage = ADDR_TO_PAGE(addr);
	}

	diridx = PAGE_TO_PGTBL(vpage);
	pgidx = PAGE_TO_PGTBL_PG(vpage);

	if(!(pgdir[diridx] & PG_PRESENT)) {
		/* no page table for this range yet, allocate one */
		uint32_t addr = alloc_phys_page();
		pgdir[diridx] = addr | (attr & ATTR_PGDIR_MASK) | PG_PRESENT;

		pgtbl = pgon ? PGTBL(diridx) : (uint32_t*)addr;
		memset(pgtbl, 0, PGSIZE);
	} else {
		if(pgon) {
			pgtbl = PGTBL(diridx);
		} else {
			pgtbl = (uint32_t*)(pgdir[diridx] & ADDR_PGENT_MASK);
		}
	}

	pgtbl[pgidx] = PAGE_TO_ADDR(ppage) | (attr & ATTR_PGTBL_MASK) | PG_PRESENT;
	flush_tlb_page(vpage);

	set_intr_state(intr_state);
	return 0;
}

void unmap_page(int vpage)
{
	uint32_t *pgtbl;
	int diridx = PAGE_TO_PGTBL(vpage);
	int pgidx = PAGE_TO_PGTBL_PG(vpage);

	int intr_state = get_intr_state();
	disable_intr();

	if(!(pgdir[diridx] & PG_PRESENT)) {
		goto err;
	}
	pgtbl = (uint32_t*)(pgdir[diridx] & ADDR_PGENT_MASK);

	if(!(pgtbl[pgidx] & PG_PRESENT)) {
		goto err;
	}
	pgtbl[pgidx] = 0;
	flush_tlb_page(vpage);

	if(0) {
err:
		printf("unmap_page(%d): page already not mapped\n", vpage);
	}
	set_intr_state(intr_state);
}

/* if ppg_start is -1, we allocate physical pages to map with alloc_phys_page() */
int map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
{
	int i, phys_pg;
	uint32_t paddr;

	for(i=0; i<pgcount; i++) {
		if(ppg_start < 0) {
			/* grab a physical page for each virtual page */
			if(!(paddr = alloc_phys_page())) {
				return -1;
			}
			phys_pg = ADDR_TO_PAGE(paddr);
		} else {
			phys_pg = ppg_start + i;
		}

		if(map_page(vpg_start + i, phys_pg, attr) == -1) {
			return -1;
		}
	}
	return 0;
}

/* map a range of memory given in bytes; if paddr is 0, physical pages are allocated */
int map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr)
{
	int vpg_start, ppg_start, num_pages;

	vpg_start = ADDR_TO_PAGE(vaddr);
	ppg_start = paddr > 0 ? ADDR_TO_PAGE(paddr) : -1;
	num_pages = ADDR_TO_PAGE(sz) + 1;

	return map_page_range(vpg_start, num_pages, ppg_start, attr);
}

uint32_t virt_to_phys(uint32_t vaddr)
{
	uint32_t pgaddr, *pgtbl;
	int diridx = ADDR_TO_PGTBL(vaddr);
	int pgidx = ADDR_TO_PGTBL_PG(vaddr);

	if(!(pgdir[diridx] & PG_PRESENT)) {
		panic("virt_to_phys(%x): page table %d not present\n", vaddr, diridx);
	}
	pgtbl = (uint32_t*)(pgdir[diridx] & ADDR_PGENT_MASK);

	if(!(pgtbl[pgidx] & PG_PRESENT)) {
		panic("virt_to_phys(%x): page %d not present\n", vaddr, ADDR_TO_PAGE(vaddr));
	}
	pgaddr = pgtbl[pgidx] & ADDR_PGENT_MASK;

	return pgaddr | ADDR_TO_PGOFFS(vaddr);
}

/* allocate a contiguous block of virtual memory pages along with
 * backing physical memory for them, and update the page table.
 */
int pgalloc(int num, int area)
{
	int intr_state, ret = -1;
	struct page_range *node, *prev, dummy;

	intr_state = get_intr_state();
	disable_intr();

	dummy.next = pglist[area];
	node = pglist[area];
	prev = &dummy;

	while(node) {
		if(node->end - node->start >= num) {
			ret = node->start;
			node->start += num;

			if(node->start == node->end) {
				prev->next = node->next;
				node->next = 0;

				if(node == pglist[area]) {
					/* head node exhausted, the rest of the list becomes the new head */
					pglist[area] = prev->next;
				}
				free_node(node);
			}
			break;
		}

		prev = node;
		node = node->next;
	}

	if(ret >= 0) {
		/* allocate physical storage and map */
		if(map_page_range(ret, num, -1, 0) == -1) {
			ret = -1;
		}
	}

	set_intr_state(intr_state);
	return ret;
}

void pgfree(int start, int num)
{
	int area, end, intr_state;
	struct page_range *node, *new, *prev, *next;

	intr_state = get_intr_state();
	disable_intr();

	if(!(new = alloc_node())) {
		panic("pgfree: can't allocate new page_range node to add the freed pages\n");
	}
	new->start = start;
	end = new->end = start + num;

	area = PAGE_TO_ADDR(start) >= KMEM_START ? MEM_KERNEL : MEM_USER;

	if(!pglist[area] || pglist[area]->start > start) {
		next = new->next = pglist[area];
		pglist[area] = new;
		prev = 0;

	} else {

		prev = 0;
		node = pglist[area];
		next = node ? node->next : 0;

		while(node) {
			if(!next || next->start > start) {
				/* place here, after node */
				new->next = next;
				node->next = new;
				prev = node;	/* needed by coalesce after the loop */
				break;
			}

			prev = node;
			node = next;
			next = node ? node->next : 0;
		}
	}

	coalesce(prev, new, next);
	set_intr_state(intr_state);
}

static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high)
{
	if(high) {
		if(mid->end == high->start) {
			mid->end = high->end;
			mid->next = high->next;
			free_node(high);
		}
	}

	if(low) {
		if(low->end == mid->start) {
			low->end = mid->end;
			low->next = mid->next;
			free_node(mid);
		}
	}
}

static void pgfault(int inum, uint32_t err)
{
	printf("~~~~ PAGE FAULT ~~~~\n");

	printf("fault address: %x\n", get_fault_addr());

	if(err & PG_PRESENT) {
		if(err & 8) {
			printf("reserved bit set in some paging structure\n");
		} else {
			printf("%s protection violation ", (err & PG_WRITABLE) ? "write" : "read");
			printf("in %s mode\n", err & PG_USER ? "user" : "kernel");
		}
	} else {
		printf("page not present\n");
	}

	panic("unhandled page fault\n");
}

/* --- page range list node management --- */
#define NODES_IN_PAGE	(PGSIZE / sizeof(struct page_range))

static struct page_range *alloc_node(void)
{
	struct page_range *node;
	int pg, i;

	if(node_pool) {
		node = node_pool;
		node_pool = node_pool->next;
		printf("alloc_node -> %x\n", (unsigned int)node);
		return node;
	}

	/* no node structures in the pool, we need to allocate a new page,
	 * split it up into node structures, add them in the pool, and
	 * allocate one of them.
	 */
	if((pg = pgalloc(1, MEM_KERNEL)) == -1) {
		panic("ran out of physical memory while allocating VM range structures\n");
	}
	node_pool = (struct page_range*)PAGE_TO_ADDR(pg);

	/* link them up, skip the first as we'll just allocate it anyway */
	for(i=2; i<NODES_IN_PAGE; i++) {
		node_pool[i - 1].next = node_pool + i;
	}
	node_pool[i - 1].next = 0;

	/* grab the first and return it */
	node = node_pool++;

	printf("alloc_node -> %x\n", (unsigned int)node);
	return node;
}

static void free_node(struct page_range *node)
{
	node->next = node_pool;
	node_pool = node;
	printf("free_node\n");
}


void dbg_print_vm(int area)
{
	struct page_range *node;
	int last, intr_state;

	intr_state = get_intr_state();
	disable_intr();

	node = pglist[area];
	last = area == MEM_USER ? 0 : ADDR_TO_PAGE(KMEM_START);

"user" : "kernel"); nuclear@23: nuclear@23: while(node) { nuclear@23: if(node->start > last) { nuclear@23: printf(" vm-used: %x -> %x\n", PAGE_TO_ADDR(last), PAGE_TO_ADDR(node->start)); nuclear@23: } nuclear@23: nuclear@23: printf(" vm-free: %x -> ", PAGE_TO_ADDR(node->start)); nuclear@23: if(node->end >= PAGE_COUNT) { nuclear@23: printf("END\n"); nuclear@23: } else { nuclear@23: printf("%x\n", PAGE_TO_ADDR(node->end)); nuclear@23: } nuclear@23: nuclear@23: last = node->end; nuclear@23: node = node->next; nuclear@23: } nuclear@25: nuclear@25: set_intr_state(intr_state); nuclear@23: }