# HG changeset patch
# User John Tsiombikas
# Date 1301949246 -10800
# Node ID 5454cee245a3b17cb497cf7ed6a76f9464c896ca
# Parent 7ece008f09c5592660edd0c01d171b87003d4649
- fixed tragic mistake in the initial kernel image mapping
- page table modifications by disabling paging first
- page allocation completed

diff -r 7ece008f09c5 -r 5454cee245a3 src/main.c
--- a/src/main.c	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/main.c	Mon Apr 04 23:34:06 2011 +0300
@@ -59,6 +59,8 @@
 
 	init_vm(mbinf);
 
+	dbg_print_vm(MEM_USER);
+	dbg_print_vm(MEM_KERNEL);
 
 	for(;;) {
 		char c, keypress;
diff -r 7ece008f09c5 -r 5454cee245a3 src/mem.c
--- a/src/mem.c	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/mem.c	Mon Apr 04 23:34:06 2011 +0300
@@ -185,7 +185,7 @@
 	uint32_t e = (uint32_t)bitmap + bmsize;
 
 	if(e & PGOFFS_MASK) {
-		*end = (e + 4096) & PGOFFS_MASK;
+		*end = (e + 4096) & ~PGOFFS_MASK;
 	} else {
 		*end = e;
 	}
diff -r 7ece008f09c5 -r 5454cee245a3 src/vm-asm.S
--- a/src/vm-asm.S	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/vm-asm.S	Mon Apr 04 23:34:06 2011 +0300
@@ -8,6 +8,21 @@
 	movl %eax, %cr0
 	ret
 
+/* disable_paging(void)
+ * clears the cr0 bit 31 */
+	.globl disable_paging
+disable_paging:
+	movl %cr0, %eax
+	andl $0x7fffffff, %eax
+	movl %eax, %cr0
+	ret
+
+	.globl get_paging_status
+get_paging_status:
+	movl %cr0, %eax
+	shr $31, %eax
+	ret
+
 /* set_pgdir_addr(uint32_t addr)
  * sets the address of the page directory by writing to cr3, which
  * also results in a TLB flush. */
@@ -17,6 +32,24 @@
 	movl %eax, %cr3
 	ret
 
+/* flush_tlb(void)
+ * invalidates the whole TLB. entries for pages marked as global
+ * are unaffected */
+	.globl flush_tlb
+flush_tlb:
+	movl %cr3, %eax
+	movl %eax, %cr3
+	ret
+
+/* flush_tlb_addr(uint32_t addr)
+ * flushes the TLB entry for the page containing a particular
+ * virtual address */
+	.globl flush_tlb_addr
+flush_tlb_addr:
+	movl 4(%esp), %eax
+	invlpg (%eax)
+	ret
+
 /* get_fault_addr(void)
 * returns the contents of control register 2, which provides
 * the faulting address during a page fault exception
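The one-character fix in src/mem.c above is presumably the "tragic
mistake" of the commit message: when rounding the bitmap end address up
to the next page boundary, the low offset bits must be cleared with the
complement of PGOFFS_MASK; masking with PGOFFS_MASK itself keeps only
those bits. A minimal standalone sketch (hypothetical address 0x1234,
not part of the patch) shows the difference:

#include <stdio.h>
#include <stdint.h>

#define PGOFFS_MASK	0xfff

int main(void)
{
	uint32_t e = 0x1234;	/* hypothetical unaligned end address */

	/* buggy: keeps only the in-page offset bits */
	printf("%x\n", (e + 4096) & PGOFFS_MASK);	/* prints 234 */

	/* fixed: clears the offset bits -> next page boundary */
	printf("%x\n", (e + 4096) & ~PGOFFS_MASK);	/* prints 2000 */
	return 0;
}

With the old mask, the reported end of the kernel image never exceeded a
page offset, so the identity mapping set up in init_vm below could not
cover the kernel code.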
diff -r 7ece008f09c5 -r 5454cee245a3 src/vm.c
--- a/src/vm.c	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/vm.c	Mon Apr 04 23:34:06 2011 +0300
@@ -25,9 +25,15 @@
 
 /* defined in vm-asm.S */
 void enable_paging(void);
+void disable_paging(void);
+int get_paging_status(void);
 void set_pgdir_addr(uint32_t addr);
+void flush_tlb(void);
+void flush_tlb_addr(uint32_t addr);
+#define flush_tlb_page(p)	flush_tlb_addr(PAGE_TO_ADDR(p))
 uint32_t get_fault_addr(void);
 
+static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high);
 static void pgfault(int inum, uint32_t err);
 static struct page_range *alloc_node(void);
 static void free_node(struct page_range *node);
@@ -39,32 +45,65 @@
 static struct page_range *pglist[2];
 /* list of free page_range structures to be used in the lists */
 static struct page_range *node_pool;
+/* the first page range for the whole kernel address space, to get things started */
+static struct page_range first_node;
 
 
 void init_vm(struct mboot_info *mb)
 {
 	uint32_t idmap_end;
 
+	/* initialize the physical memory map and allocator */
 	init_mem(mb);
 
+	/* setup the page tables */
 	pgdir = (uint32_t*)alloc_phys_page();
-	memset(pgdir, 0, sizeof pgdir);
+	memset(pgdir, 0, PGSIZE);
+	set_pgdir_addr((int32_t)pgdir);
 
 	/* map the video memory and kernel code 1-1 */
 	get_kernel_mem_range(0, &idmap_end);
 	map_mem_range(IDMAP_START, idmap_end - IDMAP_START, IDMAP_START, 0);
 
+	/* set the page fault handler */
 	interrupt(PAGEFAULT, pgfault);
 
-	set_pgdir_addr((int32_t)pgdir);
+	/* we can enable paging now */
 	enable_paging();
+
+	/* initialize the virtual page allocator */
+	node_pool = 0;
+
+	first_node.start = ADDR_TO_PAGE(KMEM_START);
+	first_node.end = PAGE_COUNT;
+	first_node.next = 0;
+	pglist[MEM_KERNEL] = &first_node;
+
+	pglist[MEM_USER] = alloc_node();
+	pglist[MEM_USER]->start = 0;
+	pglist[MEM_USER]->end = ADDR_TO_PAGE(KMEM_START);
+	pglist[MEM_USER]->next = 0;
 }
 
-void map_page(int vpage, int ppage, unsigned int attr)
+/* if ppage == -1 we allocate a physical page by calling alloc_phys_page */
+int map_page(int vpage, int ppage, unsigned int attr)
 {
 	uint32_t *pgtbl;
-	int diridx = PAGE_TO_PGTBL(vpage);
-	int pgidx = PAGE_TO_PGTBL_PG(vpage);
+	int diridx, pgidx, pgon;
+
+	pgon = get_paging_status();
+	disable_paging();
+
+	if(ppage < 0) {
+		uint32_t addr = alloc_phys_page();
+		if(!addr) {
+			return -1;
+		}
+		ppage = ADDR_TO_PAGE(addr);
+	}
+
+	diridx = PAGE_TO_PGTBL(vpage);
+	pgidx = PAGE_TO_PGTBL_PG(vpage);
 
 	if(!(pgdir[diridx] & PG_PRESENT)) {
 		uint32_t addr = alloc_phys_page();
@@ -77,6 +116,12 @@
 	}
 
 	pgtbl[pgidx] = PAGE_TO_ADDR(ppage) | (attr & ATTR_PGTBL_MASK) | PG_PRESENT;
+	flush_tlb_page(vpage);
+
+	if(pgon) {
+		enable_paging();
+	}
+	return 0;
 }
 
 void unmap_page(int vpage)
@@ -94,6 +139,7 @@
 		goto err;
 	}
 	pgtbl[pgidx] = 0;
+	flush_tlb_page(vpage);
 
 	return;
 err:
@@ -101,32 +147,42 @@
 }
 
 /* if ppg_start is -1, we allocate physical pages to map with alloc_phys_page() */
-void map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
+int map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
 {
-	int i;
+	int i, phys_pg;
+	uint32_t paddr;
 
 	for(i=0; i<pgcount; i++) {
-		uint32_t paddr = ppg_start < 0 ? alloc_phys_page() : PAGE_TO_ADDR(ppg_start + i);
-		map_page(vpg_start + i, ADDR_TO_PAGE(paddr), attr);
+		if(ppg_start < 0) {
+			if(!(paddr = alloc_phys_page())) {
+				return -1;
+			}
+			phys_pg = ADDR_TO_PAGE(paddr);
+		} else {
+			phys_pg = ppg_start + i;
+		}
+
+		map_page(vpg_start + i, phys_pg, attr);
 	}
+	return 0;
 }
 
-void map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr)
+int map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr)
 {
 	int vpg_start, ppg_start, num_pages;
 
 	vpg_start = ADDR_TO_PAGE(vaddr);
-	ppg_start = ADDR_TO_PAGE(paddr);
+	ppg_start = paddr > 0 ? ADDR_TO_PAGE(paddr) : -1;
 	num_pages = ADDR_TO_PAGE(sz) + 1;
 
-	map_page_range(vpg_start, num_pages, ppg_start, attr);
+	return map_page_range(vpg_start, num_pages, ppg_start, attr);
 }
 
 uint32_t virt_to_phys(uint32_t vaddr)
@@ -182,8 +238,10 @@
 	}
 
 	if(ret >= 0) {
-		/* allocate physical storage and map them */
-		map_page_range(ret, num, -1, 0);
+		/* allocate physical storage and map */
+		if(map_page_range(ret, num, -1, 0) == -1) {
+			ret = -1;
+		}
 	}
 
 	return ret;
@@ -191,7 +249,63 @@
 
 void pgfree(int start, int num)
 {
-	/* TODO */
+	int area, end;
+	struct page_range *node, *new, *prev, *next;
+
+	if(!(new = alloc_node())) {
+		panic("pgfree: can't allocate new page_range node to add the freed pages\n");
+	}
+	new->start = start;
+	end = new->end = start + num;
+
+	area = PAGE_TO_ADDR(start) >= KMEM_START ? MEM_KERNEL : MEM_USER;
+
+	if(!pglist[area] || pglist[area]->start > start) {
+		next = new->next = pglist[area];
+		pglist[area] = new;
+		prev = 0;
+
+	} else {
+
+		prev = 0;
+		node = pglist[area];
+		next = node ? node->next : 0;
+
+		while(node) {
+			if(!next || next->start > start) {
+				/* place here, after node */
+				new->next = next;
+				node->next = new;
+				prev = node;	/* needed by coalesce after the loop */
+				break;
+			}
+
+			prev = node;
+			node = next;
+			next = node ? node->next : 0;
+		}
+	}
+
+	coalesce(prev, new, next);
+}
+
+static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high)
+{
+	if(high) {
+		if(mid->end == high->start) {
+			mid->end = high->end;
+			mid->next = high->next;
+			free_node(high);
+		}
+	}
+
+	if(low) {
+		if(low->end == mid->start) {
+			low->end = mid->end;
+			low->next = mid->next;
+			free_node(mid);
+		}
+	}
 }
 
 static void pgfault(int inum, uint32_t err)
@@ -215,31 +329,69 @@
 }
 
 /* --- page range list node management --- */
+#define NODES_IN_PAGE	(PGSIZE / sizeof(struct page_range))
+
 static struct page_range *alloc_node(void)
 {
 	struct page_range *node;
-	uint32_t paddr;
+	int pg, i;
 
 	if(node_pool) {
 		node = node_pool;
 		node_pool = node_pool->next;
+		printf("alloc_node -> %x\n", (unsigned int)node);
 		return node;
 	}
 
-	/* no node structures in the pool, we need to allocate and map
-	 * a page, split it up into node structures, add them in the pool
-	 * and allocate one of them.
+	/* no node structures in the pool, we need to allocate a new page,
+	 * split it up into node structures, add them in the pool, and
+	 * allocate one of them.
 	 */
-	if(!(paddr = alloc_phys_page())) {
+	if((pg = pgalloc(1, MEM_KERNEL)) == -1) {
 		panic("ran out of physical memory while allocating VM range structures\n");
 	}
+	node_pool = (struct page_range*)PAGE_TO_ADDR(pg);
 
-	/* TODO cont. */
-	return 0;
+	/* link them up, skip the first as we'll just allocate it anyway */
+	for(i=2; i<NODES_IN_PAGE; i++) {
+		node_pool[i - 1].next = node_pool + i;
+	}
+	node_pool[NODES_IN_PAGE - 1].next = 0;
+
+	node = node_pool++;
+	printf("alloc_node -> %x\n", (unsigned int)node);
+	return node;
}
 
 static void free_node(struct page_range *node)
 {
 	node->next = node_pool;
 	node_pool = node;
+	printf("free_node\n");
 }
+
+
+void dbg_print_vm(int area)
+{
+	struct page_range *node = pglist[area];
+	int last = area == MEM_USER ? 0 : ADDR_TO_PAGE(KMEM_START);
+
+	printf("%s vm space\n", area == MEM_USER ? "user" : "kernel");
+
+	while(node) {
+		if(node->start > last) {
+			printf(" vm-used: %x -> %x\n", PAGE_TO_ADDR(last), PAGE_TO_ADDR(node->start));
+		}
+
+		printf(" vm-free: %x -> ", PAGE_TO_ADDR(node->start));
+		if(node->end >= PAGE_COUNT) {
+			printf("END\n");
+		} else {
+			printf("%x\n", PAGE_TO_ADDR(node->end));
+		}
+
+		last = node->end;
+		node = node->next;
+	}
+}
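The new pgfree keeps each per-area free list sorted by start page and
relies on coalesce() to merge the freed range with adjacent neighbours,
so the list stays minimal. A simplified, standalone illustration of the
merge logic (node recycling via free_node omitted; the ranges are
hypothetical):

#include <stdio.h>

struct range { int start, end; struct range *next; };

/* same merge logic as coalesce() in the patch, minus the node pool */
static void coalesce(struct range *low, struct range *mid, struct range *high)
{
	if(high && mid->end == high->start) {
		mid->end = high->end;	/* absorb the higher neighbour */
		mid->next = high->next;
	}
	if(low && low->end == mid->start) {
		low->end = mid->end;	/* absorb mid (possibly already extended) */
		low->next = mid->next;
	}
}

int main(void)
{
	struct range high = {20, 30, 0};
	struct range mid = {10, 20, &high};
	struct range low = {0, 10, &mid};

	coalesce(&low, &mid, &high);
	printf("%d -> %d\n", low.start, low.end);	/* prints: 0 -> 30 */
	return 0;
}

Freeing the middle range collapses all three nodes into a single
[0, 30) entry; note that the low merge must copy mid's end (which may
already include high's) rather than add to it.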
diff -r 7ece008f09c5 -r 5454cee245a3 src/vm.h
--- a/src/vm.h	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/vm.h	Mon Apr 04 23:34:06 2011 +0300
@@ -18,12 +18,14 @@
 
 #define PGSIZE			4096
 
+#define PAGE_COUNT		(1024 * 1024)
+
 #define PGOFFS_MASK		0xfff
 #define PGNUM_MASK		0xfffff000
 #define PGENT_ADDR_MASK	PGNUM_MASK
 
-#define ADDR_TO_PAGE(x)	((uint32_t)(x) >> 12)
-#define PAGE_TO_ADDR(x)	((uint32_t)(x) << 12)
+#define ADDR_TO_PAGE(x)		((uint32_t)(x) >> 12)
+#define PAGE_TO_ADDR(x)		((uint32_t)(x) << 12)
 
 #define ADDR_TO_PGTBL(x)	((uint32_t)(x) >> 22)
 #define ADDR_TO_PGTBL_PG(x)	(((uint32_t)(x) >> 12) & 0x3ff)
@@ -35,10 +37,9 @@
 
 void init_vm(struct mboot_info *mb);
 
-void map_page(int vpage, int ppage, unsigned int attr);
-void map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr);
-
-void map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr);
+int map_page(int vpage, int ppage, unsigned int attr);
+int map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr);
+int map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr);
 
 uint32_t virt_to_phys(uint32_t vaddr);
 
@@ -48,5 +49,8 @@
 };
 
 int pgalloc(int num, int area);
+void pgfree(int start, int num);
+
+void dbg_print_vm(int area);
 
 #endif	/* VM_H_ */
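For reference, a hypothetical caller of the completed interface declared
in vm.h; only the two dbg_print_vm calls in main.c are actually added by
this patch, the rest is illustrative:

#include "vm.h"

/* hypothetical self-test, not part of the patch; assumes the kernel's
 * panic() and memset() are available */
void vm_selftest(void)
{
	int pg;

	/* reserve and map 4 kernel pages; pgalloc returns -1 on failure */
	if((pg = pgalloc(4, MEM_KERNEL)) == -1) {
		panic("out of kernel virtual memory\n");
	}

	/* the pages are backed by physical memory, usable right away */
	memset((void*)PAGE_TO_ADDR(pg), 0, 4 * PGSIZE);

	/* return them to the kernel free list, coalescing with neighbours */
	pgfree(pg, 4);

	/* dump both address-space maps, as main() now does after init_vm */
	dbg_print_vm(MEM_USER);
	dbg_print_vm(MEM_KERNEL);
}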