kern
changeset 23:5454cee245a3
- fixed tragic mistake in the initial kernel image mapping
- page table modifications are now done with paging disabled
- page allocation completed (pgfree and the range-node allocator implemented)
author      John Tsiombikas <nuclear@member.fsf.org>
date        Mon, 04 Apr 2011 23:34:06 +0300
parents     7ece008f09c5
children    53588744382c
files       src/main.c src/mem.c src/vm-asm.S src/vm.c src/vm.h
diffstat    5 files changed, 221 insertions(+), 30 deletions(-)
--- a/src/main.c	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/main.c	Mon Apr 04 23:34:06 2011 +0300
@@ -59,6 +59,8 @@
 
     init_vm(mbinf);
 
+    dbg_print_vm(MEM_USER);
+    dbg_print_vm(MEM_KERNEL);
 
     for(;;) {
         char c, keypress;
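dbg_print_vm() is defined in the src/vm.c hunk further down; it walks an area's free-range list and prints the used gaps and free ranges. With a hypothetical KMEM_START of 0xc0000000 and one kernel page already consumed by the node pool, the kernel-area output would look roughly like:

    kernel vm space
      vm-used: c0000000 -> c0001000
      vm-free: c0001000 -> END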
--- a/src/mem.c	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/mem.c	Mon Apr 04 23:34:06 2011 +0300
@@ -185,7 +185,7 @@
     uint32_t e = (uint32_t)bitmap + bmsize;
 
     if(e & PGOFFS_MASK) {
-        *end = (e + 4096) & PGOFFS_MASK;
+        *end = (e + 4096) & ~PGOFFS_MASK;
     } else {
         *end = e;
     }
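This one-character fix looks like the "tragic mistake" from the commit message: masking with PGOFFS_MASK keeps only the page offset, so the reported end of the allocator bitmap (which init_vm uses to size the kernel's 1-1 mapping) was bogus. A minimal standalone sketch of the corrected round-up, with a hypothetical address:

    #include <stdint.h>
    #include <stdio.h>

    #define PGSIZE      4096
    #define PGOFFS_MASK 0xfff

    /* round an address up to the next page boundary, as the fixed code does */
    static uint32_t page_align_up(uint32_t e)
    {
        if(e & PGOFFS_MASK) {
            return (e + PGSIZE) & ~PGOFFS_MASK;
        }
        return e;
    }

    int main(void)
    {
        uint32_t e = 0x10234;    /* hypothetical bitmap end address */

        /* correct mask rounds up to the next boundary: 10234 -> 11000 */
        printf("fixed: %x\n", page_align_up(e));

        /* the old expression kept only the page offset: (e + 4096) & 0xfff == 234 */
        printf("buggy: %x\n", (e + PGSIZE) & PGOFFS_MASK);
        return 0;
    }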
--- a/src/vm-asm.S	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/vm-asm.S	Mon Apr 04 23:34:06 2011 +0300
@@ -8,6 +8,21 @@
     movl %eax, %cr0
     ret
 
+/* disable_paging(void)
+ * clears the cr0 bit 31 */
+    .globl disable_paging
+disable_paging:
+    movl %cr0, %eax
+    andl $0x7fffffff, %eax
+    movl %eax, %cr0
+    ret
+
+    .globl get_paging_status
+get_paging_status:
+    movl %cr0, %eax
+    shr $31, %eax
+    ret
+
 /* set_pgdir_addr(uint32_t addr)
  * sets the address of the page directory by writing to cr3, which
  * also results in a TLB flush. */
@@ -17,6 +32,24 @@
     movl %eax, %cr3
     ret
 
+/* flush_tlb(void)
+ * invalidates the whole TLB. entries for pages marked as global
+ * are unaffected */
+    .globl flush_tlb
+flush_tlb:
+    movl %cr3, %eax
+    movl %eax, %cr3
+    ret
+
+/* flush_tlb_addr(uint32_t addr)
+ * flushes the TLB entry for the page containing a particular
+ * virtual address */
+    .globl flush_tlb_addr
+flush_tlb_addr:
+    movl 4(%esp), %eax
+    invlpg (%eax)
+    ret
+
 /* get_fault_addr(void)
  * returns the contents of control register 2, which provides
  * the faulting address during a page fault exception
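These routines exist so C code can briefly turn paging off while it edits the page tables, and flush stale translations afterwards. A minimal usage sketch of that save/modify/restore pattern (the prototypes match the declarations added to src/vm.c below; the address and function name are hypothetical):

    #include <stdint.h>

    /* implemented in vm-asm.S, declared in src/vm.c */
    void enable_paging(void);
    void disable_paging(void);
    int get_paging_status(void);
    void flush_tlb_addr(uint32_t addr);

    void edit_mapping_example(void)
    {
        int pgon = get_paging_status();  /* remember whether paging was on (cr0 bit 31) */
        disable_paging();                /* page tables can now be touched via physical addresses */

        /* ... modify the page directory / page tables here ... */

        flush_tlb_addr(0x12345000);      /* invlpg drops the stale TLB entry (hypothetical address) */
        if(pgon) {
            enable_paging();             /* restore the previous state */
        }
    }

This is exactly the shape map_page takes in the src/vm.c hunk below.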
--- a/src/vm.c	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/vm.c	Mon Apr 04 23:34:06 2011 +0300
@@ -25,9 +25,15 @@
 
 /* defined in vm-asm.S */
 void enable_paging(void);
+void disable_paging(void);
+int get_paging_status(void);
 void set_pgdir_addr(uint32_t addr);
+void flush_tlb(void);
+void flush_tlb_addr(uint32_t addr);
+#define flush_tlb_page(p)   flush_tlb_addr(PAGE_TO_ADDR(p))
 uint32_t get_fault_addr(void);
 
+static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high);
 static void pgfault(int inum, uint32_t err);
 static struct page_range *alloc_node(void);
 static void free_node(struct page_range *node);
@@ -39,32 +45,65 @@
 static struct page_range *pglist[2];
 /* list of free page_range structures to be used in the lists */
 static struct page_range *node_pool;
+/* the first page range for the whole kernel address space, to get things started */
+static struct page_range first_node;
 
 
 void init_vm(struct mboot_info *mb)
 {
     uint32_t idmap_end;
 
+    /* initialize the physical memory map and allocator */
     init_mem(mb);
 
+    /* setup the page tables */
     pgdir = (uint32_t*)alloc_phys_page();
-    memset(pgdir, 0, sizeof pgdir);
+    memset(pgdir, 0, PGSIZE);
+    set_pgdir_addr((int32_t)pgdir);
 
     /* map the video memory and kernel code 1-1 */
     get_kernel_mem_range(0, &idmap_end);
     map_mem_range(IDMAP_START, idmap_end - IDMAP_START, IDMAP_START, 0);
 
+    /* set the page fault handler */
     interrupt(PAGEFAULT, pgfault);
 
-    set_pgdir_addr((int32_t)pgdir);
+    /* we can enable paging now */
     enable_paging();
+
+    /* initialize the virtual page allocator */
+    node_pool = 0;
+
+    first_node.start = ADDR_TO_PAGE(KMEM_START);
+    first_node.end = PAGE_COUNT;
+    first_node.next = 0;
+    pglist[MEM_KERNEL] = &first_node;
+
+    pglist[MEM_USER] = alloc_node();
+    pglist[MEM_USER]->start = 0;
+    pglist[MEM_USER]->end = ADDR_TO_PAGE(KMEM_START);
+    pglist[MEM_USER]->next = 0;
 }
 
-void map_page(int vpage, int ppage, unsigned int attr)
+/* if ppage == -1 we allocate a physical page by calling alloc_phys_page */
+int map_page(int vpage, int ppage, unsigned int attr)
 {
     uint32_t *pgtbl;
-    int diridx = PAGE_TO_PGTBL(vpage);
-    int pgidx = PAGE_TO_PGTBL_PG(vpage);
+    int diridx, pgidx, pgon;
+
+    pgon = get_paging_status();
+    disable_paging();
+
+    if(ppage < 0) {
+        uint32_t addr = alloc_phys_page();
+        if(!addr) {
+            return -1;
+        }
+        ppage = ADDR_TO_PAGE(addr);
+    }
+
+    diridx = PAGE_TO_PGTBL(vpage);
+    pgidx = PAGE_TO_PGTBL_PG(vpage);
 
     if(!(pgdir[diridx] & PG_PRESENT)) {
         uint32_t addr = alloc_phys_page();
@@ -77,6 +116,12 @@
     }
 
     pgtbl[pgidx] = PAGE_TO_ADDR(ppage) | (attr & ATTR_PGTBL_MASK) | PG_PRESENT;
+    flush_tlb_page(vpage);
+
+    if(pgon) {
+        enable_paging();
+    }
+    return 0;
 }
 
 void unmap_page(int vpage)
@@ -94,6 +139,7 @@
         goto err;
     }
     pgtbl[pgidx] = 0;
+    flush_tlb_page(vpage);
 
     return;
 err:
@@ -101,32 +147,42 @@
 }
 
 /* if ppg_start is -1, we allocate physical pages to map with alloc_phys_page() */
-void map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
+int map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
 {
-    int i;
+    int i, phys_pg;
+    uint32_t paddr;
 
     for(i=0; i<pgcount; i++) {
-        uint32_t paddr = ppg_start == -1 ? alloc_phys_page() : ppg_start + i;
+        if(ppg_start < 0) {
+            if(!(paddr = alloc_phys_page())) {
+                return -1;
+            }
+            phys_pg = ADDR_TO_PAGE(paddr);
+        } else {
+            phys_pg = ppg_start + i;
+        }
 
-        map_page(vpg_start + i, paddr, attr);
+        map_page(vpg_start + i, phys_pg, attr);
     }
+    return 0;
 }
 
-void map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr)
+/* if paddr is 0, we allocate physical pages with alloc_phys_page() */
+int map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr)
 {
     int vpg_start, ppg_start, num_pages;
 
-    if(!sz) return;
+    if(!sz) return -1;
 
     if(ADDR_TO_PGOFFS(paddr)) {
         panic("map_mem_range called with unaligned physical address: %x\n", paddr);
     }
 
     vpg_start = ADDR_TO_PAGE(vaddr);
-    ppg_start = ADDR_TO_PAGE(paddr);
+    ppg_start = paddr > 0 ? ADDR_TO_PAGE(paddr) : -1;
     num_pages = ADDR_TO_PAGE(sz) + 1;
 
-    map_page_range(vpg_start, num_pages, ppg_start, attr);
+    return map_page_range(vpg_start, num_pages, ppg_start, attr);
 }
 
 uint32_t virt_to_phys(uint32_t vaddr)
@@ -182,8 +238,10 @@
     }
 
     if(ret >= 0) {
-        /* allocate physical storage and map them */
-        map_page_range(ret, num, -1, 0);
+        /* allocate physical storage and map */
+        if(map_page_range(ret, num, -1, 0) == -1) {
+            ret = -1;
+        }
     }
 
     return ret;
@@ -191,7 +249,63 @@
 
 void pgfree(int start, int num)
 {
-    /* TODO */
+    int area, end;
+    struct page_range *node, *new, *prev, *next;
+
+    if(!(new = alloc_node())) {
+        panic("pgfree: can't allocate new page_range node to add the freed pages\n");
+    }
+    new->start = start;
+    end = new->end = start + num;
+
+    area = PAGE_TO_ADDR(start) >= KMEM_START ? MEM_KERNEL : MEM_USER;
+
+    if(!pglist[area] || pglist[area]->start > start) {
+        next = new->next = pglist[area];
+        pglist[area] = new;
+        prev = 0;
+
+    } else {
+
+        prev = 0;
+        node = pglist[area];
+        next = node ? node->next : 0;
+
+        while(node) {
+            if(!next || next->start > start) {
+                /* place here, after node */
+                new->next = next;
+                node->next = new;
+                prev = node;    /* needed by coalesce after the loop */
+                break;
+            }
+
+            prev = node;
+            node = next;
+            next = node ? node->next : 0;
+        }
+    }
+
+    coalesce(prev, new, next);
+}
+
+static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high)
+{
+    if(high) {
+        if(mid->end == high->start) {
+            mid->end = high->end;
+            mid->next = high->next;
+            free_node(high);
+        }
+    }
+
+    if(low) {
+        if(low->end == mid->start) {
+            low->end += mid->end;
+            low->next = mid->next;
+            free_node(mid);
+        }
+    }
 }
 
 static void pgfault(int inum, uint32_t err)
@@ -215,31 +329,69 @@
 }
 
 /* --- page range list node management --- */
+#define NODES_IN_PAGE   (PGSIZE / sizeof(struct page_range))
+
 static struct page_range *alloc_node(void)
 {
     struct page_range *node;
-    uint32_t paddr;
+    int pg, i;
 
     if(node_pool) {
         node = node_pool;
         node_pool = node_pool->next;
+        printf("alloc_node -> %x\n", (unsigned int)node);
        return node;
     }
 
-    /* no node structures in the pool, we need to allocate and map
-     * a page, split it up into node structures, add them in the pool
-     * and allocate one of them.
+    /* no node structures in the pool, we need to allocate a new page,
+     * split it up into node structures, add them in the pool, and
+     * allocate one of them.
      */
-    if(!(paddr = alloc_phys_page())) {
+    if(!(pg = pgalloc(1, MEM_KERNEL))) {
         panic("ran out of physical memory while allocating VM range structures\n");
     }
+    node_pool = (struct page_range*)PAGE_TO_ADDR(pg);
 
-    /* TODO cont. */
-    return 0;
+    /* link them up, skip the first as we'll just allocate it anyway */
+    for(i=2; i<NODES_IN_PAGE; i++) {
+        node_pool[i - 1].next = node_pool + i;
+    }
+    node_pool[NODES_IN_PAGE - 1].next = 0;
+
+    /* grab the first and return it */
+    node = node_pool++;
+    printf("alloc_node -> %x\n", (unsigned int)node);
+    return node;
 }
 
 static void free_node(struct page_range *node)
 {
     node->next = node_pool;
     node_pool = node;
+    printf("free_node\n");
 }
+
+
+void dbg_print_vm(int area)
+{
+    struct page_range *node = pglist[area];
+    int last = area == MEM_USER ? 0 : ADDR_TO_PAGE(KMEM_START);
+
+    printf("%s vm space\n", area == MEM_USER ? "user" : "kernel");
+
+    while(node) {
+        if(node->start > last) {
+            printf("  vm-used: %x -> %x\n", PAGE_TO_ADDR(last), PAGE_TO_ADDR(node->start));
+        }
+
+        printf("  vm-free: %x -> ", PAGE_TO_ADDR(node->start));
+        if(node->end >= PAGE_COUNT) {
+            printf("END\n");
+        } else {
+            printf("%x\n", PAGE_TO_ADDR(node->end));
+        }
+
+        last = node->end;
+        node = node->next;
+    }
+}
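pgfree() keeps each area's free ranges on a sorted linked list and merges touching neighbours on insert. A self-contained sketch of the same sorted-list-with-coalescing idea, using malloc instead of the kernel's node pool (all names here are illustrative, not the kernel's; note that when the lower neighbour absorbs the new range, its end simply becomes the new range's end):

    #include <stdio.h>
    #include <stdlib.h>

    struct range {
        int start, end;              /* [start, end) in pages */
        struct range *next;
    };

    static struct range *freelist;

    /* insert [start, start+num) in sorted order, then merge touching neighbours */
    static void range_free(int start, int num)
    {
        struct range *new, *prev = 0, *node = freelist;

        new = malloc(sizeof *new);
        new->start = start;
        new->end = start + num;

        while(node && node->start <= start) {
            prev = node;
            node = node->next;
        }
        new->next = node;
        if(prev) prev->next = new; else freelist = new;

        /* coalesce upwards: new absorbs the following range */
        if(new->next && new->end == new->next->start) {
            struct range *high = new->next;
            new->end = high->end;
            new->next = high->next;
            free(high);
        }
        /* coalesce downwards: the previous range absorbs new */
        if(prev && prev->end == new->start) {
            prev->end = new->end;
            prev->next = new->next;
            free(new);
        }
    }

    int main(void)
    {
        range_free(10, 5);    /* [10,15) */
        range_free(20, 5);    /* [20,25) */
        range_free(15, 5);    /* [15,20) bridges both -> a single [10,25) */

        for(struct range *r = freelist; r; r = r->next) {
            printf("free: %d -> %d\n", r->start, r->end);
        }
        return 0;
    }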
--- a/src/vm.h	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/vm.h	Mon Apr 04 23:34:06 2011 +0300
@@ -18,12 +18,14 @@
 
 
 #define PGSIZE              4096
+#define PAGE_COUNT          (1024 * 1024)
+
 #define PGOFFS_MASK         0xfff
 #define PGNUM_MASK          0xfffff000
 #define PGENT_ADDR_MASK     PGNUM_MASK
 
-#define ADDR_TO_PAGE(x)	((uint32_t)(x) >> 12)
-#define PAGE_TO_ADDR(x)	((uint32_t)(x) << 12)
+#define ADDR_TO_PAGE(x)     ((uint32_t)(x) >> 12)
+#define PAGE_TO_ADDR(x)     ((uint32_t)(x) << 12)
 
 #define ADDR_TO_PGTBL(x)    ((uint32_t)(x) >> 22)
 #define ADDR_TO_PGTBL_PG(x) (((uint32_t)(x) >> 12) & 0x3ff)
@@ -35,10 +37,9 @@
 
 void init_vm(struct mboot_info *mb);
 
-void map_page(int vpage, int ppage, unsigned int attr);
-void map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr);
-
-void map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr);
+int map_page(int vpage, int ppage, unsigned int attr);
+int map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr);
+int map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr);
 
 uint32_t virt_to_phys(uint32_t vaddr);
 
@@ -48,5 +49,8 @@
 };
 
 int pgalloc(int num, int area);
+void pgfree(int start, int num);
+
+void dbg_print_vm(int area);
 
 #endif /* VM_H_ */
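PAGE_COUNT is the number of 4KB pages in the full 32-bit address space (2^32 / 2^12 = 2^20 = 1024 * 1024), which init_vm uses as the end of the initial kernel free range. A quick sanity check of the address macros, with a hypothetical virtual address:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_COUNT          (1024 * 1024)                      /* 2^20 pages */
    #define ADDR_TO_PAGE(x)     ((uint32_t)(x) >> 12)
    #define PAGE_TO_ADDR(x)     ((uint32_t)(x) << 12)
    #define ADDR_TO_PGTBL(x)    ((uint32_t)(x) >> 22)              /* page directory index */
    #define ADDR_TO_PGTBL_PG(x) (((uint32_t)(x) >> 12) & 0x3ff)    /* page table index */

    int main(void)
    {
        uint32_t addr = 0xc01ff123;    /* hypothetical virtual address */

        printf("page:  %x\n", ADDR_TO_PAGE(addr));                 /* c01ff */
        printf("base:  %x\n", PAGE_TO_ADDR(ADDR_TO_PAGE(addr)));   /* c01ff000 */
        printf("pgdir: %x\n", ADDR_TO_PGTBL(addr));                /* 300 */
        printf("pgtbl: %x\n", ADDR_TO_PGTBL_PG(addr));             /* 1ff */
        return 0;
    }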