kern
view src/vm.c @ 23:5454cee245a3
- fixed tragic mistake in the initial kernel image mapping
- page table modifications are now done with paging disabled first
- page allocation completed
author   | John Tsiombikas <nuclear@member.fsf.org>
date     | Mon, 04 Apr 2011 23:34:06 +0300
parents  | 7ece008f09c5
children | 53588744382c
line source
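/* vm.c -- virtual memory manager: page table setup, page mapping/unmapping,
 * and allocation of virtual page ranges for the kernel and user areas
 */
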
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include "vm.h"
#include "intr.h"
#include "mem.h"
#include "panic.h"

#define KMEM_START		0xc0000000
#define IDMAP_START		0xa0000

#define ATTR_PGDIR_MASK	0x3f
#define ATTR_PGTBL_MASK	0x1ff
#define ADDR_PGENT_MASK	0xfffff000

#define PAGEFAULT		14

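/* a page_range node describes a contiguous range of free virtual pages,
 * as page numbers in [start, end), and links to the next free range
 */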
struct page_range {
	int start, end;
	struct page_range *next;
};

/* defined in vm-asm.S */
void enable_paging(void);
void disable_paging(void);
int get_paging_status(void);
void set_pgdir_addr(uint32_t addr);
void flush_tlb(void);
void flush_tlb_addr(uint32_t addr);
#define flush_tlb_page(p)	flush_tlb_addr(PAGE_TO_ADDR(p))
uint32_t get_fault_addr(void);

static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high);
static void pgfault(int inum, uint32_t err);
static struct page_range *alloc_node(void);
static void free_node(struct page_range *node);

/* page directory */
static uint32_t *pgdir;

/* 2 lists of free ranges, for kernel memory and user memory */
static struct page_range *pglist[2];
/* list of free page_range structures to be used in the lists */
static struct page_range *node_pool;
/* the first page range for the whole kernel address space, to get things started */
static struct page_range first_node;

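/* set up the page directory, identity-map the low memory the kernel needs,
 * install the page fault handler, enable paging, and initialize the kernel
 * and user free-range lists
 */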
void init_vm(struct mboot_info *mb)
{
	uint32_t idmap_end;

	/* initialize the physical memory map and allocator */
	init_mem(mb);

	/* setup the page tables */
	pgdir = (uint32_t*)alloc_phys_page();
	memset(pgdir, 0, PGSIZE);
	set_pgdir_addr((uint32_t)pgdir);

	/* map the video memory and kernel code 1-1 */
	get_kernel_mem_range(0, &idmap_end);
	map_mem_range(IDMAP_START, idmap_end - IDMAP_START, IDMAP_START, 0);

	/* set the page fault handler */
	interrupt(PAGEFAULT, pgfault);

	/* we can enable paging now */
	enable_paging();

	/* initialize the virtual page allocator */
	node_pool = 0;

	first_node.start = ADDR_TO_PAGE(KMEM_START);
	first_node.end = PAGE_COUNT;
	first_node.next = 0;
	pglist[MEM_KERNEL] = &first_node;

	pglist[MEM_USER] = alloc_node();
	pglist[MEM_USER]->start = 0;
	pglist[MEM_USER]->end = ADDR_TO_PAGE(KMEM_START);
	pglist[MEM_USER]->next = 0;
}

/* if ppage == -1 we allocate a physical page by calling alloc_phys_page */
int map_page(int vpage, int ppage, unsigned int attr)
{
	uint32_t *pgtbl;
	int diridx, pgidx, pgon;

	pgon = get_paging_status();
	disable_paging();

	if(ppage < 0) {
		uint32_t addr = alloc_phys_page();
		if(!addr) {
			/* restore the previous paging state before bailing out */
			if(pgon) {
				enable_paging();
			}
			return -1;
		}
		ppage = ADDR_TO_PAGE(addr);
	}

	diridx = PAGE_TO_PGTBL(vpage);
	pgidx = PAGE_TO_PGTBL_PG(vpage);

	if(!(pgdir[diridx] & PG_PRESENT)) {
		uint32_t addr = alloc_phys_page();
		pgtbl = (uint32_t*)addr;
		memset(pgtbl, 0, PGSIZE);

		pgdir[diridx] = addr | (attr & ATTR_PGDIR_MASK) | PG_PRESENT;
	} else {
		pgtbl = (uint32_t*)(pgdir[diridx] & ADDR_PGENT_MASK);
	}

	pgtbl[pgidx] = PAGE_TO_ADDR(ppage) | (attr & ATTR_PGTBL_MASK) | PG_PRESENT;
	flush_tlb_page(vpage);

	if(pgon) {
		enable_paging();
	}
	return 0;
}

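/* remove the mapping of a virtual page, if one exists */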
void unmap_page(int vpage)
{
	uint32_t *pgtbl;
	int diridx = PAGE_TO_PGTBL(vpage);
	int pgidx = PAGE_TO_PGTBL_PG(vpage);

	if(!(pgdir[diridx] & PG_PRESENT)) {
		goto err;
	}
	pgtbl = (uint32_t*)(pgdir[diridx] & ADDR_PGENT_MASK);

	if(!(pgtbl[pgidx] & PG_PRESENT)) {
		goto err;
	}
	pgtbl[pgidx] = 0;
	flush_tlb_page(vpage);

	return;
err:
	printf("unmap_page(%d): page already not mapped\n", vpage);
}

/* if ppg_start is -1, we allocate physical pages to map with alloc_phys_page() */
int map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
{
	int i, phys_pg;
	uint32_t paddr;

	for(i=0; i<pgcount; i++) {
		if(ppg_start < 0) {
			if(!(paddr = alloc_phys_page())) {
				return -1;
			}
			phys_pg = ADDR_TO_PAGE(paddr);
		} else {
			phys_pg = ppg_start + i;
		}

		map_page(vpg_start + i, phys_pg, attr);
	}
	return 0;
}

/* if paddr is 0, we allocate physical pages with alloc_phys_page() */
int map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr)
{
	int vpg_start, ppg_start, num_pages;

	if(!sz) return -1;

	if(ADDR_TO_PGOFFS(paddr)) {
		panic("map_mem_range called with unaligned physical address: %x\n", paddr);
	}

	vpg_start = ADDR_TO_PAGE(vaddr);
	ppg_start = paddr > 0 ? ADDR_TO_PAGE(paddr) : -1;
	num_pages = ADDR_TO_PAGE(sz) + 1;

	return map_page_range(vpg_start, num_pages, ppg_start, attr);
}

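/* translate a virtual address to the physical address it's mapped to,
 * by walking the page directory and page table entries
 */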
uint32_t virt_to_phys(uint32_t vaddr)
{
	uint32_t pgaddr, *pgtbl;
	int diridx = ADDR_TO_PGTBL(vaddr);
	int pgidx = ADDR_TO_PGTBL_PG(vaddr);

	if(!(pgdir[diridx] & PG_PRESENT)) {
		panic("virt_to_phys(%x): page table %d not present\n", vaddr, diridx);
	}
	pgtbl = (uint32_t*)(pgdir[diridx] & ADDR_PGENT_MASK);

	if(!(pgtbl[pgidx] & PG_PRESENT)) {
		panic("virt_to_phys(%x): page %d not present\n", vaddr, ADDR_TO_PAGE(vaddr));
	}
	pgaddr = pgtbl[pgidx] & ADDR_PGENT_MASK;

	return pgaddr | ADDR_TO_PGOFFS(vaddr);
}

/* allocate a contiguous block of virtual memory pages along with
 * backing physical memory for them, and update the page table.
 */
int pgalloc(int num, int area)
{
	int ret = -1;
	struct page_range *node, *prev, dummy;

	dummy.next = pglist[area];
	node = pglist[area];
	prev = &dummy;

	while(node) {
		if(node->end - node->start >= num) {
			ret = node->start;
			node->start += num;

			if(node->start == node->end) {
				prev->next = node->next;
				node->next = 0;

				if(node == pglist[area]) {
					/* the exhausted node was the head of the list; whatever
					 * remains is now hanging off dummy.next
					 */
					pglist[area] = dummy.next;
				}
				free_node(node);
			}
			break;
		}

		prev = node;
		node = node->next;
	}

	if(ret >= 0) {
		/* allocate physical storage and map */
		if(map_page_range(ret, num, -1, 0) == -1) {
			ret = -1;
		}
	}

	return ret;
}

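/* return a range of virtual pages to the free list of the appropriate area
 * (kernel or user), keeping the list sorted and merging adjacent ranges
 * through coalesce()
 */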
void pgfree(int start, int num)
{
	int area, end;
	struct page_range *node, *new, *prev, *next;

	if(!(new = alloc_node())) {
		panic("pgfree: can't allocate new page_range node to add the freed pages\n");
	}
	new->start = start;
	end = new->end = start + num;

	area = PAGE_TO_ADDR(start) >= KMEM_START ? MEM_KERNEL : MEM_USER;

	if(!pglist[area] || pglist[area]->start > start) {
		next = new->next = pglist[area];
		pglist[area] = new;
		prev = 0;

	} else {

		prev = 0;
		node = pglist[area];
		next = node ? node->next : 0;

		while(node) {
			if(!next || next->start > start) {
				/* place here, after node */
				new->next = next;
				node->next = new;
				prev = node;	/* needed by coalesce after the loop */
				break;
			}

			prev = node;
			node = next;
			next = node ? node->next : 0;
		}
	}

	coalesce(prev, new, next);
}

static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high)
{
	if(high) {
		if(mid->end == high->start) {
			mid->end = high->end;
			mid->next = high->next;
			free_node(high);
		}
	}

	if(low) {
		if(low->end == mid->start) {
			low->end = mid->end;
			low->next = mid->next;
			free_node(mid);
		}
	}
}

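/* page fault interrupt handler: for now it just prints the faulting address
 * and the reason for the fault, then panics
 */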
static void pgfault(int inum, uint32_t err)
{
	printf("~~~~ PAGE FAULT ~~~~\n");

	printf("fault address: %x\n", get_fault_addr());

	if(err & PG_PRESENT) {
		if(err & 8) {
			printf("reserved bit set in some paging structure\n");
		} else {
			printf("%s protection violation ", (err & PG_WRITABLE) ? "write" : "read");
			printf("in %s mode\n", err & PG_USER ? "user" : "kernel");
		}
	} else {
		printf("page not present\n");
	}

	panic("unhandled page fault\n");
}

/* --- page range list node management --- */
#define NODES_IN_PAGE	(PGSIZE / sizeof(struct page_range))

static struct page_range *alloc_node(void)
{
	struct page_range *node;
	int pg, i;

	if(node_pool) {
		node = node_pool;
		node_pool = node_pool->next;
		printf("alloc_node -> %x\n", (unsigned int)node);
		return node;
	}

	/* no node structures in the pool, we need to allocate a new page,
	 * split it up into node structures, add them in the pool, and
	 * allocate one of them.
	 */
	if((pg = pgalloc(1, MEM_KERNEL)) == -1) {
		panic("ran out of physical memory while allocating VM range structures\n");
	}
	node_pool = (struct page_range*)PAGE_TO_ADDR(pg);

	/* link them up, skip the first as we'll just allocate it anyway */
	for(i=2; i<NODES_IN_PAGE; i++) {
		node_pool[i - 1].next = node_pool + i;
	}
	node_pool[NODES_IN_PAGE - 1].next = 0;

	/* grab the first and return it */
	node = node_pool++;
	printf("alloc_node -> %x\n", (unsigned int)node);
	return node;
}

static void free_node(struct page_range *node)
{
	node->next = node_pool;
	node_pool = node;
	printf("free_node\n");
}

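/* print the used/free ranges of the requested area (kernel or user), for debugging */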
void dbg_print_vm(int area)
{
	struct page_range *node = pglist[area];
	int last = area == MEM_USER ? 0 : ADDR_TO_PAGE(KMEM_START);

	printf("%s vm space\n", area == MEM_USER ? "user" : "kernel");

	while(node) {
		if(node->start > last) {
			printf("  vm-used: %x -> %x\n", PAGE_TO_ADDR(last), PAGE_TO_ADDR(node->start));
		}

		printf("  vm-free: %x -> ", PAGE_TO_ADDR(node->start));
		if(node->end >= PAGE_COUNT) {
			printf("END\n");
		} else {
			printf("%x\n", PAGE_TO_ADDR(node->end));
		}

		last = node->end;
		node = node->next;
	}
}