kern
diff src/vm.c @ 25:9939a6d7a45a
protected critical sections in the VM and the physical memory manager by disabling interrupts
author | John Tsiombikas <nuclear@member.fsf.org> |
---|---|
date | Wed, 06 Apr 2011 07:42:44 +0300 |
parents | 53588744382c |
children | 387078ef5c0d |
line diff
1.1 --- a/src/vm.c Tue Apr 05 02:09:02 2011 +0300 1.2 +++ b/src/vm.c Wed Apr 06 07:42:44 2011 +0300 1.3 @@ -97,13 +97,17 @@ 1.4 int map_page(int vpage, int ppage, unsigned int attr) 1.5 { 1.6 uint32_t *pgtbl; 1.7 - int diridx, pgidx, pgon; 1.8 + int diridx, pgidx, pgon, intr_state; 1.9 + 1.10 + intr_state = get_intr_state(); 1.11 + disable_intr(); 1.12 1.13 pgon = get_paging_status(); 1.14 1.15 if(ppage < 0) { 1.16 uint32_t addr = alloc_phys_page(); 1.17 if(!addr) { 1.18 + set_intr_state(intr_state); 1.19 return -1; 1.20 } 1.21 ppage = ADDR_TO_PAGE(addr); 1.22 @@ -129,6 +133,7 @@ 1.23 pgtbl[pgidx] = PAGE_TO_ADDR(ppage) | (attr & ATTR_PGTBL_MASK) | PG_PRESENT; 1.24 flush_tlb_page(vpage); 1.25 1.26 + set_intr_state(intr_state); 1.27 return 0; 1.28 } 1.29 1.30 @@ -138,6 +143,9 @@ 1.31 int diridx = PAGE_TO_PGTBL(vpage); 1.32 int pgidx = PAGE_TO_PGTBL_PG(vpage); 1.33 1.34 + int intr_state = get_intr_state(); 1.35 + disable_intr(); 1.36 + 1.37 if(!(pgdir[diridx] & PG_PRESENT)) { 1.38 goto err; 1.39 } 1.40 @@ -149,9 +157,11 @@ 1.41 pgtbl[pgidx] = 0; 1.42 flush_tlb_page(vpage); 1.43 1.44 - return; 1.45 + if(0) { 1.46 err: 1.47 - printf("unmap_page(%d): page already not mapped\n", vpage); 1.48 + printf("unmap_page(%d): page already not mapped\n", vpage); 1.49 + } 1.50 + set_intr_state(intr_state); 1.51 } 1.52 1.53 /* if ppg_start is -1, we allocate physical pages to map with alloc_phys_page() */ 1.54 @@ -217,9 +227,12 @@ 1.55 */ 1.56 int pgalloc(int num, int area) 1.57 { 1.58 - int ret = -1; 1.59 + int intr_state, ret = -1; 1.60 struct page_range *node, *prev, dummy; 1.61 1.62 + intr_state = get_intr_state(); 1.63 + disable_intr(); 1.64 + 1.65 dummy.next = pglist[area]; 1.66 node = pglist[area]; 1.67 prev = &dummy; 1.68 @@ -252,14 +265,18 @@ 1.69 } 1.70 } 1.71 1.72 + set_intr_state(intr_state); 1.73 return ret; 1.74 } 1.75 1.76 void pgfree(int start, int num) 1.77 { 1.78 - int area, end; 1.79 + int area, end, intr_state; 1.80 struct page_range *node, *new, *prev, *next; 
1.81 1.82 + intr_state = get_intr_state(); 1.83 + disable_intr(); 1.84 + 1.85 if(!(new = alloc_node())) { 1.86 panic("pgfree: can't allocate new page_range node to add the freed pages\n"); 1.87 } 1.88 @@ -295,6 +312,7 @@ 1.89 } 1.90 1.91 coalesce(prev, new, next); 1.92 + set_intr_state(intr_state); 1.93 } 1.94 1.95 static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high) 1.96 @@ -382,8 +400,14 @@ 1.97 1.98 void dbg_print_vm(int area) 1.99 { 1.100 - struct page_range *node = pglist[area]; 1.101 - int last = area == MEM_USER ? 0 : ADDR_TO_PAGE(KMEM_START); 1.102 + struct page_range *node; 1.103 + int last, intr_state; 1.104 + 1.105 + intr_state = get_intr_state(); 1.106 + disable_intr(); 1.107 + 1.108 + node = pglist[area]; 1.109 + last = area == MEM_USER ? 0 : ADDR_TO_PAGE(KMEM_START); 1.110 1.111 printf("%s vm space\n", area == MEM_USER ? "user" : "kernel"); 1.112 1.113 @@ -402,4 +426,6 @@ 1.114 last = node->end; 1.115 node = node->next; 1.116 } 1.117 + 1.118 + set_intr_state(intr_state); 1.119 }