# HG changeset patch
# User John Tsiombikas
# Date 1302064964 -10800
# Node ID 9939a6d7a45a1ea08d4094cffa302a1890c726d8
# Parent 53588744382ceb01121a3b06759e638749a0509a
protected critical sections in VM and the physical memory manager by disabling interrupts

diff -r 53588744382c -r 9939a6d7a45a src/intr-asm.S
--- a/src/intr-asm.S	Tue Apr 05 02:09:02 2011 +0300
+++ b/src/intr-asm.S	Wed Apr 06 07:42:44 2011 +0300
@@ -17,6 +17,28 @@
 	lidt (lim)
 	ret
 
+/* get_intr_state()
+ * returns 1 if interrupts are enabled, 0 if disabled */
+	.globl get_intr_state
+get_intr_state:
+	pushf
+	popl %eax
+	shr $9, %eax	/* bit 9 of eflags is IF */
+	andl $1, %eax
+	ret
+
+/* set_intr_state(int state)
+ * enables interrupts if the argument is non-zero, disables them otherwise */
+	.globl set_intr_state
+set_intr_state:
+	cmpl $0, 4(%esp)
+	jz 0f
+	sti
+	ret
+0:	cli
+	ret
+
+
 /* interrupt entry with error code macro
  * this macro generates an interrupt entry point for the
  * exceptions which include error codes in the stack frame
diff -r 53588744382c -r 9939a6d7a45a src/intr.h
--- a/src/intr.h	Tue Apr 05 02:09:02 2011 +0300
+++ b/src/intr.h	Wed Apr 06 07:42:44 2011 +0300
@@ -2,6 +2,7 @@
 #define INTR_H_
 
 #include <inttypes.h>
+#include "asmops.h"
 
 typedef void (*intr_func_t)(int, uint32_t);
 
@@ -10,4 +11,8 @@
 
 void interrupt(int intr_num, intr_func_t func);
 
+/* defined in intr-asm.S */
+int get_intr_state(void);
+void set_intr_state(int s);
+
 #endif	/* INTR_H_ */
diff -r 53588744382c -r 9939a6d7a45a src/mem.c
--- a/src/mem.c	Tue Apr 05 02:09:02 2011 +0300
+++ b/src/mem.c	Wed Apr 06 07:42:44 2011 +0300
@@ -3,6 +3,7 @@
 #include "mem.h"
 #include "panic.h"
 #include "vm.h"
+#include "intr.h"
 
 #define FREE	0
 #define USED	1
@@ -120,7 +121,10 @@
  */
 uint32_t alloc_phys_page(void)
 {
-	int i, idx, max;
+	int i, idx, max, intr_state;
+
+	intr_state = get_intr_state();
+	disable_intr();
 
 	idx = last_alloc_idx;
 	max = bmsize / 4;
@@ -139,6 +143,8 @@
 			last_alloc_idx = idx;
 
 			printf("alloc_phys_page() -> %x (page: %d)\n", PAGE_TO_ADDR(pg), pg);
+
+			set_intr_state(intr_state);
 			return PAGE_TO_ADDR(pg);
 		}
 	}
@@ -147,6 +153,7 @@
 		idx++;
 	}
 
+	set_intr_state(intr_state);
 	return 0;
 }
 
@@ -163,6 +170,9 @@
 	int pg = ADDR_TO_PAGE(addr);
 	int bmidx = BM_IDX(pg);
 
+	int intr_state = get_intr_state();
+	disable_intr();
+
 	if(!IS_FREE(pg)) {
 		panic("free_phys_page(%d): I thought that was already free!\n", pg);
 	}
@@ -171,6 +181,8 @@
 	if(bmidx < last_alloc_idx) {
 		last_alloc_idx = bmidx;
 	}
+
+	set_intr_state(intr_state);
 }
 
 /* this is only ever used by the VM init code to find out what the extends of
diff -r 53588744382c -r 9939a6d7a45a src/vm.c
--- a/src/vm.c	Tue Apr 05 02:09:02 2011 +0300
+++ b/src/vm.c	Wed Apr 06 07:42:44 2011 +0300
@@ -97,13 +97,17 @@
 int map_page(int vpage, int ppage, unsigned int attr)
 {
 	uint32_t *pgtbl;
-	int diridx, pgidx, pgon;
+	int diridx, pgidx, pgon, intr_state;
+
+	intr_state = get_intr_state();
+	disable_intr();
 
 	pgon = get_paging_status();
 
 	if(ppage < 0) {
 		uint32_t addr = alloc_phys_page();
 		if(!addr) {
+			set_intr_state(intr_state);
 			return -1;
 		}
 		ppage = ADDR_TO_PAGE(addr);
@@ -129,6 +133,7 @@
 	pgtbl[pgidx] = PAGE_TO_ADDR(ppage) | (attr & ATTR_PGTBL_MASK) | PG_PRESENT;
 	flush_tlb_page(vpage);
 
+	set_intr_state(intr_state);
 	return 0;
 }
 
@@ -138,6 +143,9 @@
 	int diridx = PAGE_TO_PGTBL(vpage);
 	int pgidx = PAGE_TO_PGTBL_PG(vpage);
 
+	int intr_state = get_intr_state();
+	disable_intr();
+
 	if(!(pgdir[diridx] & PG_PRESENT)) {
 		goto err;
 	}
@@ -149,9 +157,11 @@
 	pgtbl[pgidx] = 0;
 	flush_tlb_page(vpage);
 
-	return;
+	if(0) {
 err:
-	printf("unmap_page(%d): page already not mapped\n", vpage);
+	printf("unmap_page(%d): page already not mapped\n", vpage);
+	}
+	set_intr_state(intr_state);
 }
 
 /* if ppg_start is -1, we allocate physical pages to map with alloc_phys_page() */
@@ -217,9 +227,12 @@
  */
 int pgalloc(int num, int area)
 {
-	int ret = -1;
+	int intr_state, ret = -1;
 	struct page_range *node, *prev, dummy;
 
+	intr_state = get_intr_state();
+	disable_intr();
+
 	dummy.next = pglist[area];
 	node = pglist[area];
 	prev = &dummy;
@@ -252,14 +265,18 @@
 		}
 	}
 
+	set_intr_state(intr_state);
 	return ret;
 }
 
 void pgfree(int start, int num)
 {
-	int area, end;
+	int area, end, intr_state;
 	struct page_range *node, *new, *prev, *next;
 
+	intr_state = get_intr_state();
+	disable_intr();
+
 	if(!(new = alloc_node())) {
 		panic("pgfree: can't allocate new page_range node to add the freed pages\n");
 	}
@@ -295,6 +312,7 @@
 	}
 
 	coalesce(prev, new, next);
+	set_intr_state(intr_state);
 }
 
 static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high)
@@ -382,8 +400,14 @@
 
 void dbg_print_vm(int area)
 {
-	struct page_range *node = pglist[area];
-	int last = area == MEM_USER ? 0 : ADDR_TO_PAGE(KMEM_START);
+	struct page_range *node;
+	int last, intr_state;
+
+	intr_state = get_intr_state();
+	disable_intr();
+
+	node = pglist[area];
+	last = area == MEM_USER ? 0 : ADDR_TO_PAGE(KMEM_START);
 
 	printf("%s vm space\n", area == MEM_USER ? "user" : "kernel");
 
@@ -402,4 +426,6 @@
 		last = node->end;
 		node = node->next;
 	}
+
+	set_intr_state(intr_state);
 }