kern

diff src/vm.c @ 55:88a6c4e192f9

Fixed most important task switching bugs. Now it seems that I can switch in and out of user space reliably.
author John Tsiombikas <nuclear@member.fsf.org>
date Mon, 15 Aug 2011 04:03:39 +0300
parents fa65b4f45366
children 437360696883
line diff
     1.1 --- a/src/vm.c	Sun Aug 14 16:57:23 2011 +0300
     1.2 +++ b/src/vm.c	Mon Aug 15 04:03:39 2011 +0300
     1.3 @@ -129,8 +129,19 @@
     1.4  	pgidx = PAGE_TO_PGTBL_PG(vpage);
     1.5  
     1.6  	if(!(pgdir[diridx] & PG_PRESENT)) {
     1.7 +		/* no page table present, we must allocate one */
     1.8  		uint32_t addr = alloc_phys_page();
     1.9 -		pgdir[diridx] = addr | (attr & ATTR_PGDIR_MASK) | PG_PRESENT;
    1.10 +
     1.11 +		/* make sure all page directory entries below the kernel vm
    1.12 +		 * split have the user and writable bits set, otherwise further user
    1.13 +		 * mappings on the same 4mb block will be unusable in user space.
    1.14 +		 */
    1.15 +		unsigned int pgdir_attr = attr;
    1.16 +		if(vpage < ADDR_TO_PAGE(KMEM_START)) {
    1.17 +			pgdir_attr |= PG_USER | PG_WRITABLE;
    1.18 +		}
    1.19 +
    1.20 +		pgdir[diridx] = addr | (pgdir_attr & ATTR_PGDIR_MASK) | PG_PRESENT;
    1.21  
    1.22  		pgtbl = pgon ? PGTBL(diridx) : (uint32_t*)addr;
    1.23  		memset(pgtbl, 0, PGSIZE);
    1.24 @@ -265,7 +276,6 @@
    1.25  {
    1.26  	int intr_state, ret = -1;
    1.27  	struct page_range *node, *prev, dummy;
    1.28 -	unsigned int attr = 0;	/* TODO */
    1.29  
    1.30  	intr_state = get_intr_state();
    1.31  	disable_intr();
    1.32 @@ -296,6 +306,9 @@
    1.33  	}
    1.34  
    1.35  	if(ret >= 0) {
    1.36 +		/*unsigned int attr = (area == MEM_USER) ? (PG_USER | PG_WRITABLE) : PG_GLOBAL;*/
    1.37 +		unsigned int attr = (area == MEM_USER) ? (PG_USER | PG_WRITABLE) : 0;
    1.38 +
    1.39  		/* allocate physical storage and map */
    1.40  		if(map_page_range(ret, num, -1, attr) == -1) {
    1.41  			ret = -1;
    1.42 @@ -310,7 +323,6 @@
    1.43  {
    1.44  	struct page_range *node, *prev, dummy;
    1.45  	int area, intr_state, ret = -1;
    1.46 -	unsigned int attr = 0;	/* TODO */
    1.47  
    1.48  	area = (start >= ADDR_TO_PAGE(KMEM_START)) ? MEM_KERNEL : MEM_USER;
    1.49  	if(area == MEM_USER && start + num > ADDR_TO_PAGE(KMEM_START)) {
    1.50 @@ -376,6 +388,9 @@
    1.51  	}
    1.52  
    1.53  	if(ret >= 0) {
    1.54 +		/*unsigned int attr = (area == MEM_USER) ? (PG_USER | PG_WRITABLE) : PG_GLOBAL;*/
    1.55 +		unsigned int attr = (area == MEM_USER) ? (PG_USER | PG_WRITABLE) : 0;
    1.56 +
    1.57  		/* allocate physical storage and map */
    1.58  		if(map_page_range(ret, num, -1, attr) == -1) {
    1.59  			ret = -1;
    1.60 @@ -464,21 +479,20 @@
    1.61  	uint32_t fault_addr = get_fault_addr();
    1.62  
     1.63  	/* the fault occurred in user space */
    1.64 -	if(frm->esp < KMEM_START + 1) {
    1.65 +	if(frm->err & PG_USER) {
    1.66  		int fault_page = ADDR_TO_PAGE(fault_addr);
    1.67  		struct process *proc = get_current_proc();
    1.68 +		printf("DBG: page fault in user space\n");
    1.69  		assert(proc);
    1.70  
    1.71 -		printf("DBG: page fault in user space\n");
    1.72 -
    1.73  		if(frm->err & PG_PRESENT) {
    1.74  			/* it's not due to a missing page, just panic */
    1.75  			goto unhandled;
    1.76  		}
    1.77  
    1.78  		/* detect if it's an automatic stack growth deal */
    1.79 -		if(fault_page < proc->stack_start_pg && proc->stack_start_pg - fault_page < USTACK_MAXGROW) {
    1.80 -			int num_pages = proc->stack_start_pg - fault_page;
    1.81 +		if(fault_page < proc->user_stack_pg && proc->user_stack_pg - fault_page < USTACK_MAXGROW) {
    1.82 +			int num_pages = proc->user_stack_pg - fault_page;
    1.83  			printf("growing user (%d) stack by %d pages\n", proc->id, num_pages);
    1.84  
    1.85  			if(pgalloc_vrange(fault_page, num_pages) != fault_page) {
    1.86 @@ -486,8 +500,7 @@
    1.87  				/* TODO: in the future we'd SIGSEGV the process here, for now just panic */
    1.88  				goto unhandled;
    1.89  			}
    1.90 -			proc->stack_start_pg = fault_page;
    1.91 -
    1.92 +			proc->user_stack_pg = fault_page;
    1.93  			return;
    1.94  		}
    1.95  	}
    1.96 @@ -500,8 +513,8 @@
    1.97  		if(frm->err & 8) {
    1.98  			printf("reserved bit set in some paging structure\n");
    1.99  		} else {
   1.100 -			printf("%s protection violation ", (frm->err & PG_WRITABLE) ? "write" : "read");
   1.101 -			printf("in %s mode\n", frm->err & PG_USER ? "user" : "kernel");
   1.102 +			printf("%s protection violation ", (frm->err & PG_WRITABLE) ? "WRITE" : "READ");
   1.103 +			printf("in %s mode\n", (frm->err & PG_USER) ? "user" : "kernel");
   1.104  		}
   1.105  	} else {
   1.106  		printf("page not present\n");
   1.107 @@ -553,7 +566,7 @@
   1.108  	/*printf("free_node\n");*/
   1.109  }
   1.110  
   1.111 -
   1.112 +#if 0
   1.113  /* clone_vm makes a copy of the current page tables, thus duplicating the
   1.114   * virtual address space.
   1.115   *
   1.116 @@ -594,7 +607,7 @@
   1.117  	for(i=0; i<kstart_dirent; i++) {
   1.118  		if(pgdir[i] & PG_PRESENT) {
   1.119  			paddr = alloc_phys_page();
   1.120 -			map_page(tblpg, ADDR_TO_PAGE(paddr), 0);
   1.121 +			map_page(tblpg, ADDR_TO_PAGE(paddr), PG_USER | PG_WRITABLE);
   1.122  
   1.123  			/* copy the page table */
   1.124  			memcpy(ntbl, PGTBL(i), PGSIZE);
   1.125 @@ -606,7 +619,7 @@
   1.126  		}
   1.127  	}
   1.128  
   1.129 -	/* kernel space */
   1.130 +	/* for the kernel space we'll just use the same page tables */
   1.131  	for(i=kstart_dirent; i<1024; i++) {
   1.132  		ndir[i] = pgdir[i];
   1.133  	}
   1.134 @@ -622,6 +635,7 @@
   1.135  
   1.136  	return paddr;
   1.137  }
   1.138 +#endif
   1.139  
   1.140  
   1.141  void dbg_print_vm(int area)