
diff src/vm.c @ 68:0a205396e1a0

- added a generic red-black tree data structure
- added a VM map as a red-black tree of vm_pages in the process structure
- constructed the VM map of the memory initially passed by the kernel to the first process
author John Tsiombikas <nuclear@mutantstargoat.com>
date Mon, 10 Oct 2011 04:16:01 +0300
parents c2692696f9ab
children b45e2d5f0ae1
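
For reference, the new cons_vmmap() added below builds on the generic red-black tree and the vm_page record described in the commit message. The sketch that follows is inferred purely from how this diff uses them: the real declarations live in the repository's headers (not part of this diff), so the field types, the rb_init()/rb_inserti() signatures, the struct rbnode/root placeholders, and the vmmap member of struct process are all assumptions rather than the actual definitions.

/* Sketch (inferred from this diff only) of the declarations that
 * cons_vmmap() relies on; the authoritative definitions are in the
 * repository's headers, not reproduced here.
 */
#include <stdint.h>

struct rbnode;				/* tree node, opaque placeholder here */

struct rbtree {				/* generic red-black tree added in this change */
	struct rbnode *root;
	/* key-type / comparison bookkeeping omitted */
};

enum rb_key_type { RB_KEY_INT };	/* integer keys, as requested by cons_vmmap() */

void rb_init(struct rbtree *rb, enum rb_key_type keytype);
void rb_inserti(struct rbtree *rb, int key, void *data);

/* one node of the per-process VM map: a mapped virtual page and the
 * physical page that backs it
 */
struct vm_page {
	int vpage;		/* virtual page number */
	int ppage;		/* physical page number, used as the tree key */
	uint32_t flags;		/* page table attribute bits (ATTR_PGTBL_MASK) */
	int nref;		/* reference count; 1 until the page is shared */
};

/* the process structure presumably gains a member along these lines */
struct process {
	/* ... */
	struct rbtree vmmap;	/* VM map: vm_page nodes keyed by ppage */
};

With this in place, the first process would get its map through a call such as cons_vmmap(&proc->vmmap) on the address space it inherits from the kernel, and keying the tree on ppage rather than vpage presumably makes it cheap to find every mapping of a physical frame once pages start being shared (which is what the nref field hints at).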
--- a/src/vm.c	Sun Oct 09 20:38:35 2011 +0300
+++ b/src/vm.c	Mon Oct 10 04:16:01 2011 +0300
@@ -17,7 +17,6 @@
 
 #define ATTR_PGDIR_MASK	0x3f
 #define ATTR_PGTBL_MASK	0x1ff
-#define ADDR_PGENT_MASK	0xfffff000
 
 #define PAGEFAULT		14
 
@@ -68,7 +67,7 @@
 	map_mem_range(IDMAP_START, idmap_end - IDMAP_START, IDMAP_START, 0);
 
 	/* make the last page directory entry point to the page directory */
-	pgdir[1023] = ((uint32_t)pgdir & ADDR_PGENT_MASK) | PG_PRESENT;
+	pgdir[1023] = ((uint32_t)pgdir & PGENT_ADDR_MASK) | PG_PRESENT;
 	pgdir = (uint32_t*)PGDIR_ADDR;
 
 	/* set the page fault handler */
@@ -149,7 +148,7 @@
 		if(pgon) {
 			pgtbl = PGTBL(diridx);
 		} else {
-			pgtbl = (uint32_t*)(pgdir[diridx] & ADDR_PGENT_MASK);
+			pgtbl = (uint32_t*)(pgdir[diridx] & PGENT_ADDR_MASK);
 		}
 	}
 
@@ -614,7 +613,8 @@
 				 * page table and unset the writable bits.
 				 */
 				for(j=0; j<1024; j++) {
-					PGTBL(i)[j] &= ~(uint32_t)PG_WRITABLE;
+					clear_page_bit(i * 1024 + j, PG_WRITABLE, PAGE_ONLY);
+					/*PGTBL(i)[j] &= ~(uint32_t)PG_WRITABLE;*/
				}
			}
 
@@ -699,6 +699,40 @@
 }
 
 
+#define USER_PGDIR_ENTRIES	PAGE_TO_PGTBL(KMEM_START_PAGE)
+int cons_vmmap(struct rbtree *vmmap)
+{
+	int i, j;
+
+	rb_init(vmmap, RB_KEY_INT);
+
+	for(i=0; i<USER_PGDIR_ENTRIES; i++) {
+		if(pgdir[i] & PG_PRESENT) {
+			/* page table is present, iterate through its 1024 pages */
+			uint32_t *pgtbl = PGTBL(i);
+
+			for(j=0; j<1024; j++) {
+				if(pgtbl[j] & PG_PRESENT) {
+					struct vm_page *vmp;
+
+					if(!(vmp = malloc(sizeof *vmp))) {
+						panic("cons_vmap failed to allocate memory");
+					}
+					vmp->vpage = i * 1024 + j;
+					vmp->ppage = ADDR_TO_PAGE(pgtbl[j] & PGENT_ADDR_MASK);
+					vmp->flags = pgtbl[j] & ATTR_PGTBL_MASK;
+					vmp->nref = 1;	/* when first created assume no sharing */
+
+					rb_inserti(vmmap, vmp->ppage, vmp);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+
 void dbg_print_vm(int area)
 {
 	struct page_range *node;