kern

diff src/vm.c @ 47:f65b348780e3

continuing with the process implementation. not done yet, panics.
author John Tsiombikas <nuclear@member.fsf.org>
date Thu, 28 Jul 2011 05:43:04 +0300
parents b8f02479e3f4
children 4c9c16754b59
line diff
     1.1 --- a/src/vm.c	Thu Jul 28 05:33:59 2011 +0300
     1.2 +++ b/src/vm.c	Thu Jul 28 05:43:04 2011 +0300
     1.3 @@ -8,7 +8,6 @@
     1.4  #include "panic.h"
     1.5  
     1.6  
     1.7 -#define KMEM_START		0xc0000000
     1.8  #define IDMAP_START		0xa0000
     1.9  
    1.10  #define PGDIR_ADDR		0xfffff000
    1.11 @@ -56,6 +55,7 @@
    1.12  void init_vm(void)
    1.13  {
    1.14  	uint32_t idmap_end;
    1.15 +	int i, kmem_start_pg, pgtbl_base_pg;
    1.16  
    1.17  	/* setup the page tables */
    1.18  	pgdir = (uint32_t*)alloc_phys_page();
    1.19 @@ -79,15 +79,29 @@
    1.20  	/* initialize the virtual page allocator */
    1.21  	node_pool = 0;
    1.22  
    1.23 -	first_node.start = ADDR_TO_PAGE(KMEM_START);
    1.24 -	first_node.end = ADDR_TO_PAGE(PGTBL_BASE);
    1.25 +	kmem_start_pg = ADDR_TO_PAGE(KMEM_START);
    1.26 +	pgtbl_base_pg = ADDR_TO_PAGE(PGTBL_BASE);
    1.27 +
    1.28 +	first_node.start = kmem_start_pg;
    1.29 +	first_node.end = pgtbl_base_pg;
    1.30  	first_node.next = 0;
    1.31  	pglist[MEM_KERNEL] = &first_node;
    1.32  
    1.33  	pglist[MEM_USER] = alloc_node();
    1.34  	pglist[MEM_USER]->start = ADDR_TO_PAGE(idmap_end);
    1.35 -	pglist[MEM_USER]->end = ADDR_TO_PAGE(KMEM_START);
    1.36 +	pglist[MEM_USER]->end = kmem_start_pg;
    1.37  	pglist[MEM_USER]->next = 0;
    1.38 +
    1.39 +	/* temporarily map something into every 1024th page of the kernel address
    1.40 +	 * space to force pre-allocation of all the kernel page-tables
    1.41 +	 */
    1.42 +	for(i=kmem_start_pg; i<pgtbl_base_pg; i+=1024) {
    1.43 +		/* if there's already something mapped here, leave it alone */
    1.44 +		if(virt_to_phys_page(i) == -1) {
    1.45 +			map_page(i, 0, 0);
    1.46 +			unmap_page(i);
    1.47 +		}
    1.48 +	}
    1.49  }
    1.50  
    1.51  /* if ppage == -1 we allocate a physical page by calling alloc_phys_page */
    1.52 @@ -298,7 +312,7 @@
    1.53  	unsigned int attr = 0;	/* TODO */
    1.54  
    1.55  	area = (start >= ADDR_TO_PAGE(KMEM_START)) ? MEM_KERNEL : MEM_USER;
    1.56 -	if(area == KMEM_USER && start + num > ADDR_TO_PAGE(KMEM_START)) {
    1.57 +	if(area == MEM_USER && start + num > ADDR_TO_PAGE(KMEM_START)) {
    1.58  		printf("pgalloc_vrange: invalid range request crossing user/kernel split\n");
    1.59  		return -1;
    1.60  	}
    1.61 @@ -447,7 +461,7 @@
    1.62  	if(node_pool) {
    1.63  		node = node_pool;
    1.64  		node_pool = node_pool->next;
    1.65 -		printf("alloc_node -> %x\n", (unsigned int)node);
    1.66 +		/*printf("alloc_node -> %x\n", (unsigned int)node);*/
    1.67  		return node;
    1.68  	}
    1.69  
    1.70 @@ -468,7 +482,7 @@
    1.71  
    1.72  	/* grab the first and return it */
    1.73  	node = node_pool++;
    1.74 -	printf("alloc_node -> %x\n", (unsigned int)node);
    1.75 +	/*printf("alloc_node -> %x\n", (unsigned int)node);*/
    1.76  	return node;
    1.77  }
    1.78  
    1.79 @@ -476,26 +490,34 @@
    1.80  {
    1.81  	node->next = node_pool;
    1.82  	node_pool = node;
    1.83 -	printf("free_node\n");
    1.84 +	/*printf("free_node\n");*/
    1.85  }
    1.86  
    1.87  
    1.88 -/* clone_vmem makes a copy of the current page tables, thus duplicating
    1.89 - * the virtual address space.
    1.90 +/* clone_vm makes a copy of the current page tables, thus duplicating the
    1.91 + * virtual address space.
    1.92 + *
    1.93 + * For the kernel part of the address space (last 256 page directory entries)
    1.94 + * we don't want to duplicate the page tables, just point all page directory
    1.95 + * entries to the same set of page tables.
    1.96   *
    1.97   * Returns the physical address of the new page directory.
    1.98   */
    1.99 -uint32_t clone_vmem(void)
   1.100 +uint32_t clone_vm(void)
   1.101  {
   1.102 -	int i, dirpg, tblpg;
   1.103 +	int i, dirpg, tblpg, kmem_start_pg;
   1.104  	uint32_t paddr;
   1.105  	uint32_t *ndir, *ntbl;
   1.106  
   1.107 +	/* allocate the new page directory */
   1.108  	if((dirpg = pgalloc(1, MEM_KERNEL)) == -1) {
   1.109  		panic("clone_vmem: failed to allocate page directory page\n");
   1.110  	}
   1.111  	ndir = (uint32_t*)PAGE_TO_ADDR(dirpg);
   1.112  
   1.113 +	/* allocate a virtual page for temporarily mapping all new
   1.114 +	 * page tables while we populate them.
   1.115 +	 */
   1.116  	if((tblpg = pgalloc(1, MEM_KERNEL)) == -1) {
   1.117  		panic("clone_vmem: failed to allocate page table page\n");
   1.118  	}
   1.119 @@ -506,7 +528,10 @@
   1.120  	 */
   1.121  	free_phys_page(virt_to_phys(tblpg));
   1.122  
   1.123 -	for(i=0; i<1024; i++) {
   1.124 +	kmem_start_pg = ADDR_TO_PAGE(KMEM_START);
   1.125 +
   1.126 +	/* user space */
   1.127 +	for(i=0; i<kmem_start_pg; i++) {
   1.128  		if(pgdir[i] & PG_PRESENT) {
   1.129  			paddr = alloc_phys_page();
   1.130  			map_page(tblpg, ADDR_TO_PAGE(paddr), 0);
   1.131 @@ -521,6 +546,11 @@
   1.132  		}
   1.133  	}
   1.134  
   1.135 +	/* kernel space */
   1.136 +	for(i=kmem_start_pg; i<1024; i++) {
   1.137 +		ndir[i] = *PGTBL(i);
   1.138 +	}
   1.139 +
   1.140  	paddr = virt_to_phys(dirpg);
   1.141  
   1.142  	/* unmap before freeing to avoid deallocating the physical pages */