kern

changeset 22:7ece008f09c5

writing the vm
author John Tsiombikas <nuclear@member.fsf.org>
date Sun, 03 Apr 2011 18:42:19 +0300
parents 3ba93d8f586c
children 5454cee245a3
files src/vm.c src/vm.h
diffstat 2 files changed, 111 insertions(+), 11 deletions(-)
line diff
     1.1 --- a/src/vm.c	Sun Apr 03 08:23:07 2011 +0300
     1.2 +++ b/src/vm.c	Sun Apr 03 18:42:19 2011 +0300
     1.3 @@ -8,16 +8,6 @@
     1.4  #include "panic.h"
     1.5  
     1.6  
     1.7 -/* defined in vm-asm.S */
     1.8 -void enable_paging(void);
     1.9 -void set_pgdir_addr(uint32_t addr);
    1.10 -uint32_t get_fault_addr(void);
    1.11 -
    1.12 -static void pgfault(int inum, uint32_t err);
    1.13 -
    1.14 -/* page directory */
    1.15 -static uint32_t *pgdir;
    1.16 -
    1.17  #define KMEM_START		0xc0000000
    1.18  #define IDMAP_START		0xa0000
    1.19  
    1.20 @@ -27,6 +17,30 @@
    1.21  
    1.22  #define PAGEFAULT		14
    1.23  
    1.24 +
    1.25 +struct page_range {
    1.26 +	int start, end;
    1.27 +	struct page_range *next;
    1.28 +};
    1.29 +
    1.30 +/* defined in vm-asm.S */
    1.31 +void enable_paging(void);
    1.32 +void set_pgdir_addr(uint32_t addr);
    1.33 +uint32_t get_fault_addr(void);
    1.34 +
    1.35 +static void pgfault(int inum, uint32_t err);
    1.36 +static struct page_range *alloc_node(void);
    1.37 +static void free_node(struct page_range *node);
    1.38 +
    1.39 +/* page directory */
    1.40 +static uint32_t *pgdir;
    1.41 +
    1.42 +/* 2 lists of free ranges, for kernel memory and user memory */
    1.43 +static struct page_range *pglist[2];
    1.44 +/* list of free page_range structures to be used in the lists */
    1.45 +static struct page_range *node_pool;
    1.46 +
    1.47 +
    1.48  void init_vm(struct mboot_info *mb)
    1.49  {
    1.50  	uint32_t idmap_end;
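
The declarations above set up the bookkeeping for virtual address space allocation: each pglist entry holds a list of free page ranges (MEM_KERNEL for addresses above KMEM_START, MEM_USER for everything below), and node_pool recycles the page_range structures themselves. A rough sketch of how the two lists might get seeded once alloc_node() can hand out nodes; the pgfree_list_init() name, the ADDR_TO_PAGE() macro and the exact boundary pages are assumptions for illustration, not part of this changeset:

	/* hypothetical helper: seed the two free-range lists.
	 * ADDR_TO_PAGE() is assumed to turn an address into a page number,
	 * in the spirit of the ADDR_TO_PGOFFS() macro used in virt_to_phys().
	 */
	static void pgfree_list_init(void)
	{
		struct page_range *node;

		/* kernel area: every page from KMEM_START up to the top of memory */
		node = alloc_node();
		node->start = ADDR_TO_PAGE(KMEM_START);
		node->end = ADDR_TO_PAGE(0xffffffff) + 1;
		node->next = 0;
		pglist[MEM_KERNEL] = node;

		/* user area: everything below KMEM_START, skipping page 0 */
		node = alloc_node();
		node->start = 1;
		node->end = ADDR_TO_PAGE(KMEM_START);
		node->next = 0;
		pglist[MEM_USER] = node;
	}
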
    1.51 @@ -86,12 +100,15 @@
    1.52  	printf("unmap_page(%d): page already not mapped\n", vpage);
    1.53  }
    1.54  
    1.55 +/* if ppg_start is -1, we allocate physical pages to map with alloc_phys_page() */
    1.56  void map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
    1.57  {
    1.58  	int i;
    1.59  
    1.60  	for(i=0; i<pgcount; i++) {
    1.61 -		map_page(vpg_start + i, ppg_start + i, attr);
    1.62 +		uint32_t paddr = ppg_start == -1 ? alloc_phys_page() : ppg_start + i;
    1.63 +
    1.64 +		map_page(vpg_start + i, paddr, attr);
    1.65  	}
    1.66  }
    1.67  
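
The -1 convention lets map_page_range() cover both cases: with a real ppg_start the physical page number advances in lockstep with the virtual one, and with -1 every iteration pulls a fresh page from alloc_phys_page(). Two hypothetical call sites, with made-up page numbers, just to show the two modes:

	/* fixed mapping: 16 virtual pages starting at vpage 0xa0 onto the
	 * same-numbered physical pages (e.g. an identity-mapped region)
	 */
	map_page_range(0xa0, 16, 0xa0, 0);

	/* anonymous mapping: back 4 virtual pages with whatever physical
	 * pages alloc_phys_page() returns, one per iteration
	 */
	map_page_range(0xc0000, 4, -1, 0);
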
    1.68 @@ -131,6 +148,52 @@
    1.69  	return pgaddr | ADDR_TO_PGOFFS(vaddr);
    1.70  }
    1.71  
    1.72 +/* allocate a contiguous block of virtual memory pages along with
    1.73 + * backing physical memory for them, and update the page table.
    1.74 + */
    1.75 +int pgalloc(int num, int area)
    1.76 +{
    1.77 +	int ret = -1;
    1.78 +	struct page_range *node, *prev, dummy;
    1.79 +
    1.80 +	dummy.next = pglist[area];
    1.81 +	node = pglist[area];
    1.82 +	prev = &dummy;
    1.83 +
    1.84 +	while(node) {
    1.85 +		if(node->end - node->start >= num) {
    1.86 +			ret = node->start;
    1.87 +			node->start += num;
    1.88 +
    1.89 +			if(node->start == node->end) {
     1.90 +				prev->next = node->next;
     1.91 +
     1.92 +				if(node == pglist[area]) {
     1.93 +					pglist[area] = node->next;
     1.94 +				}
     1.95 +				node->next = 0;
    1.96 +				free_node(node);
    1.97 +			}
    1.98 +			break;
    1.99 +		}
   1.100 +
   1.101 +		prev = node;
   1.102 +		node = node->next;
   1.103 +	}
   1.104 +
   1.105 +	if(ret >= 0) {
   1.106 +		/* allocate physical storage and map them */
   1.107 +		map_page_range(ret, num, -1, 0);
   1.108 +	}
   1.109 +
   1.110 +	return ret;
   1.111 +}
   1.112 +
   1.113 +void pgfree(int start, int num)
   1.114 +{
   1.115 +	/* TODO */
   1.116 +}
   1.117 +
   1.118  static void pgfault(int inum, uint32_t err)
   1.119  {
   1.120  	printf("~~~~ PAGE FAULT ~~~~\n");
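
pgalloc() does a first-fit walk of the chosen area's range list: the first range with at least num pages gives up its low end, and a range that shrinks to nothing is unlinked and handed back to the node pool before the new pages get backing storage through map_page_range(). pgfree() is only a stub so far; the sketch below shows one way it could return a range, with coalescing of adjacent ranges left out, the backing physical pages not released, and the KMEM_START test for picking the list being an assumption (ADDR_TO_PAGE() assumed as before):

	void pgfree(int start, int num)
	{
		int area;
		struct page_range *node;

		/* decide which free list the range belongs to from its page number */
		area = start >= ADDR_TO_PAGE(KMEM_START) ? MEM_KERNEL : MEM_USER;

		if(!(node = alloc_node())) {
			panic("pgfree: can't allocate a page_range node\n");
		}
		node->start = start;
		node->end = start + num;

		/* prepend the freed range; unmapping the pages and freeing the
		 * physical memory behind them is left out of this sketch
		 */
		node->next = pglist[area];
		pglist[area] = node;
	}

Prepending keeps the operation O(1); keeping each list sorted and merging neighbouring ranges would stop the lists from growing long, at the cost of a walk.
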
   1.121 @@ -150,3 +213,33 @@
   1.122  
   1.123  	panic("unhandled page fault\n");
   1.124  }
   1.125 +
   1.126 +/* --- page range list node management --- */
   1.127 +static struct page_range *alloc_node(void)
   1.128 +{
   1.129 +	struct page_range *node;
   1.130 +	uint32_t paddr;
   1.131 +
   1.132 +	if(node_pool) {
   1.133 +		node = node_pool;
   1.134 +		node_pool = node_pool->next;
   1.135 +		return node;
   1.136 +	}
   1.137 +
   1.138 +	/* no node structures in the pool, we need to allocate and map
   1.139 +	 * a page, split it up into node structures, add them in the pool
   1.140 +	 * and allocate one of them.
   1.141 +	 */
   1.142 +	if(!(paddr = alloc_phys_page())) {
   1.143 +		panic("ran out of physical memory while allocating VM range structures\n");
   1.144 +	}
   1.145 +
   1.146 +	/* TODO cont. */
   1.147 +	return 0;
   1.148 +}
   1.149 +
   1.150 +static void free_node(struct page_range *node)
   1.151 +{
   1.152 +	node->next = node_pool;
   1.153 +	node_pool = node;
   1.154 +}
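
alloc_node()'s refill path stops at a TODO right after grabbing a physical page. The steps the comment spells out (map the page, split it into page_range structures, add them to the pool, hand one out) could continue roughly as below; how a virtual page for the pool gets reserved without recursing back into the allocator is exactly what the TODO leaves open, so vaddr is simply assumed here, and PGSIZE stands in for whatever the page size constant is named:

	/* illustrative continuation: assume the page from alloc_phys_page()
	 * has already been mapped and vaddr is its virtual address
	 */
	struct page_range *first = (struct page_range*)vaddr;
	int i, num_nodes = PGSIZE / sizeof *first;

	/* chain the fresh nodes together and hang them off the pool */
	for(i=0; i<num_nodes - 1; i++) {
		first[i].next = &first[i + 1];
	}
	first[num_nodes - 1].next = node_pool;
	node_pool = first;

	/* hand out the first one, exactly like the fast path above */
	node = node_pool;
	node_pool = node_pool->next;
	return node;
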
     2.1 --- a/src/vm.h	Sun Apr 03 08:23:07 2011 +0300
     2.2 +++ b/src/vm.h	Sun Apr 03 18:42:19 2011 +0300
     2.3 @@ -42,4 +42,11 @@
     2.4  
     2.5  uint32_t virt_to_phys(uint32_t vaddr);
     2.6  
     2.7 +enum {
     2.8 +	MEM_KERNEL,
     2.9 +	MEM_USER
    2.10 +};
    2.11 +
    2.12 +int pgalloc(int num, int area);
    2.13 +
    2.14  #endif	/* VM_H_ */
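
On the header side, the new enum gives callers names for the two areas and pgalloc() becomes part of the public VM interface (pgfree() is not exported yet). A hypothetical caller, assuming a PAGE_TO_ADDR() macro that turns a page number back into a virtual address:

	#include "vm.h"

	/* hypothetical helper: grab `count` mapped, physically backed kernel pages */
	void *alloc_kernel_pages(int count)
	{
		int vpage;

		if((vpage = pgalloc(count, MEM_KERNEL)) == -1) {
			return 0;
		}
		return (void*)PAGE_TO_ADDR(vpage);
	}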