kern

diff src/vm.c @ 23:5454cee245a3

- fixed tragic mistake in the initial kernel image mapping
- page table modifications by disabling paging first
- page allocation completed
author John Tsiombikas <nuclear@member.fsf.org>
date Mon, 04 Apr 2011 23:34:06 +0300
parents 7ece008f09c5
children 53588744382c
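
The middle bullet describes the pattern map_page adopts below: the page tables are referenced by physical address (alloc_phys_page returns physical addresses), so writing to them while paging is on would go through translation and fault unless they happen to be mapped. A minimal sketch of the bracket, assuming only the vm-asm.S primitives declared in this diff (fragment, not a complete function):

    /* sketch of the disable/modify/flush/restore bracket (see map_page below) */
    int pgon = get_paging_status();	/* was paging on when we entered? */
    disable_paging();			/* page tables are addressed physically */

    /* ... write pgdir / page table entries here ... */

    flush_tlb_page(vpage);		/* drop any stale cached translation */
    if(pgon) {
    	enable_paging();		/* restore the previous paging state */
    }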
line diff
--- a/src/vm.c	Sun Apr 03 18:42:19 2011 +0300
+++ b/src/vm.c	Mon Apr 04 23:34:06 2011 +0300
@@ -25,9 +25,15 @@
 
 /* defined in vm-asm.S */
 void enable_paging(void);
+void disable_paging(void);
+int get_paging_status(void);
 void set_pgdir_addr(uint32_t addr);
+void flush_tlb(void);
+void flush_tlb_addr(uint32_t addr);
+#define flush_tlb_page(p)	flush_tlb_addr(PAGE_TO_ADDR(p))
 uint32_t get_fault_addr(void);
 
+static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high);
 static void pgfault(int inum, uint32_t err);
 static struct page_range *alloc_node(void);
 static void free_node(struct page_range *node);
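
vm-asm.S itself is not part of this changeset; on x86 the new primitives would toggle the PG bit (bit 31) of CR0 and issue invlpg. Plausible C-with-inline-asm equivalents, an assumption about what the assembly does rather than a copy of it:

    /* assumed behavior of the vm-asm.S primitives */
    static inline int get_paging_status(void)
    {
    	uint32_t cr0;
    	asm volatile("mov %%cr0, %0" : "=r"(cr0));
    	return (cr0 >> 31) & 1;		/* PG is bit 31 of CR0 */
    }

    static inline void disable_paging(void)
    {
    	uint32_t cr0;
    	asm volatile("mov %%cr0, %0" : "=r"(cr0));
    	cr0 &= ~0x80000000;		/* clear PG */
    	asm volatile("mov %0, %%cr0" :: "r"(cr0));
    }

    static inline void flush_tlb_addr(uint32_t addr)
    {
    	asm volatile("invlpg (%0)" :: "r"(addr) : "memory");	/* evict one TLB entry */
    }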
@@ -39,32 +45,65 @@
 static struct page_range *pglist[2];
 /* list of free page_range structures to be used in the lists */
 static struct page_range *node_pool;
+/* the first page range for the whole kernel address space, to get things started */
+static struct page_range first_node;
 
 
 void init_vm(struct mboot_info *mb)
 {
 	uint32_t idmap_end;
 
+	/* initialize the physical memory map and allocator */
 	init_mem(mb);
 
+	/* setup the page tables */
 	pgdir = (uint32_t*)alloc_phys_page();
-	memset(pgdir, 0, sizeof pgdir);
+	memset(pgdir, 0, PGSIZE);
+	set_pgdir_addr((int32_t)pgdir);
 
 	/* map the video memory and kernel code 1-1 */
 	get_kernel_mem_range(0, &idmap_end);
 	map_mem_range(IDMAP_START, idmap_end - IDMAP_START, IDMAP_START, 0);
 
+	/* set the page fault handler */
 	interrupt(PAGEFAULT, pgfault);
 
-	set_pgdir_addr((int32_t)pgdir);
+	/* we can enable paging now */
 	enable_paging();
+
+	/* initialize the virtual page allocator */
+	node_pool = 0;
+
+	first_node.start = ADDR_TO_PAGE(KMEM_START);
+	first_node.end = PAGE_COUNT;
+	first_node.next = 0;
+	pglist[MEM_KERNEL] = &first_node;
+
+	pglist[MEM_USER] = alloc_node();
+	pglist[MEM_USER]->start = 0;
+	pglist[MEM_USER]->end = ADDR_TO_PAGE(KMEM_START);
+	pglist[MEM_USER]->next = 0;
 }
 
-void map_page(int vpage, int ppage, unsigned int attr)
+/* if ppage == -1 we allocate a physical page by calling alloc_phys_page */
+int map_page(int vpage, int ppage, unsigned int attr)
 {
 	uint32_t *pgtbl;
-	int diridx = PAGE_TO_PGTBL(vpage);
-	int pgidx = PAGE_TO_PGTBL_PG(vpage);
+	int diridx, pgidx, pgon;
+
+	pgon = get_paging_status();
+	disable_paging();
+
+	if(ppage < 0) {
+		uint32_t addr = alloc_phys_page();
+		if(!addr) {
+			return -1;
+		}
+		ppage = ADDR_TO_PAGE(addr);
+	}
+
+	diridx = PAGE_TO_PGTBL(vpage);
+	pgidx = PAGE_TO_PGTBL_PG(vpage);
 
 	if(!(pgdir[diridx] & PG_PRESENT)) {
 		uint32_t addr = alloc_phys_page();
@@ -77,6 +116,12 @@
 	}
 
 	pgtbl[pgidx] = PAGE_TO_ADDR(ppage) | (attr & ATTR_PGTBL_MASK) | PG_PRESENT;
+	flush_tlb_page(vpage);
+
+	if(pgon) {
+		enable_paging();
+	}
+	return 0;
 }
 
 void unmap_page(int vpage)
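
With map_page returning a status and accepting ppage == -1, a caller can demand-map anonymous memory and see allocation failures. A short usage sketch; the virtual address here is made up for illustration:

    /* map one page at a hypothetical address, letting map_page pick
     * (and allocate) the physical page behind it
     */
    int vpage = ADDR_TO_PAGE(0xc0100000);
    if(map_page(vpage, -1, 0) == -1) {
    	panic("out of physical memory\n");
    }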
@@ -94,6 +139,7 @@
 		goto err;
 	}
 	pgtbl[pgidx] = 0;
+	flush_tlb_page(vpage);
 
 	return;
 err:
@@ -101,32 +147,42 @@
 }
 
 /* if ppg_start is -1, we allocate physical pages to map with alloc_phys_page() */
-void map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
+int map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
 {
-	int i;
+	int i, phys_pg;
+	uint32_t paddr;
 
 	for(i=0; i<pgcount; i++) {
-		uint32_t paddr = ppg_start == -1 ? alloc_phys_page() : ppg_start + i;
+		if(ppg_start < 0) {
+			if(!(paddr = alloc_phys_page())) {
+				return -1;
+			}
+			phys_pg = ADDR_TO_PAGE(paddr);
+		} else {
+			phys_pg = ppg_start + i;
+		}
 
-		map_page(vpg_start + i, paddr, attr);
+		map_page(vpg_start + i, phys_pg, attr);
 	}
+	return 0;
 }
 
-void map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr)
+/* if paddr is 0, we allocate physical pages with alloc_phys_page() */
+int map_mem_range(uint32_t vaddr, size_t sz, uint32_t paddr, unsigned int attr)
 {
 	int vpg_start, ppg_start, num_pages;
 
-	if(!sz) return;
+	if(!sz) return -1;
 
 	if(ADDR_TO_PGOFFS(paddr)) {
 		panic("map_mem_range called with unaligned physical address: %x\n", paddr);
 	}
 
 	vpg_start = ADDR_TO_PAGE(vaddr);
-	ppg_start = ADDR_TO_PAGE(paddr);
+	ppg_start = paddr > 0 ? ADDR_TO_PAGE(paddr) : -1;
 	num_pages = ADDR_TO_PAGE(sz) + 1;
 
-	map_page_range(vpg_start, num_pages, ppg_start, attr);
+	return map_page_range(vpg_start, num_pages, ppg_start, attr);
 }
 
 uint32_t virt_to_phys(uint32_t vaddr)
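
The page macros are defined elsewhere in the tree; assuming the usual 4KB-page shapes below, num_pages rounds the byte size up to whole pages, though ADDR_TO_PAGE(sz) + 1 maps one page more than needed whenever sz is already page-aligned:

    /* assumed macro definitions, not shown in this diff */
    #define PGSIZE			4096
    #define ADDR_TO_PAGE(a)		((uint32_t)(a) >> 12)	/* address -> page number */
    #define PAGE_TO_ADDR(p)		((uint32_t)(p) << 12)	/* page number -> address */
    #define ADDR_TO_PGOFFS(a)	((a) & 0xfff)		/* offset within the page */

    /* sz = 5000: ADDR_TO_PAGE(5000) + 1 = 2 pages, exactly enough;
     * sz = 4096: ADDR_TO_PAGE(4096) + 1 = 2 pages, one more than strictly needed
     */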
@@ -182,8 +238,10 @@
 	}
 
 	if(ret >= 0) {
-		/* allocate physical storage and map them */
-		map_page_range(ret, num, -1, 0);
+		/* allocate physical storage and map */
+		if(map_page_range(ret, num, -1, 0) == -1) {
+			ret = -1;
+		}
 	}
 
 	return ret;
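
From the caller's side, pgalloc now reports failure instead of silently mapping nothing. A brief usage sketch:

    int pg = pgalloc(4, MEM_KERNEL);	/* 4 contiguous virtual kernel pages */
    if(pg == -1) {
    	panic("failed to allocate kernel pages\n");
    }
    /* ... use the pages ... */
    pgfree(pg, 4);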
@@ -191,7 +249,63 @@
 
 void pgfree(int start, int num)
 {
-	/* TODO */
+	int area, end;
+	struct page_range *node, *new, *prev, *next;
+
+	if(!(new = alloc_node())) {
+		panic("pgfree: can't allocate new page_range node to add the freed pages\n");
+	}
+	new->start = start;
+	end = new->end = start + num;
+
+	area = PAGE_TO_ADDR(start) >= KMEM_START ? MEM_KERNEL : MEM_USER;
+
+	if(!pglist[area] || pglist[area]->start > start) {
+		next = new->next = pglist[area];
+		pglist[area] = new;
+		prev = 0;
+
+	} else {
+
+		prev = 0;
+		node = pglist[area];
+		next = node ? node->next : 0;
+
+		while(node) {
+			if(!next || next->start > start) {
+				/* place here, after node */
+				new->next = next;
+				node->next = new;
+				prev = node;	/* needed by coalesce after the loop */
+				break;
+			}
+
+			prev = node;
+			node = next;
+			next = node ? node->next : 0;
+		}
+	}
+
+	coalesce(prev, new, next);
+}
+
+static void coalesce(struct page_range *low, struct page_range *mid, struct page_range *high)
+{
+	if(high) {
+		if(mid->end == high->start) {
+			mid->end = high->end;
+			mid->next = high->next;
+			free_node(high);
+		}
+	}
+
+	if(low) {
+		if(low->end == mid->start) {
+			low->end = mid->end;
+			low->next = mid->next;
+			free_node(mid);
+		}
+	}
 }
 
 static void pgfault(int inum, uint32_t err)
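
struct page_range is defined elsewhere; from its use here it must be roughly { int start, end; struct page_range *next; } with end exclusive. A worked example of the insert-then-coalesce logic, with made-up page numbers: if the user free list holds [100,200) -> [300,400), then pgfree(200, 100) links a new [200,300) node between the two; coalesce first merges mid into high, giving [200,400), then low into the result, leaving a single [100,400) node and returning both spare nodes to the pool:

    /* hypothetical sequence, page numbers invented for illustration */
    pgfree(200, 100);		/* user free list becomes [100,400) */
    dbg_print_vm(MEM_USER);	/* should now show one free range */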
@@ -215,31 +329,69 @@
 }
 
 /* --- page range list node management --- */
+#define NODES_IN_PAGE	(PGSIZE / sizeof(struct page_range))
+
 static struct page_range *alloc_node(void)
 {
 	struct page_range *node;
-	uint32_t paddr;
+	int pg, i;
 
 	if(node_pool) {
 		node = node_pool;
 		node_pool = node_pool->next;
+		printf("alloc_node -> %x\n", (unsigned int)node);
 		return node;
 	}
 
-	/* no node structures in the pool, we need to allocate and map
-	 * a page, split it up into node structures, add them in the pool
-	 * and allocate one of them.
+	/* no node structures in the pool, we need to allocate a new page,
+	 * split it up into node structures, add them in the pool, and
+	 * allocate one of them.
 	 */
-	if(!(paddr = alloc_phys_page())) {
+	if((pg = pgalloc(1, MEM_KERNEL)) == -1) {
 		panic("ran out of physical memory while allocating VM range structures\n");
 	}
+	node_pool = (struct page_range*)PAGE_TO_ADDR(pg);
 
-	/* TODO cont. */
-	return 0;
+	/* link them up, skip the first as we'll just allocate it anyway */
+	for(i=2; i<NODES_IN_PAGE; i++) {
+		node_pool[i - 1].next = node_pool + i;
+	}
+	node_pool[NODES_IN_PAGE - 1].next = 0;
+
+	/* grab the first and return it */
+	node = node_pool++;
+	printf("alloc_node -> %x\n", (unsigned int)node);
+	return node;
 }
 
 static void free_node(struct page_range *node)
 {
 	node->next = node_pool;
 	node_pool = node;
+	printf("free_node\n");
 }
+
+
+void dbg_print_vm(int area)
+{
+	struct page_range *node = pglist[area];
+	int last = area == MEM_USER ? 0 : ADDR_TO_PAGE(KMEM_START);
+
+	printf("%s vm space\n", area == MEM_USER ? "user" : "kernel");
+
+	while(node) {
+		if(node->start > last) {
+			printf("  vm-used: %x -> %x\n", PAGE_TO_ADDR(last), PAGE_TO_ADDR(node->start));
+		}
+
+		printf("  vm-free: %x -> ", PAGE_TO_ADDR(node->start));
+		if(node->end >= PAGE_COUNT) {
+			printf("END\n");
+		} else {
+			printf("%x\n", PAGE_TO_ADDR(node->end));
+		}
+
+		last = node->end;
+		node = node->next;
+	}
+}
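
For reference, hypothetical dbg_print_vm output right after init_vm, assuming KMEM_START is 0xc0000000 (its value is not shown in this diff):

    dbg_print_vm(MEM_KERNEL);
    dbg_print_vm(MEM_USER);

    /* expected output:
     * kernel vm space
     *   vm-free: c0000000 -> END
     * user vm space
     *   vm-free: 0 -> c0000000
     */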