kern
changeset 20:369adbbd4bdd
added a few comments in mem.c
| author | John Tsiombikas <nuclear@member.fsf.org> |
|---|---|
| date | Wed, 30 Mar 2011 23:14:29 +0300 |
| parents | 8be069e6bb05 |
| children | 3ba93d8f586c |
| files | src/mem.c |
| diffstat | 1 file changed, 42 insertions(+), 10 deletions(-) |
line diff
--- a/src/mem.c	Wed Mar 30 22:42:16 2011 +0300
+++ b/src/mem.c	Wed Mar 30 23:14:29 2011 +0300
@@ -18,9 +18,18 @@
 /* end of kernel image */
 extern int _end;
 
+/* A bitmap is used to track which physical memory pages are used or available
+ * for allocation by alloc_phys_page.
+ *
+ * last_alloc_idx keeps track of the last 32bit element in the bitmap array
+ * where a free page was found. It's guaranteed that all the elements before
+ * this have no free pages, but it doesn't imply that there will be another
+ * free page there. So it's used as a starting point for the search.
+ */
 static uint32_t *bitmap;
 static int bmsize, last_alloc_idx;
 
+
 void init_mem(struct mboot_info *mb)
 {
 	int i, num_pages, max_pg = 0;
@@ -29,12 +38,19 @@
 	num_pages = 0;
 	last_alloc_idx = 0;
 
+	/* the allocation bitmap starts right at the end of the ELF image */
 	bitmap = (uint32_t*)&_end;
 
-	/* start by marking all posible pages as used */
+	/* start by marking all possible pages (2**20) as used. We do not "reserve"
+	 * all this space. Pages beyond the end of the useful bitmap area
+	 * ((char*)bitmap + bmsize), which will be determined after we traverse the
+	 * memory map, are going to be marked as available for allocation.
+	 */
 	memset(bitmap, 0xff, 1024 * 1024 / 8);
 
-	/* build the memory map */
+	/* if the bootloader gave us an available memory map, traverse it and mark
+	 * all the corresponding pages as free.
+	 */
 	if(mb->flags & MB_MMAP) {
 		struct mboot_mmap *mem, *mmap_end;
 
@@ -62,16 +78,20 @@
 			mem = (struct mboot_mmap*)((char*)mem + mem->skip + sizeof mem->skip);
 		}
 	} else if(mb->flags & MB_MEM) {
+		/* if we don't have a detailed memory map, just use the lower and upper
+		 * memory block sizes to determine which pages should be available.
+		 */
 		add_memory(0, mb->mem_lower);
 		add_memory(0x100000, mb->mem_upper * 1024);
 		max_pg = mb->mem_upper / 4;
 
 		printf("lower memory: %ukb, upper mem: %ukb\n", mb->mem_lower, mb->mem_upper);
 	} else {
+		/* I don't think this should ever happen with a multiboot-compliant boot loader */
 		panic("didn't get any memory info from the boot loader, I give up\n");
 	}
 
-	bmsize = max_pg / 8;	/* size of the bitmap in bytes */
+	bmsize = max_pg / 8;	/* size of the useful bitmap in bytes */
 
 	/* mark all the used pages as ... well ... used */
 	used_end = ((uint32_t)bitmap + bmsize - 1);
@@ -83,14 +103,12 @@
 	for(i=0; i<=used_end; i++) {
 		mark_page(i, USED);
 	}
-
-	/*for(i=0; i<bmsize / 4; i++) {
-		printf("%3d [%x]\n", i, bitmap[i]);
-		asm("hlt");
-	}
-	putchar('\n');*/
 }
 
+/* alloc_phys_page finds the first available page of physical memory,
+ * marks it as used in the bitmap, and returns its address. If there's
+ * no unused physical page, 0 is returned.
+ */
 uint32_t alloc_phys_page(void)
 {
 	int i, idx, max;
@@ -120,10 +138,17 @@
 		idx++;
 	}
 
-	panic("alloc_phys_page(): out of memory\n");
 	return 0;
 }
 
+/* free_phys_page marks the physical page which corresponds to the specified
+ * address as free in the allocation bitmap.
+ *
+ * CAUTION: no checks are done that this page should actually be freed or not.
+ * If you call free_phys_page with the address of some part of memory that was
+ * originally reserved due to it being in a memory hole or part of the kernel
+ * image or whatever, it will be subsequently allocatable by alloc_phys_page.
+ */
 void free_phys_page(uint32_t addr)
 {
 	int pg = ADDR_TO_PAGE(addr);
@@ -139,6 +164,9 @@
 	}
 }
 
+/* this is only ever used by the VM init code to find out what the extents of
+ * the kernel image are, in order to map them 1-1 before enabling paging.
+ */
void get_kernel_mem_range(uint32_t *start, uint32_t *end)
 {
 	if(start) {
@@ -155,6 +183,9 @@
 	}
 }
 
+/* adds a range of physical memory to the available pool. used during init_mem
+ * when traversing the memory map.
+ */
 static void add_memory(uint32_t start, size_t sz)
 {
 	int i, szpg, pg;
@@ -167,6 +198,7 @@
 	}
 }
 
+/* marks a page as used or free in the allocation bitmap */
 static void mark_page(int pg, int used)
 {
 	int idx = BM_IDX(pg);
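
The comments added in this changeset describe a simple bitmap allocator: one bit per physical page, a linear search that starts at last_alloc_idx, and no safety checks in free_phys_page. The sketch below illustrates that scheme in a self-contained, hosted program; it is not the kernel's code. The BM_IDX/BM_BIT/ADDR_TO_PAGE/PAGE_TO_ADDR macro definitions, the FREE constant, the fixed-size toy bitmap, and the test harness in main are assumptions made for illustration, and the adjustment of last_alloc_idx in free_phys_page is inferred from the invariant stated in the comment rather than shown in the diff.

```c
/* Illustrative sketch of a bitmap physical-page allocator as described in the
 * comments above. Macro definitions and the test harness are assumptions, not
 * the kernel's actual code.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define NUM_PAGES        1024                 /* toy pool: 1024 pages of 4kb */
#define BM_IDX(pg)       ((pg) / 32)          /* which 32bit word holds this page's bit */
#define BM_BIT(pg)       ((pg) & 31)          /* which bit within that word */
#define ADDR_TO_PAGE(a)  ((uint32_t)(a) >> 12)
#define PAGE_TO_ADDR(pg) ((uint32_t)(pg) << 12)

enum { FREE = 0, USED = 1 };

static uint32_t bitmap[NUM_PAGES / 32];       /* 1 bit per page, 1 means used */
static int last_alloc_idx;                    /* first word that may still hold a free page */

static void mark_page(int pg, int used)
{
	if(used) {
		bitmap[BM_IDX(pg)] |= 1u << BM_BIT(pg);
	} else {
		bitmap[BM_IDX(pg)] &= ~(1u << BM_BIT(pg));
	}
}

/* scan the bitmap starting at last_alloc_idx; every word before it is known to
 * be fully used, so it is never re-examined. Returns 0 if no free page exists
 * (page 0 is treated as reserved in this sketch so 0 can signal failure).
 */
static uint32_t alloc_phys_page(void)
{
	int idx, bit;

	for(idx = last_alloc_idx; idx < NUM_PAGES / 32; idx++) {
		if(bitmap[idx] != 0xffffffffu) {      /* at least one clear bit in this word */
			for(bit = 0; bit < 32; bit++) {
				int pg = idx * 32 + bit;
				if(!(bitmap[idx] & (1u << bit))) {
					mark_page(pg, USED);
					last_alloc_idx = idx;
					return PAGE_TO_ADDR(pg);
				}
			}
		}
	}
	return 0;	/* out of physical memory */
}

/* no sanity checks, exactly as the CAUTION comment warns: freeing a reserved
 * address makes it allocatable again. Lowering last_alloc_idx keeps the
 * "no free pages before this word" invariant intact.
 */
static void free_phys_page(uint32_t addr)
{
	int pg = ADDR_TO_PAGE(addr);

	mark_page(pg, FREE);
	if(BM_IDX(pg) < last_alloc_idx) {
		last_alloc_idx = BM_IDX(pg);
	}
}

int main(void)
{
	uint32_t a, b;

	memset(bitmap, 0, sizeof bitmap);         /* everything starts out free */
	mark_page(0, USED);                       /* reserve page 0 so 0 can mean "failure" */

	a = alloc_phys_page();
	b = alloc_phys_page();
	printf("allocated pages at %#x and %#x\n", (unsigned)a, (unsigned)b);

	free_phys_page(a);
	printf("reallocated: %#x\n", (unsigned)alloc_phys_page());  /* gets the first page back */
	return 0;
}
```

With that invariant in place, repeated allocations never rescan words that are known to be full, which keeps the common case cheap even though the real bitmap covers all 2**20 possible pages of the 32bit physical address space.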