kern

annotate src/mem.c @ 23:5454cee245a3

- fixed tragic mistake in the initial kernel image mapping
- page table modifications by disabling paging first
- page allocation completed
author John Tsiombikas <nuclear@member.fsf.org>
date Mon, 04 Apr 2011 23:34:06 +0300
parents 3ba93d8f586c
children 9939a6d7a45a
#include <stdio.h>
#include <string.h>
#include "mem.h"
#include "panic.h"
#include "vm.h"

#define FREE 0
#define USED 1

#define BM_IDX(pg) ((pg) / 32)
#define BM_BIT(pg) ((pg) & 0x1f)

#define IS_FREE(pg) ((bitmap[BM_IDX(pg)] & (1 << BM_BIT(pg))) == 0)
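/* for example, page 70 falls in bitmap word BM_IDX(70) = 2 at bit
 * BM_BIT(70) = 6, so IS_FREE(70) tests bitmap[2] & (1 << 6)
 */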

static void mark_page(int pg, int free);
static void add_memory(uint32_t start, size_t size);

/* end of kernel image */
extern int _end;

/* A bitmap is used to track which physical memory pages are used or available
 * for allocation by alloc_phys_page.
 *
 * last_alloc_idx keeps track of the last 32bit element in the bitmap array
 * where a free page was found. It's guaranteed that all the elements before
 * this have no free pages, but it doesn't imply that there will be another
 * free page there. So it's used as a starting point for the search.
 */
static uint32_t *bitmap;
static int bmsize, last_alloc_idx;
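/* note: bmsize is in bytes, while last_alloc_idx indexes 32-bit words of
 * the bitmap (each word covers 32 pages)
 */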


void init_mem(struct mboot_info *mb)
{
	int i, num_pages, max_pg = 0;
	uint32_t used_end;

	num_pages = 0;
	last_alloc_idx = 0;

	/* the allocation bitmap starts right at the end of the ELF image */
	bitmap = (uint32_t*)&_end;

	/* start by marking all possible pages (2**20) as used. We do not "reserve"
	 * all this space. Pages beyond the end of the useful bitmap area
	 * ((char*)bitmap + bmsize), which will be determined after we traverse the
	 * memory map, are going to be marked as available for allocation.
	 */
	memset(bitmap, 0xff, 1024 * 1024 / 8);
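	/* note: one bit per page for all 2**20 possible pages makes the full
	 * bitmap 1024 * 1024 / 8 = 128KB, covering the whole 4GB address space
	 */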

	/* if the bootloader gave us an available memory map, traverse it and mark
	 * all the corresponding pages as free.
	 */
	if(mb->flags & MB_MMAP) {
		struct mboot_mmap *mem, *mmap_end;

		mem = mb->mmap;
		mmap_end = (struct mboot_mmap*)((char*)mb->mmap + mb->mmap_len);

		printf("memory map:\n");
		while(mem < mmap_end) {
			/* ignore memory ranges that start beyond the 4gb mark */
			if(mem->base_high == 0 && mem->base_low != 0xffffffff) {
				char *type;
				unsigned int end, rest = 0xffffffff - mem->base_low;

				/* make sure the length does not extend beyond 4gb */
				if(mem->length_high || mem->length_low > rest) {
					mem->length_low = rest;
				}
				end = mem->base_low + mem->length_low;

				if(mem->type == MB_MEM_VALID) {
					type = "free:";
					add_memory(mem->base_low, mem->length_low);

					num_pages = ADDR_TO_PAGE(mem->base_low + mem->length_low);
					if(max_pg < num_pages) {
						max_pg = num_pages;
					}
				} else {
					type = "hole:";
				}

				printf(" %s %x - %x (%u bytes)\n", type, mem->base_low, end, mem->length_low);
			}
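			/* the skip field of an mmap entry holds the size of the rest of
			 * the entry (it doesn't count itself), so the next entry starts
			 * skip + sizeof skip bytes after the current one
			 */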
			mem = (struct mboot_mmap*)((char*)mem + mem->skip + sizeof mem->skip);
		}
	} else if(mb->flags & MB_MEM) {
		/* if we don't have a detailed memory map, just use the lower and upper
		 * memory block sizes to determine which pages should be available.
		 */
		add_memory(0, mb->mem_lower * 1024);	/* mem_lower is in KB */
		add_memory(0x100000, mb->mem_upper * 1024);
		max_pg = mb->mem_upper / 4;
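		/* mem_upper is in kilobytes and pages are 4KB, so mem_upper / 4 is
		 * roughly the number of page frames above the 1MB mark
		 */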

		printf("lower memory: %ukb, upper mem: %ukb\n", mb->mem_lower, mb->mem_upper);
	} else {
		/* I don't think this should ever happen with a multiboot-compliant boot loader */
		panic("didn't get any memory info from the boot loader, I give up\n");
	}

	bmsize = max_pg / 8;	/* size of the useful bitmap in bytes */
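	/* e.g. with 64MB of RAM, max_pg is 16384 and the useful bitmap is
	 * 16384 / 8 = 2048 bytes
	 */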

	/* mark all the used pages as ... well ... used */
	used_end = ((uint32_t)bitmap + bmsize - 1);

	printf("marking pages up to %x ", used_end);
	used_end = ADDR_TO_PAGE(used_end);
	printf("(page: %d) inclusive as used\n", used_end);

	for(i=0; i<=used_end; i++) {
		mark_page(i, USED);
	}
}

/* alloc_phys_page finds the first available page of physical memory,
 * marks it as used in the bitmap, and returns its address. If there's
 * no unused physical page, 0 is returned.
 */
uint32_t alloc_phys_page(void)
{
	int i, idx, max;

	idx = last_alloc_idx;
	max = bmsize / 4;
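	/* bmsize is in bytes, so bmsize / 4 is the number of 32-bit words in the
	 * useful part of the bitmap; each word accounts for 32 pages (128KB)
	 */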

	while(idx <= max) {
		/* if at least one bit is 0 then we have at least
		 * one free page. find it and allocate it.
		 */
		if(bitmap[idx] != 0xffffffff) {
			for(i=0; i<32; i++) {
				int pg = idx * 32 + i;
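				/* e.g. word idx = 3, bit i = 5 corresponds to page 3 * 32 + 5 = 101 */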

				if(IS_FREE(pg)) {
					mark_page(pg, USED);

					last_alloc_idx = idx;

					printf("alloc_phys_page() -> %x (page: %d)\n", PAGE_TO_ADDR(pg), pg);
					return PAGE_TO_ADDR(pg);
				}
			}
			panic("can't happen: alloc_phys_page (mem.c)\n");
		}
		idx++;
	}

	return 0;
}

/* free_phys_page marks the physical page which corresponds to the specified
 * address as free in the allocation bitmap.
 *
 * CAUTION: no checks are done on whether this page should actually be freed.
 * If you call free_phys_page with the address of some part of memory that was
 * originally reserved due to it being in a memory hole or part of the kernel
 * image or whatever, it will be subsequently allocatable by alloc_phys_page.
 */
void free_phys_page(uint32_t addr)
{
	int pg = ADDR_TO_PAGE(addr);
	int bmidx = BM_IDX(pg);

	if(IS_FREE(pg)) {
		panic("free_phys_page(%d): I thought that was already free!\n", pg);
	}

	mark_page(pg, FREE);
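	/* if the freed page lives in an earlier bitmap word than the current
	 * search start, pull last_alloc_idx back so the next allocation will
	 * reconsider it
	 */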
	if(bmidx < last_alloc_idx) {
		last_alloc_idx = bmidx;
	}
}

/* this is only ever used by the VM init code to find out what the extents of
 * the kernel image are, in order to map them 1-1 before enabling paging.
 */
void get_kernel_mem_range(uint32_t *start, uint32_t *end)
{
	if(start) {
		*start = 0x100000;
	}
	if(end) {
		uint32_t e = (uint32_t)bitmap + bmsize;

		if(e & PGOFFS_MASK) {
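			/* e is not page aligned; round it up to the next page boundary
			 * (this assumes 4KB pages, i.e. PGOFFS_MASK == 0xfff)
			 */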
			*end = (e + 4096) & ~PGOFFS_MASK;
		} else {
			*end = e;
		}
	}
}

/* adds a range of physical memory to the available pool. used during init_mem
 * when traversing the memory map.
 */
static void add_memory(uint32_t start, size_t sz)
{
	int i, szpg, pg;

	szpg = ADDR_TO_PAGE(sz);
	pg = ADDR_TO_PAGE(start);
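	/* assuming ADDR_TO_PAGE truncates, any trailing partial page in sz is
	 * dropped; only whole pages are added to the free pool
	 */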

	for(i=0; i<szpg; i++) {
		mark_page(pg++, FREE);
	}
}

/* marks a page as used or free in the allocation bitmap */
static void mark_page(int pg, int used)
{
	int idx = BM_IDX(pg);
	int bit = BM_BIT(pg);

	if(used) {
		bitmap[idx] |= 1 << bit;
	} else {
		bitmap[idx] &= ~(1 << bit);
	}
}