#include <stdio.h>
#include <string.h>
#include "mem.h"
#include "panic.h"
#include "vm.h"
#include "intr.h"

#define FREE	0
#define USED	1

#define BM_IDX(pg)	((pg) / 32)
#define BM_BIT(pg)	((pg) & 0x1f)

#define IS_FREE(pg)	((bitmap[BM_IDX(pg)] & (1 << BM_BIT(pg))) == 0)

static void mark_page(int pg, int free);
static void add_memory(uint32_t start, size_t size);

/* end of kernel image */
extern int _end;

/* A bitmap is used to track which physical memory pages are used or available
 * for allocation by alloc_phys_page.
 *
 * last_alloc_idx keeps track of the last 32-bit element in the bitmap array
 * where a free page was found. It's guaranteed that all the elements before
 * this one have no free pages, but it doesn't imply that there will be another
 * free page there. So it's used as a starting point for the search.
 */
static uint32_t *bitmap;
static int bmsize, last_alloc_idx;


void init_mem(struct mboot_info *mb)
{
	int i, num_pages, max_pg = 0;
	uint32_t used_end;

	num_pages = 0;
	last_alloc_idx = 0;

	/* the allocation bitmap starts right at the end of the ELF image */
	bitmap = (uint32_t*)&_end;

	/* start by marking all possible pages (2**20) as used. We do not "reserve"
	 * all this space. Pages beyond the end of the useful bitmap area
	 * ((char*)bitmap + bmsize), which will be determined after we traverse the
	 * memory map, are going to be marked as available for allocation.
	 */
	memset(bitmap, 0xff, 1024 * 1024 / 8);

	/* if the bootloader gave us an available memory map, traverse it and mark
	 * all the corresponding pages as free.
	 */
	if(mb->flags & MB_MMAP) {
		struct mboot_mmap *mem, *mmap_end;

		mem = mb->mmap;
		mmap_end = (struct mboot_mmap*)((char*)mb->mmap + mb->mmap_len);

		printf("memory map:\n");
		while(mem < mmap_end) {
			/* ignore memory ranges that start beyond the 4gb mark */
			if(mem->base_high == 0 && mem->base_low != 0xffffffff) {
				char *type;
				unsigned int end, rest = 0xffffffff - mem->base_low;

				/* make sure the length does not extend beyond 4gb */
				if(mem->length_high || mem->length_low > rest) {
					mem->length_low = rest;
				}
				end = mem->base_low + mem->length_low;

				if(mem->type == MB_MEM_VALID) {
					type = "free:";
					add_memory(mem->base_low, mem->length_low);

					num_pages = ADDR_TO_PAGE(mem->base_low + mem->length_low);
					if(max_pg < num_pages) {
						max_pg = num_pages;
					}
				} else {
					type = "hole:";
				}

				printf("  %s %x - %x (%u bytes)\n", type, mem->base_low, end, mem->length_low);
			}
			mem = (struct mboot_mmap*)((char*)mem + mem->skip + sizeof mem->skip);
		}
	} else if(mb->flags & MB_MEM) {
		/* if we don't have a detailed memory map, just use the lower and upper
		 * memory block sizes to determine which pages should be available.
		 * both mem_lower and mem_upper are reported in kilobytes.
		 */
		add_memory(0, mb->mem_lower * 1024);
		add_memory(0x100000, mb->mem_upper * 1024);
		max_pg = mb->mem_upper / 4;

		printf("lower memory: %ukb, upper mem: %ukb\n", mb->mem_lower, mb->mem_upper);
	} else {
		/* I don't think this should ever happen with a multiboot-compliant boot loader */
		panic("didn't get any memory info from the boot loader, I give up\n");
	}

	bmsize = max_pg / 8;	/* size of the useful bitmap in bytes */

	/* mark all the used pages as ... well ... used */
	used_end = ((uint32_t)bitmap + bmsize - 1);

	printf("marking pages up to %x ", used_end);
	used_end = ADDR_TO_PAGE(used_end);
	printf("(page: %d) inclusive as used\n", used_end);

	for(i=0; i<=used_end; i++) {
		mark_page(i, USED);
	}
}

/* alloc_phys_page finds the first available page of physical memory,
 * marks it as used in the bitmap, and returns its address. If there's
 * no unused physical page, 0 is returned.
 */
uint32_t alloc_phys_page(void)
{
	int i, idx, max, intr_state;

	intr_state = get_intr_state();
	disable_intr();

	idx = last_alloc_idx;
	max = bmsize / 4;

	while(idx <= max) {
		/* if at least one bit is 0 then we have at least
		 * one free page. find it and allocate it.
		 */
		if(bitmap[idx] != 0xffffffff) {
			for(i=0; i<32; i++) {
				int pg = idx * 32 + i;

				if(IS_FREE(pg)) {
					mark_page(pg, USED);

					last_alloc_idx = idx;

					/*printf("alloc_phys_page() -> %x (page: %d)\n", PAGE_TO_ADDR(pg), pg);*/

					set_intr_state(intr_state);
					return PAGE_TO_ADDR(pg);
				}
			}
			panic("can't happen: alloc_phys_page (mem.c)\n");
		}
		idx++;
	}

	set_intr_state(intr_state);
	return 0;
}

/* free_phys_page marks the physical page which corresponds to the specified
 * address as free in the allocation bitmap.
 *
 * CAUTION: no checks are done that this page should actually be freed or not.
 * If you call free_phys_page with the address of some part of memory that was
 * originally reserved, due to it being in a memory hole or part of the kernel
 * image or whatever, it will be subsequently allocatable by alloc_phys_page.
 */
void free_phys_page(uint32_t addr)
{
	int pg = ADDR_TO_PAGE(addr);
	int bmidx = BM_IDX(pg);

	int intr_state = get_intr_state();
	disable_intr();

	if(IS_FREE(pg)) {
		panic("free_phys_page(%d): I thought that was already free!\n", pg);
	}

	mark_page(pg, FREE);
	if(bmidx < last_alloc_idx) {
		last_alloc_idx = bmidx;
	}

	set_intr_state(intr_state);
}

/* this is only ever used by the VM init code to find out what the extents of
 * the kernel image are, in order to map them 1-1 before enabling paging.
 */
void get_kernel_mem_range(uint32_t *start, uint32_t *end)
{
	if(start) {
		*start = 0x100000;
	}
	if(end) {
		uint32_t e = (uint32_t)bitmap + bmsize;

		if(e & PGOFFS_MASK) {
			/* not page-aligned: round up to the next page boundary */
			*end = (e + 4096) & ~PGOFFS_MASK;
		} else {
			*end = e;
		}
	}
}

/* adds a range of physical memory to the available pool. used during init_mem
 * when traversing the memory map.
 */
static void add_memory(uint32_t start, size_t sz)
{
	int i, szpg, pg;

	szpg = ADDR_TO_PAGE(sz);
	pg = ADDR_TO_PAGE(start);

	/* mark every page in the range as free */
	for(i=0; i<szpg; i++) {
		mark_page(pg + i, FREE);
	}
}
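
/* mark_page is declared near the top of the file but its definition is not
 * included in this excerpt. The following is a minimal sketch of what it
 * presumably does, reconstructed from the declaration, the call sites
 * (mark_page(pg, USED) / mark_page(pg, FREE)) and the BM_IDX/BM_BIT/IS_FREE
 * macros above; treat it as an assumption, not the original code: it simply
 * sets or clears the page's bit in the allocation bitmap.
 */
static void mark_page(int pg, int used)
{
	if(used) {
		bitmap[BM_IDX(pg)] |= 1 << BM_BIT(pg);		/* set bit: page in use */
	} else {
		bitmap[BM_IDX(pg)] &= ~(1 << BM_BIT(pg));	/* clear bit: page free */
	}
}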