kern

view src/mem.c @ 20:369adbbd4bdd

added a few comments in mem.c
author John Tsiombikas <nuclear@member.fsf.org>
date Wed, 30 Mar 2011 23:14:29 +0300
parents 8be069e6bb05
children 3ba93d8f586c
line source
1 #include <stdio.h>
2 #include <string.h>
3 #include "mem.h"
4 #include "panic.h"
5 #include "vm.h"
7 #define FREE 0
8 #define USED 1
10 #define BM_IDX(pg) ((pg) / 32)
11 #define BM_BIT(pg) ((pg) & 0x1f)
13 #define IS_FREE(pg) ((bitmap[BM_IDX(pg)] & (1 << BM_BIT(pg))) == 0)
15 static void mark_page(int pg, int free);
16 static void add_memory(uint32_t start, size_t size);
18 /* end of kernel image */
19 extern int _end;
21 /* A bitmap is used to track which physical memory pages are used or available
22 * for allocation by alloc_phys_page.
23 *
24 * last_alloc_idx keeps track of the last 32-bit element in the bitmap array
25 * where a free page was found. It's guaranteed that all the elements before
26 * this have no free pages, but it doesn't imply that there will be another
27 * free page there. So it's used as a starting point for the search.
28 */
29 static uint32_t *bitmap;
30 static int bmsize, last_alloc_idx;
33 void init_mem(struct mboot_info *mb)
34 {
35 int i, num_pages, max_pg = 0;
36 uint32_t used_end;
38 num_pages = 0;
39 last_alloc_idx = 0;
41 /* the allocation bitmap starts right at the end of the ELF image */
42 bitmap = (uint32_t*)&_end;
44 /* start by marking all posible pages (2**20) as used. We do not "reserve"
45 * all this space. Pages beyond the end of the useful bitmap area
46 * ((char*)bitmap + bmsize), which will be determined after we traverse the
47 * memory map, are going to be marked as available for allocation.
48 */
49 memset(bitmap, 0xff, 1024 * 1024 / 8);
51 /* if the bootloader gave us an available memory map, traverse it and mark
52 * all the corresponding pages as free.
53 */
54 if(mb->flags & MB_MMAP) {
55 struct mboot_mmap *mem, *mmap_end;
57 mem = mb->mmap;
58 mmap_end = (struct mboot_mmap*)((char*)mb->mmap + mb->mmap_len);
60 printf("memory map:\n");
61 while(mem < mmap_end) {
62 char *type;
63 unsigned int end = mem->base_low + mem->length_low;
65 if(mem->type == MB_MEM_VALID) {
66 type = "free:";
67 add_memory(mem->base_low, mem->length_low);
69 num_pages = ADDR_TO_PAGE(mem->base_low + mem->length_low);
70 if(max_pg < num_pages) {
71 max_pg = num_pages;
72 }
73 } else {
74 type = "hole:";
75 }
77 printf(" %s %x - %x (%u bytes)\n", type, mem->base_low, end, mem->length_low);
78 mem = (struct mboot_mmap*)((char*)mem + mem->skip + sizeof mem->skip);
79 }
80 } else if(mb->flags & MB_MEM) {
81 /* if we don't have a detailed memory map, just use the lower and upper
82 * memory block sizes to determine which pages should be available.
83 */
84 add_memory(0, mb->mem_lower);
85 add_memory(0x100000, mb->mem_upper * 1024);
86 max_pg = mb->mem_upper / 4;
88 printf("lower memory: %ukb, upper mem: %ukb\n", mb->mem_lower, mb->mem_upper);
89 } else {
90 /* I don't think this should ever happen with a multiboot-compliant boot loader */
91 panic("didn't get any memory info from the boot loader, I give up\n");
92 }
94 bmsize = max_pg / 8; /* size of the useful bitmap in bytes */
96 /* mark all the used pages as ... well ... used */
97 used_end = ((uint32_t)bitmap + bmsize - 1);
99 printf("marking pages up to %x ", used_end);
100 used_end = ADDR_TO_PAGE(used_end);
101 printf("(page: %d) inclusive as used\n", used_end);
103 for(i=0; i<=used_end; i++) {
104 mark_page(i, USED);
105 }
106 }
108 /* alloc_phys_page finds the first available page of physical memory,
109 * marks it as used in the bitmap, and returns its address. If there's
110 * no unused physical page, 0 is returned.
111 */
112 uint32_t alloc_phys_page(void)
113 {
114 int i, idx, max;
116 idx = last_alloc_idx;
117 max = bmsize / 4;
119 while(idx <= max) {
120 /* if at least one bit is 0 then we have at least
121 * one free page. find it and allocate it.
122 */
123 if(bitmap[idx] != 0xffffffff) {
124 for(i=0; i<32; i++) {
125 int pg = idx * 32 + i;
127 if(IS_FREE(pg)) {
128 mark_page(pg, USED);
130 last_alloc_idx = idx;
132 printf("alloc_phys_page() -> %x (page: %d)\n", PAGE_TO_ADDR(pg), pg);
133 return PAGE_TO_ADDR(pg);
134 }
135 }
136 panic("can't happen: alloc_phys_page (mem.c)\n");
137 }
138 idx++;
139 }
141 return 0;
142 }
144 /* free_phys_page marks the physical page which corresponds to the specified
145 * address as free in the allocation bitmap.
146 *
147 * CAUTION: no checks are done that this page should actually be freed or not.
148 * If you call free_phys_page with the address of some part of memory that was
149 * originally reserved due to it being in a memory hole or part of the kernel
150 * image or whatever, it will be subsequently allocatable by alloc_phys_page.
151 */
152 void free_phys_page(uint32_t addr)
153 {
154 int pg = ADDR_TO_PAGE(addr);
155 int bmidx = BM_IDX(pg);
157 if(!IS_FREE(pg)) {
158 panic("free_phys_page(%d): I thought that was already free!\n", pg);
159 }
161 mark_page(pg, FREE);
162 if(bmidx < last_alloc_idx) {
163 last_alloc_idx = bmidx;
164 }
165 }
167 /* this is only ever used by the VM init code to find out what the extends of
168 * the kernel image are, in order to map them 1-1 before enabling paging.
169 */
170 void get_kernel_mem_range(uint32_t *start, uint32_t *end)
171 {
172 if(start) {
173 *start = 0x100000;
174 }
175 if(end) {
176 uint32_t e = (uint32_t)bitmap + bmsize;
178 if(e & PGOFFS_MASK) {
179 *end = (e + 4096) & PGOFFS_MASK;
180 } else {
181 *end = e;
182 }
183 }
184 }
186 /* adds a range of physical memory to the available pool. used during init_mem
187 * when traversing the memory map.
188 */
189 static void add_memory(uint32_t start, size_t sz)
190 {
191 int i, szpg, pg;
193 szpg = ADDR_TO_PAGE(sz);
194 pg = ADDR_TO_PAGE(start);
196 for(i=0; i<szpg; i++) {
197 mark_page(pg++, FREE);
198 }
199 }
201 /* maps a page as used or free in the allocation bitmap */
202 static void mark_page(int pg, int used)
203 {
204 int idx = BM_IDX(pg);
205 int bit = BM_BIT(pg);
207 if(used) {
208 bitmap[idx] |= 1 << bit;
209 } else {
210 bitmap[idx] &= ~(1 << bit);
211 }
212 }