kern
changeset 30:a2c6110bd24b
refactored the code a bit, and merged with the malloc implementation branch
author   | John Tsiombikas <nuclear@siggraph.org>
date     | Fri, 27 May 2011 22:09:10 +0300
parents  | 8ea6debe4265 bdd9bf70269d
children | 2cb5ab18e76e
files    | src/intr.c src/klibc/malloc.c src/klibc/stdlib.h src/main.c src/vm-asm.S src/vm.c src/vm.h
diffstat | 7 files changed, 240 insertions(+), 26 deletions(-)
--- a/src/intr.c	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/intr.c	Fri May 27 22:09:10 2011 +0300
@@ -95,9 +95,6 @@
 	 * setting up the mapping of IRQs [0, 15] to interrupts [32, 47]
 	 */
 	init_pic(IRQ_OFFSET);
-
-	/* we're done setting up, enable interrupts before returning */
-	enable_intr();
 }
 
 /* set an interrupt handler function for a particular interrupt */
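Note on this hunk: init_intr() no longer enables interrupts as a side effect; enable_intr() moves to main() (see the src/main.c hunk below), so interrupts stay off until the memory managers are up. The allocator added below relies on the save/disable/restore idiom instead of blindly re-enabling, which keeps it correct both before and after that point. A minimal sketch of the idiom, with critical_op() as a hypothetical stand-in for any non-reentrant work:

	#include "intr.h"

	void critical_op(void)
	{
		/* remember whether interrupts were enabled on entry */
		int intr_state = get_intr_state();
		disable_intr();

		/* ... touch shared state here ... */

		/* restore the previous state instead of calling enable_intr(),
		 * so nested and early-boot callers both stay correct */
		set_intr_state(intr_state);
	}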
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/klibc/malloc.c	Fri May 27 22:09:10 2011 +0300
@@ -0,0 +1,209 @@
+#include <stdlib.h>
+#include "intr.h"
+#include "vm.h"
+#include "panic.h"
+
+#define MAGIC	0xbaadbeef
+
+struct mem_range {
+	uint32_t start;
+	size_t size;
+	struct mem_range *next;
+};
+
+struct alloc_desc {
+	size_t size;
+	uint32_t magic;
+};
+
+static void add_range(struct mem_range *rng);
+static void coalesce(struct mem_range *low, struct mem_range *mid, struct mem_range *high);
+static struct mem_range *alloc_node(void);
+static void free_node(struct mem_range *node);
+
+static struct mem_range *free_list;
+static struct mem_range *node_pool;
+
+
+void *malloc(size_t sz)
+{
+	void *res = 0;
+	struct mem_range *node, *prev, dummy;
+	int intr_state;
+	struct alloc_desc *desc;
+	size_t alloc_size = sz + sizeof *desc;
+
+	if(!sz) {
+		return 0;
+	}
+
+	/* entering the critical section, do not disturb */
+	intr_state = get_intr_state();
+	disable_intr();
+
+find_range:
+	prev = &dummy;
+	dummy.next = node = free_list;
+	while(node) {
+		/* find a node in the free_list with enough space ... */
+		if(node->size >= alloc_size) {
+			/* insert the allocation descriptor at the beginning */
+			desc = (void*)node->start;
+			desc->size = alloc_size;
+			desc->magic = MAGIC;
+			res = desc + 1;	/* that's what we'll return to the user */
+
+			/* modify the node to reflect the new range after we
+			 * grabbed a part at the beginning...
+			 */
+			node->size -= alloc_size;
+			node->start += alloc_size;
+
+			/* if the range represented by this node now has zero size,
+			 * remove and free the node (it goes into the node_pool)
+			 */
+			if(!node->size) {
+				prev->next = node->next;
+				if(free_list == node) {
+					free_list = node->next;
+				}
+				free_node(node);
+			}
+			break;
+		}
+		prev = node;
+		node = node->next;
+	}
+
+	/* we didn't find a range big enough in the free_list. In that case
+	 * we need to allocate some pages, add them to the free_list and try
+	 * again.
+	 */
+	if(!res) {
+		struct mem_range *range;
+		int pg, pgcount = (PGSIZE - 1 + alloc_size) / PGSIZE;
+
+		if((pg = pgalloc(pgcount, MEM_KERNEL)) == -1) {
+			set_intr_state(intr_state);
+			return 0;
+		}
+
+		range = alloc_node();
+		range->start = PAGE_TO_ADDR(pg);
+		range->size = pgcount * PGSIZE;
+		add_range(range);
+		goto find_range;
+	}
+
+	set_intr_state(intr_state);
+	return res;
+}
+
+void free(void *ptr)
+{
+	int intr_state;
+	struct alloc_desc *desc;
+	struct mem_range *rng;
+
+	if(!ptr) return;
+
+	intr_state = get_intr_state();
+	disable_intr();
+
+	desc = (struct alloc_desc*)ptr - 1;
+	if(desc->magic != MAGIC) {
+		panic("free(%x) magic mismatch, invalid address.\n", (unsigned int)ptr);
+	}
+
+	rng = alloc_node();
+	rng->start = (uint32_t)desc;
+	rng->size = desc->size;
+	add_range(rng);
+
+	set_intr_state(intr_state);
+}
+
+static void add_range(struct mem_range *rng)
+{
+	struct mem_range *node, *prev = 0;
+
+	if(!free_list || free_list->start > rng->start) {
+		rng->next = free_list;
+		free_list = rng;
+
+	} else {
+		node = free_list;
+
+		while(node) {
+			if(!node->next || node->next->start > rng->start) {
+				rng->next = node->next;
+				node->next = rng;
+				prev = node;	/* needed by coalesce after the loop */
+				break;
+			}
+
+			prev = node;
+			node = node->next;
+		}
+	}
+
+	coalesce(prev, rng, rng->next);
+}
+
+static void coalesce(struct mem_range *low, struct mem_range *mid, struct mem_range *high)
+{
+	if(high) {
+		if(mid->start + mid->size >= high->start) {
+			mid->size = high->start + high->size - mid->start;
+			mid->next = high->next;
+			free_node(high);
+		}
+	}
+
+	if(low) {
+		if(low->start + low->size >= mid->start) {
+			low->size = mid->start + mid->size - low->start;
+			low->next = mid->next;
+			free_node(mid);
+		}
+	}
+}
+
+static struct mem_range *alloc_node(void)
+{
+	struct mem_range *node;
+
+	/* no nodes available for reuse...
+	 * grab a page, slice it into nodes, link them up and hang them in the pool
+	 */
+	if(!node_pool) {
+		int i, num_nodes, pg;
+		struct mem_range *nodepage;
+
+		pg = pgalloc(1, MEM_KERNEL);
+		if(pg == -1) {
+			panic("failed to allocate page for the malloc node pool\n");
+			return 0;	/* unreachable */
+		}
+
+		nodepage = (struct mem_range*)PAGE_TO_ADDR(pg);
+		num_nodes = PGSIZE / sizeof *nodepage;
+
+		for(i=1; i<num_nodes; i++) {
+			nodepage[i - 1].next = nodepage + i;
+		}
+		nodepage[i - 1].next = 0;
+		node_pool = nodepage;
+	}
+
+	node = node_pool;
+	node_pool = node->next;
+	node->next = 0;
+	return node;
+}
+
+static void free_node(struct mem_range *node)
+{
+	node->next = node_pool;
+	node_pool = node;
+}
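The allocator is a first-fit free list: malloc() carves its block from the front of the first sufficiently large range and hides a struct alloc_desc (size plus magic) just before the returned pointer, while free() steps back one descriptor, checks the magic, and hands the range to add_range(), which keeps the list address-sorted so coalesce() can merge neighbours. Below is a self-contained user-space sketch of just the descriptor trick; the free list and page backing are replaced by a static buffer and a bump pointer, so this is illustrative only, not the kernel code:

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define MAGIC 0xbaadbeef

	struct hdr { size_t size; uint32_t magic; };

	/* union forces alignment suitable for struct hdr */
	static union { unsigned char bytes[4096]; size_t align; } heap;
	static size_t heap_top;	/* bump pointer standing in for the free list */

	static void *toy_malloc(size_t sz)
	{
		struct hdr *h;
		size_t total = sz + sizeof *h;

		if(!sz || heap_top + total > sizeof heap.bytes) {
			return 0;
		}
		h = (struct hdr*)(heap.bytes + heap_top);
		h->size = total;	/* header + payload, like alloc_size above */
		h->magic = MAGIC;
		heap_top += total;
		return h + 1;	/* user pointer starts right after the header */
	}

	static void toy_free(void *ptr)
	{
		struct hdr *h = (struct hdr*)ptr - 1;	/* step back to the header */
		if(h->magic != MAGIC) {
			fprintf(stderr, "toy_free(%p): bad magic\n", ptr);
			return;
		}
		/* a real free would now insert [h, h + h->size) into a free list */
	}

	int main(void)
	{
		void *p = toy_malloc(100);
		printf("%p carries a %zu-byte block\n", p, ((struct hdr*)p - 1)->size);
		toy_free(p);
		return 0;
	}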
--- a/src/klibc/stdlib.h	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/klibc/stdlib.h	Fri May 27 22:09:10 2011 +0300
@@ -12,4 +12,8 @@
 void itoa(int val, char *buf, int base);
 void utoa(unsigned int val, char *buf, int base);
 
+/* defined in malloc.c */
+void *malloc(size_t sz);
+void free(void *ptr);
+
 #endif	/* STDLIB_H_ */
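With the prototypes exported through stdlib.h, any kernel code can allocate dynamically. A hypothetical caller (struct task is a placeholder type, not part of this changeset):

	#include <stdlib.h>

	struct task { int id; };	/* placeholder payload */

	struct task *spawn_task(void)
	{
		struct task *t = malloc(sizeof *t);
		if(!t) {
			return 0;	/* malloc returns 0 on failure and for sz == 0 */
		}
		t->id = 0;
		return t;	/* the caller eventually releases it; free(0) is a no-op */
	}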
--- a/src/main.c	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/main.c	Fri May 27 22:09:10 2011 +0300
@@ -5,6 +5,7 @@
 #include "asmops.h"
 #include "segm.h"
 #include "intr.h"
+#include "mem.h"
 #include "vm.h"
 
 static void do_nothing();
@@ -57,7 +58,13 @@
 	/* silence the blasted timer interrupt */
 	interrupt(32, do_nothing);
 
-	init_vm(mbinf);
+	/* initialize the physical memory manager */
+	init_mem(mbinf);
+	/* initialize paging and the virtual memory manager */
+	init_vm();
+
+	/* initialization complete, enable interrupts */
+	enable_intr();
 
 	dbg_print_vm(MEM_USER);
 	dbg_print_vm(MEM_KERNEL);
--- a/src/vm-asm.S	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/vm-asm.S	Fri May 27 22:09:10 2011 +0300
@@ -1,6 +1,6 @@
 	.text
 /* enable_paging(void)
- * sets the cr0 bit 31 which enables page translation */
+ * sets bit 31 of cr0 which enables page translation */
 	.globl enable_paging
 enable_paging:
 	movl %cr0, %eax
@@ -9,7 +9,7 @@
 	ret
 
 /* disable_paging(void)
- * clears the cr0 bit 31 */
+ * clears bit 31 of cr0 which disables page translation */
 	.globl disable_paging
 disable_paging:
 	movl %cr0, %eax
@@ -17,6 +17,8 @@
 	movl %eax, %cr0
 	ret
 
+/* get_paging_status(void)
+ * returns 0 if paging is disabled or 1 if it's enabled */
 	.globl get_paging_status
 get_paging_status:
 	movl %cr0, %eax
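For reference, the C-side declarations these routines presumably pair with; the actual header is not shown in this changeset, so treat the exact placement as an assumption:

	/* paging control, implemented in vm-asm.S */
	void enable_paging(void);	/* set CR0 bit 31 */
	void disable_paging(void);	/* clear CR0 bit 31 */
	int get_paging_status(void);	/* 0 = paging off, 1 = paging on */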
--- a/src/vm.c	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/vm.c	Fri May 27 22:09:10 2011 +0300
@@ -53,13 +53,10 @@
 static struct page_range first_node;
 
 
-void init_vm(struct mboot_info *mb)
+void init_vm(void)
 {
 	uint32_t idmap_end;
 
-	/* initialize the physical memory map and allocator */
-	init_mem(mb);
-
 	/* setup the page tables */
 	pgdir = (uint32_t*)alloc_phys_page();
 	memset(pgdir, 0, PGSIZE);
@@ -83,12 +80,12 @@
 	node_pool = 0;
 
 	first_node.start = ADDR_TO_PAGE(KMEM_START);
-	first_node.end = PAGE_COUNT;
+	first_node.end = ADDR_TO_PAGE(PGTBL_BASE);
 	first_node.next = 0;
 	pglist[MEM_KERNEL] = &first_node;
 
 	pglist[MEM_USER] = alloc_node();
-	pglist[MEM_USER]->start = 0;
+	pglist[MEM_USER]->start = ADDR_TO_PAGE(idmap_end);
 	pglist[MEM_USER]->end = ADDR_TO_PAGE(KMEM_START);
 	pglist[MEM_USER]->next = 0;
 }
@@ -149,7 +146,7 @@
 	if(!(pgdir[diridx] & PG_PRESENT)) {
 		goto err;
 	}
-	pgtbl = (uint32_t*)(pgdir[diridx] & ADDR_PGENT_MASK);
+	pgtbl = PGTBL(diridx);
 
 	if(!(pgtbl[pgidx] & PG_PRESENT)) {
 		goto err;
@@ -168,18 +165,9 @@
 int map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
 {
 	int i, phys_pg;
-	uint32_t paddr;
 
 	for(i=0; i<pgcount; i++) {
-		if(ppg_start < 0) {
-			if(!(paddr = alloc_phys_page())) {
-				return -1;
-			}
-			phys_pg = ADDR_TO_PAGE(paddr);
-		} else {
-			phys_pg = ppg_start + i;
-		}
-
+		phys_pg = ppg_start < 0 ? -1 : ppg_start + i;
 		map_page(vpg_start + i, phys_pg, attr);
 	}
 	return 0;
@@ -212,7 +200,7 @@
 	if(!(pgdir[diridx] & PG_PRESENT)) {
 		panic("virt_to_phys(%x): page table %d not present\n", vaddr, diridx);
 	}
-	pgtbl = (uint32_t*)(pgdir[diridx] & PGENT_ADDR_MASK);
+	pgtbl = PGTBL(diridx);
 
 	if(!(pgtbl[pgidx] & PG_PRESENT)) {
 		panic("virt_to_phys(%x): page %d not present\n", vaddr, ADDR_TO_PAGE(vaddr));
@@ -271,12 +259,19 @@
 
 void pgfree(int start, int num)
 {
-	int area, end, intr_state;
+	int i, area, end, intr_state;
 	struct page_range *node, *new, *prev, *next;
 
 	intr_state = get_intr_state();
 	disable_intr();
 
+	for(i=0; i<num; i++) {
+		uint32_t phys = virt_to_phys(PAGE_TO_ADDR(start + i));
+		if(phys) {
+			free_phys_page(ADDR_TO_PAGE(phys));
+		}
+	}
+
 	if(!(new = alloc_node())) {
 		panic("pgfree: can't allocate new page_range node to add the freed pages\n");
 	}
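Two behavioural points in this file are worth spelling out. First, map_page_range() no longer grabs physical pages eagerly: a negative ppg_start is simply forwarded, which implies map_page() itself now handles the allocate-a-physical-page case. Second, pgfree() walks the range and returns each backing physical page via virt_to_phys()/free_phys_page() before recycling the virtual range. A hedged round-trip sketch, assuming (as malloc.c above does) that pgalloc() returns mapped, usable pages:

	#include "vm.h"
	#include "panic.h"

	/* hypothetical helper demonstrating the updated pgalloc/pgfree pairing */
	static void scratch_roundtrip(void)
	{
		int pg;

		/* reserve and map three kernel pages */
		if((pg = pgalloc(3, MEM_KERNEL)) == -1) {
			panic("out of kernel memory\n");
		}

		/* ... use the memory at PAGE_TO_ADDR(pg) ... */

		/* pgfree() now also releases the backing physical pages,
		 * so this round-trip no longer leaks physical memory */
		pgfree(pg, 3);
	}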
--- a/src/vm.h	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/vm.h	Fri May 27 22:09:10 2011 +0300
@@ -35,7 +35,7 @@
 #define PAGE_TO_PGTBL_PG(x)	((uint32_t)(x) & 0x3ff)
 
 
-void init_vm(struct mboot_info *mb);
+void init_vm(void);
 
 int map_page(int vpage, int ppage, unsigned int attr);
 int map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr);