#include <stdlib.h>
#include "intr.h"
#include "vm.h"
#include "panic.h"

#define MAGIC 0xbaadbeef

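/* a node of the free_list: describes one contiguous range of free memory,
 * starting at address `start` and spanning `size` bytes
 */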
struct mem_range {
	uint32_t start;
	size_t size;
	struct mem_range *next;
};

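/* allocation descriptor: prepended to every block handed out by malloc,
 * so that free() can recover the block size and validate the pointer
 * through the magic number
 */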
struct alloc_desc {
	size_t size;
	uint32_t magic;
};

static void add_range(struct mem_range *rng);
static void coalesce(struct mem_range *low, struct mem_range *mid, struct mem_range *high);
static struct mem_range *alloc_node(void);
static void free_node(struct mem_range *node);

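/* free_list holds the free memory ranges, kept sorted by start address by
 * add_range; node_pool holds spare mem_range nodes available for reuse
 */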
struct mem_range *free_list;
struct mem_range *node_pool;


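/* allocate sz bytes from the kernel heap, first-fit from the free_list;
 * interrupts are disabled while the lists are being manipulated
 */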
void *malloc(size_t sz)
{
	void *res = 0;
	struct mem_range *node, *prev, dummy;
	int intr_state;
	struct alloc_desc *desc;
	size_t alloc_size = sz + sizeof *desc;

	if(!sz) {
		return 0;
	}

	/* entering the critical section, do not disturb */
	intr_state = get_intr_state();
	disable_intr();

find_range:
	prev = &dummy;
	dummy.next = node = free_list;
	while(node) {
		/* find a node in the free_list with enough space ... */
		if(node->size >= alloc_size) {
			/* insert the allocation descriptor at the beginning */
			desc = (void*)node->start;
			desc->size = alloc_size;
			desc->magic = MAGIC;
			res = desc + 1;	/* that's what we'll return to the user */

			/* modify the node to reflect the new range after we
			 * grabbed a part at the beginning...
			 */
			node->size -= alloc_size;
			node->start += alloc_size;

			/* if the range represented by this node now has zero size,
			 * remove and free the node (it goes into the node_pool)
			 */
			if(!node->size) {
				prev->next = node->next;
				if(free_list == node) {
					free_list = node->next;
				}
				free_node(node);
			}
			break;
		}
		prev = node;
		node = node->next;
	}

	/* we didn't find a range big enough in the free_list. In that case
	 * we need to allocate some pages, add them to the free_list and try
	 * again.
	 */
	if(!res) {
		struct mem_range *range;
		int pg, pgcount = (PGSIZE - 1 + alloc_size) / PGSIZE;

		if((pg = pgalloc(pgcount, MEM_KERNEL)) == -1) {
			set_intr_state(intr_state);
			return 0;
		}

		range = alloc_node();
		range->start = PAGE_TO_ADDR(pg);
		range->size = pgcount * PGSIZE;
		add_range(range);
		goto find_range;
	}

	set_intr_state(intr_state);
	return res;
}

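/* release a block previously returned by malloc back into the free_list */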
void free(void *ptr)
{
	int intr_state;
	struct alloc_desc *desc;
	struct mem_range *rng;

	if(!ptr) return;

	intr_state = get_intr_state();
	disable_intr();

	desc = (struct alloc_desc*)ptr - 1;
	if(desc->magic != MAGIC) {
		panic("free(%x) magic mismatch, invalid address.\n", (unsigned int)ptr);
	}

	rng = alloc_node();
	rng->start = (uint32_t)desc;
	rng->size = desc->size;
	add_range(rng);

	set_intr_state(intr_state);
}

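/* insert rng into the free_list, keeping the list sorted by start address,
 * then try to merge it with its neighbours
 */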
static void add_range(struct mem_range *rng)
{
	struct mem_range *node, *prev = 0;

	if(!free_list || free_list->start > rng->start) {
		rng->next = free_list;
		free_list = rng;

	} else {
		node = free_list;

		while(node) {
			if(!node->next || node->next->start > rng->start) {
				rng->next = node->next;
				node->next = rng;
				prev = node;	/* needed by coalesce after the loop */
				break;
			}

			prev = node;
			node = node->next;
		}
	}

	coalesce(prev, rng, rng->next);
}

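/* merge mid with the nodes directly before and after it in the free_list
 * when the ranges they describe are adjacent; low or high may be NULL
 */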
static void coalesce(struct mem_range *low, struct mem_range *mid, struct mem_range *high)
{
	if(high) {
		if(mid->start + mid->size >= high->start) {
			mid->size = high->start + high->size - mid->start;
			mid->next = high->next;
			free_node(high);
		}
	}

	if(low) {
		if(low->start + low->size >= mid->start) {
			low->size = mid->start + mid->size - low->start;
			low->next = mid->next;
			free_node(mid);
		}
	}
}

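/* grab a mem_range node from the node_pool, refilling the pool from a
 * freshly allocated page when it runs empty
 */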
static struct mem_range *alloc_node(void)
{
	struct mem_range *node;

	/* no nodes available for reuse...
	 * grab a page, slice it into nodes, link them up and hang them in the pool
	 */
	if(!node_pool) {
		int i, num_nodes, pg;
		struct mem_range *nodepage;

		pg = pgalloc(1, MEM_KERNEL);
		if(pg == -1) {
			panic("failed to allocate page for the malloc node pool\n");
			return 0;	/* unreachable */
		}

		nodepage = (struct mem_range*)PAGE_TO_ADDR(pg);
		num_nodes = PGSIZE / sizeof *nodepage;

		for(i=1; i<num_nodes; i++) {
			nodepage[i - 1].next = nodepage + i;
		}
		nodepage[i - 1].next = 0;
		node_pool = nodepage;
	}

	node = node_pool;
	node_pool = node->next;
	node->next = 0;
	return node;
}

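/* return a mem_range node to the node_pool for later reuse */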
static void free_node(struct mem_range *node)
{
	node->next = node_pool;
	node_pool = node;
}