rev |
line source |
nuclear@51
|
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include "sched.h"
#include "proc.h"
#include "intr.h"
#include "asmops.h"
#include "config.h"
|
nuclear@51
|
8
|
nuclear@55
|
9 #define EMPTY(q) ((q)->head == 0)
|
nuclear@51
|
10
|
nuclear@51
|
/* intrusive doubly-linked queue of processes; links live in
 * struct process (next/prev, see ins_back/ins_front/remove below).
 * An empty queue has head == 0 (see the EMPTY() macro above).
 */
struct proc_list {
	struct process *head, *tail;
};
|
nuclear@51
|
14
|
nuclear@55
|
15 static void idle_proc(void);
|
nuclear@55
|
16 static void ins_back(struct proc_list *list, struct process *proc);
|
nuclear@55
|
17 static void ins_front(struct proc_list *list, struct process *proc);
|
nuclear@55
|
18 static void remove(struct proc_list *list, struct process *proc);
|
nuclear@55
|
19 static int hash_addr(void *addr);
|
nuclear@51
|
20
|
nuclear@51
|
21 static struct proc_list runq;
|
nuclear@51
|
22 static struct proc_list zombieq;
|
nuclear@51
|
23
|
nuclear@55
|
24 #define HTBL_SIZE 101
|
nuclear@55
|
25 static struct proc_list wait_htable[HTBL_SIZE];
|
nuclear@55
|
26
|
nuclear@55
|
27
|
nuclear@51
|
/* Pick the next process to run and switch to it (round-robin with
 * per-process timeslices). If the runqueue is empty, fall into the
 * idle process instead.
 * NOTE(review): interrupts are deliberately left disabled on every
 * path out of here — presumably context_switch()/the interrupt-return
 * path restores them. TODO confirm against the context switch code.
 */
void schedule(void)
{
	/* runqueue manipulation must not race with interrupt handlers */
	disable_intr();

	if(EMPTY(&runq)) {
		if(!get_current_proc()) {
			/* we're already in the idle process, don't reenter it
			 * or you'll fill up the stack very quickly.
			 */
			return;
		}

		idle_proc();
		return;
	}

	/* if the current process exhausted its timeslice,
	 * move it to the back of the queue.
	 */
	if(runq.head->ticks_left <= 0) {
		/* only bother rotating if there's more than one process queued */
		if(runq.head->next) {
			struct process *proc = runq.head;
			remove(&runq, proc);
			ins_back(&runq, proc);
		}

		/* start a new timeslice */
		runq.head->ticks_left = TIMESLICE_TICKS;
	}

	/* always enter context_switch with interrupts disabled */
	context_switch(runq.head->id);
}
|
nuclear@51
|
61
|
nuclear@55
|
62 void add_proc(int pid)
|
nuclear@51
|
63 {
|
nuclear@51
|
64 int istate;
|
nuclear@52
|
65 struct process *proc;
|
nuclear@51
|
66
|
nuclear@51
|
67 istate = get_intr_state();
|
nuclear@51
|
68 disable_intr();
|
nuclear@51
|
69
|
nuclear@52
|
70 proc = get_process(pid);
|
nuclear@52
|
71
|
nuclear@51
|
72 ins_back(&runq, proc);
|
nuclear@53
|
73 proc->state = STATE_RUNNABLE;
|
nuclear@51
|
74
|
nuclear@51
|
75 set_intr_state(istate);
|
nuclear@51
|
76 }
|
nuclear@51
|
77
|
nuclear@55
|
78 /* block the process until we get a wakeup call for address ev */
|
nuclear@55
|
79 void wait(void *wait_addr)
|
nuclear@55
|
80 {
|
nuclear@55
|
81 struct process *p;
|
nuclear@55
|
82 int hash_idx;
|
nuclear@51
|
83
|
nuclear@55
|
84 disable_intr();
|
nuclear@55
|
85
|
nuclear@55
|
86 p = get_current_proc();
|
nuclear@55
|
87 assert(p);
|
nuclear@55
|
88
|
nuclear@55
|
89 /* remove it from the runqueue ... */
|
nuclear@55
|
90 remove(&runq, p);
|
nuclear@55
|
91
|
nuclear@55
|
92 /* and place it in the wait hash table based on sleep_addr */
|
nuclear@55
|
93 hash_idx = hash_addr(wait_addr);
|
nuclear@55
|
94 ins_back(wait_htable + hash_idx, p);
|
nuclear@55
|
95
|
nuclear@55
|
96 p->state = STATE_BLOCKED;
|
nuclear@55
|
97 p->wait_addr = wait_addr;
|
nuclear@56
|
98
|
nuclear@56
|
99 /* call the scheduler to give time to another process */
|
nuclear@56
|
100 schedule();
|
nuclear@55
|
101 }
|
nuclear@55
|
102
|
nuclear@55
|
103 /* wake up all the processes sleeping on this address */
|
nuclear@55
|
104 void wakeup(void *wait_addr)
|
nuclear@51
|
105 {
|
nuclear@55
|
106 int hash_idx;
|
nuclear@55
|
107 struct process *iter;
|
nuclear@55
|
108 struct proc_list *list;
|
nuclear@55
|
109
|
nuclear@55
|
110 hash_idx = hash_addr(wait_addr);
|
nuclear@55
|
111 list = wait_htable + hash_idx;
|
nuclear@55
|
112
|
nuclear@55
|
113 iter = list->head;
|
nuclear@55
|
114 while(iter) {
|
nuclear@55
|
115 if(iter->wait_addr == wait_addr) {
|
nuclear@55
|
116 /* found one, remove it, and make it runnable */
|
nuclear@55
|
117 struct process *p = iter;
|
nuclear@55
|
118 iter = iter->next;
|
nuclear@55
|
119
|
nuclear@55
|
120 remove(list, p);
|
nuclear@55
|
121 p->state = STATE_RUNNABLE;
|
nuclear@55
|
122 ins_back(&runq, p);
|
nuclear@55
|
123 } else {
|
nuclear@55
|
124 iter = iter->next;
|
nuclear@55
|
125 }
|
nuclear@55
|
126 }
|
nuclear@55
|
127 }
|
nuclear@55
|
128
|
nuclear@55
|
/* Idle loop: entered by schedule() when the runqueue is empty.
 * Halts the CPU until an interrupt makes some process runnable again.
 * Called with interrupts disabled.
 */
static void idle_proc(void)
{
	/* make sure we send any pending EOIs if needed.
	 * end_of_irq will actually check if it's needed first.
	 */
	struct intr_frame *ifrm = get_intr_frame();
	end_of_irq(INTR_TO_IRQ(ifrm->inum));

	/* pid 0 apparently means "idle / no current process" — schedule()
	 * tests get_current_proc() to avoid re-entering idle_proc.
	 */
	set_current_pid(0);

	/* make sure interrupts are enabled before halting; re-disable after
	 * waking so the runqueue check and the return path stay protected.
	 */
	while(EMPTY(&runq)) {
		enable_intr();
		halt_cpu();
		disable_intr();
	}
}
|
nuclear@55
|
146
|
nuclear@55
|
147
|
nuclear@55
|
148 /* list operations */
|
nuclear@55
|
149 static void ins_back(struct proc_list *list, struct process *proc)
|
nuclear@55
|
150 {
|
nuclear@55
|
151 if(EMPTY(list)) {
|
nuclear@55
|
152 list->head = proc;
|
nuclear@51
|
153 } else {
|
nuclear@55
|
154 list->tail->next = proc;
|
nuclear@51
|
155 }
|
nuclear@51
|
156
|
nuclear@51
|
157 proc->next = 0;
|
nuclear@55
|
158 proc->prev = list->tail;
|
nuclear@55
|
159 list->tail = proc;
|
nuclear@51
|
160 }
|
nuclear@51
|
161
|
nuclear@55
|
162 static void ins_front(struct proc_list *list, struct process *proc)
|
nuclear@51
|
163 {
|
nuclear@55
|
164 if(EMPTY(list)) {
|
nuclear@55
|
165 list->tail = proc;
|
nuclear@51
|
166 } else {
|
nuclear@55
|
167 list->head->prev = proc;
|
nuclear@51
|
168 }
|
nuclear@51
|
169
|
nuclear@55
|
170 proc->next = list->head;
|
nuclear@51
|
171 proc->prev = 0;
|
nuclear@55
|
172 list->head = proc;
|
nuclear@51
|
173 }
|
nuclear@51
|
174
|
nuclear@55
|
175 static void remove(struct proc_list *list, struct process *proc)
|
nuclear@51
|
176 {
|
nuclear@51
|
177 if(proc->prev) {
|
nuclear@51
|
178 proc->prev->next = proc->next;
|
nuclear@51
|
179 }
|
nuclear@51
|
180 if(proc->next) {
|
nuclear@51
|
181 proc->next->prev = proc->prev;
|
nuclear@51
|
182 }
|
nuclear@55
|
183 if(list->head == proc) {
|
nuclear@55
|
184 list->head = proc->next;
|
nuclear@51
|
185 }
|
nuclear@55
|
186 if(list->tail == proc) {
|
nuclear@55
|
187 list->tail = proc->prev;
|
nuclear@51
|
188 }
|
nuclear@51
|
189 }
|
nuclear@55
|
190
|
nuclear@55
|
191 static int hash_addr(void *addr)
|
nuclear@55
|
192 {
|
nuclear@55
|
193 return (uint32_t)addr % HTBL_SIZE;
|
nuclear@55
|
194 }
|