/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>

char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};

spinlock_t threads_lock;
link_t threads_head;

static spinlock_t tidlock;
__u32 last_tid = 0;

/*
 * cushion() is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * cushion() is entered cpu_priority_high()'d, i.e. with the CPU priority
 * still raised; it lowers the priority before running the thread's code.
 */
void cushion(void)
{
        void (*f)(void *) = THREAD->thread_code;
        void *arg = THREAD->thread_arg;

        /* this is where each thread wakes up after its creation */
        spinlock_unlock(&THREAD->lock);
        cpu_priority_low();

        f(arg);
        thread_exit();
        /* not reached */
}

void thread_init(void)
{
        THREAD = NULL;
        nrdy = 0;
        spinlock_initialize(&threads_lock);
        list_initialize(&threads_head);
}

void thread_ready(thread_t *t)
{
        cpu_t *cpu;
        runq_t *r;
        pri_t pri;
        int i, avg, send_ipi = 0;

        pri = cpu_priority_high();

        spinlock_lock(&t->lock);

        i = (t->pri < RQ_COUNT - 1) ? ++t->pri : t->pri;

        cpu = CPU;
        if (t->flags & X_WIRED) {
                cpu = t->cpu;
        }
        spinlock_unlock(&t->lock);

        /*
         * Append t to respective ready queue on respective processor.
         */
        r = &cpu->rq[i];
        spinlock_lock(&r->lock);
        list_append(&t->rq_link, &r->rq_head);
        r->n++;
        spinlock_unlock(&r->lock);

        spinlock_lock(&nrdylock);
        avg = ++nrdy / config.cpu_active;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&cpu->lock);
        if ((++cpu->nrdy) > avg) {
                /*
                 * If there are idle halted CPU's, this will wake them up.
                 */
                ipi_broadcast(VECTOR_WAKEUP_IPI);
        }
        spinlock_unlock(&cpu->lock);

        cpu_priority_restore(pri);
}

thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
        thread_t *t;
        __address frame_ks, frame_us = NULL;

        t = (thread_t *) malloc(sizeof(thread_t));
        if (t) {
                pri_t pri;

                spinlock_initialize(&t->lock);

                frame_ks = frame_alloc(FRAME_KA);
                if (THREAD_USER_STACK & flags) {
                        frame_us = frame_alloc(0);
                }

                pri = cpu_priority_high();
                spinlock_lock(&tidlock);
                t->tid = ++last_tid;
                spinlock_unlock(&tidlock);
                cpu_priority_restore(pri);

                memsetb(frame_ks, THREAD_STACK_SIZE, 0);
                link_initialize(&t->rq_link);
                link_initialize(&t->wq_link);
                link_initialize(&t->th_link);
                link_initialize(&t->threads_link);
                t->kstack = (__u8 *) frame_ks;
                t->ustack = (__u8 *) frame_us;

                context_save(&t->saved_context);
                t->saved_context.pc = (__address) cushion;
                t->saved_context.sp = (__address) &t->kstack[THREAD_STACK_SIZE-8];

                pri = cpu_priority_high();
                t->saved_context.pri = cpu_priority_read();
                cpu_priority_restore(pri);

                t->thread_code = func;
                t->thread_arg = arg;
                t->ticks = -1;
                t->pri = -1;            /* start in rq[0] */
                t->cpu = NULL;
                t->flags = 0;
                t->state = Entering;
                t->call_me = NULL;
                t->call_me_with = NULL;

                timeout_initialize(&t->sleep_timeout);
                t->sleep_queue = NULL;
                t->timeout_pending = 0;

                t->rwlock_holder_type = RWLOCK_NONE;

                t->task = task;

                /*
                 * Register this thread in the system-wide list.
                 */
                pri = cpu_priority_high();
                spinlock_lock(&threads_lock);
                list_append(&t->threads_link, &threads_head);
                spinlock_unlock(&threads_lock);

                /*
                 * Attach to the containing task.
                 */
                spinlock_lock(&task->lock);
                list_append(&t->th_link, &task->th_head);
                spinlock_unlock(&task->lock);

                cpu_priority_restore(pri);
        }

        return t;
}

void thread_exit(void)
{
        pri_t pri;

restart:
        pri = cpu_priority_high();
        spinlock_lock(&THREAD->lock);
        if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
                spinlock_unlock(&THREAD->lock);
                cpu_priority_restore(pri);
                goto restart;
        }
        THREAD->state = Exiting;
        spinlock_unlock(&THREAD->lock);
        scheduler();
}

void thread_sleep(__u32 sec)
{
        thread_usleep(sec*1000000);
}

/*
 * Suspend execution of current thread for usec microseconds.
 */
void thread_usleep(__u32 usec)
{
        waitq_t wq;

        waitq_initialize(&wq);

        (void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}

void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
        pri_t pri;

        pri = cpu_priority_high();
        spinlock_lock(&THREAD->lock);
        THREAD->call_me = call_me;
        THREAD->call_me_with = call_me_with;
        spinlock_unlock(&THREAD->lock);
        cpu_priority_restore(pri);
}