/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>

char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};

spinlock_t threads_lock;        /* protects threads_head */
link_t threads_head;            /* system-wide list of all threads */

static spinlock_t tidlock;      /* protects last_tid */
__u32 last_tid = 0;             /* last thread ID handed out by thread_create() */

/*
 * cushion() is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * Entered with the CPU priority raised (cpu_priority_high()'d).
 */
void cushion(void)
{
        void (*f)(void *) = THREAD->thread_code;
        void *arg = THREAD->thread_arg;

        before_thread_runs();

        /* this is where each thread wakes up after its creation */
        spinlock_unlock(&THREAD->lock);
        cpu_priority_low();

        f(arg);
        thread_exit();
        /* not reached */
}

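/*
 * Initialize the threading subsystem: clear the running-thread pointer,
 * reset the count of ready threads and set up the system-wide thread list.
 */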
void thread_init(void)
{
        THREAD = NULL;
        nrdy = 0;
        spinlock_initialize(&threads_lock);
        list_initialize(&threads_head);
}

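/*
 * Make thread t ready to run: bump its priority index (capped at the last
 * run queue) and append it to the corresponding run queue of the current
 * CPU, or of the CPU the thread is wired to if X_WIRED is set.  If that CPU
 * now holds more ready threads than the average per active CPU, a wakeup
 * IPI is broadcast so that idle, halted CPUs pick up the work.
 */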
void thread_ready(thread_t *t)
{
        cpu_t *cpu;
        runq_t *r;
        pri_t pri;
        int i, avg, send_ipi = 0;

        pri = cpu_priority_high();

        spinlock_lock(&t->lock);

        i = (t->pri < RQ_COUNT - 1) ? ++t->pri : t->pri;

        cpu = CPU;
        if (t->flags & X_WIRED) {
                cpu = t->cpu;
        }
        spinlock_unlock(&t->lock);

        /*
         * Append t to the respective ready queue on the respective processor.
         */
        r = &cpu->rq[i];
        spinlock_lock(&r->lock);
        list_append(&t->rq_link, &r->rq_head);
        r->n++;
        spinlock_unlock(&r->lock);

        spinlock_lock(&nrdylock);
        avg = ++nrdy / config.cpu_active;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&cpu->lock);
        if ((++cpu->nrdy) > avg) {
                /*
                 * If there are idle halted CPUs, this will wake them up.
                 */
                ipi_broadcast(VECTOR_WAKEUP_IPI);
        }
        spinlock_unlock(&cpu->lock);

        cpu_priority_restore(pri);
}

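/*
 * Allocate and initialize a new thread that will run func(arg) on behalf of
 * task.  A kernel stack frame is always allocated; a user stack frame is
 * allocated as well if THREAD_USER_STACK is set in flags.  The thread starts
 * in the Entering state and is registered both in the system-wide thread
 * list and in the thread list of its task.  Returns NULL if the thread
 * structure could not be allocated.
 */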
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
        thread_t *t;
        __address frame_ks, frame_us = NULL;

        t = (thread_t *) malloc(sizeof(thread_t));
        if (t) {
                pri_t pri;

                spinlock_initialize(&t->lock);

                frame_ks = frame_alloc(FRAME_KA);
                if (THREAD_USER_STACK & flags) {
                        frame_us = frame_alloc(0);
                }

                pri = cpu_priority_high();
                spinlock_lock(&tidlock);
                t->tid = ++last_tid;
                spinlock_unlock(&tidlock);
                cpu_priority_restore(pri);

                memsetb(frame_ks, THREAD_STACK_SIZE, 0);
                link_initialize(&t->rq_link);
                link_initialize(&t->wq_link);
                link_initialize(&t->th_link);
                link_initialize(&t->threads_link);
                t->kstack = (__u8 *) frame_ks;
                t->ustack = (__u8 *) frame_us;

                context_save(&t->saved_context);
                t->saved_context.pc = FADDR(cushion);
                t->saved_context.sp = (__address) &t->kstack[THREAD_STACK_SIZE - 8];

                pri = cpu_priority_high();
                t->saved_context.pri = cpu_priority_read();
                cpu_priority_restore(pri);

                t->thread_code = func;
                t->thread_arg = arg;
                t->ticks = -1;
                t->pri = -1;            /* start in rq[0] */
                t->cpu = NULL;
                t->flags = 0;
                t->state = Entering;
                t->call_me = NULL;
                t->call_me_with = NULL;

                timeout_initialize(&t->sleep_timeout);
                t->sleep_queue = NULL;
                t->timeout_pending = 0;

                t->rwlock_holder_type = RWLOCK_NONE;

                t->task = task;

                t->fpu_context_exists = 0;
                t->fpu_context_engaged = 0;

                /*
                 * Register this thread in the system-wide list.
                 */
                pri = cpu_priority_high();
                spinlock_lock(&threads_lock);
                list_append(&t->threads_link, &threads_head);
                spinlock_unlock(&threads_lock);

                /*
                 * Attach to the containing task.
                 */
                spinlock_lock(&task->lock);
                list_append(&t->th_link, &task->th_head);
                spinlock_unlock(&task->lock);

                cpu_priority_restore(pri);
        }

        return t;
}
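
/*
 * Illustrative usage sketch of thread_create() and thread_ready(); the names
 * 'demo' and 'demo_task' are hypothetical and used only for this example:
 *
 *      void demo(void *arg)
 *      {
 *              ... thread body; when it returns, cushion() calls thread_exit() ...
 *      }
 *
 *      thread_t *t = thread_create(demo, NULL, demo_task, 0);
 *      if (t)
 *              thread_ready(t);
 */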
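/*
 * Terminate the current thread: busy-wait until no sleep timeout is pending
 * for it, mark it Exiting and hand control over to the scheduler.
 */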
void thread_exit(void)
{
        pri_t pri;

restart:
        pri = cpu_priority_high();
        spinlock_lock(&THREAD->lock);
        if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
                spinlock_unlock(&THREAD->lock);
                cpu_priority_restore(pri);
                goto restart;
        }
        THREAD->state = Exiting;
        spinlock_unlock(&THREAD->lock);
        scheduler();
}

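/*
 * Suspend execution of the current thread for sec seconds;
 * convenience wrapper around thread_usleep().
 */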
void thread_sleep(__u32 sec)
{
        thread_usleep(sec * 1000000);
}

/*
 * Suspend execution of the current thread for usec microseconds.
 */
void thread_usleep(__u32 usec)
{
        waitq_t wq;

        waitq_initialize(&wq);

        (void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}

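/*
 * Install a callback and its argument into the current thread's call_me and
 * call_me_with fields, under the thread lock and with elevated CPU priority.
 */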
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
        pri_t pri;

        pri = cpu_priority_high();
        spinlock_lock(&THREAD->lock);
        THREAD->call_me = call_me;
        THREAD->call_me_with = call_me_with;
        spinlock_unlock(&THREAD->lock);
        cpu_priority_restore(pri);
}