Rev 786 -> Rev 787
@@ Line 50 (Rev 786) | Line 50 (Rev 787) @@
 #include <smp/ipi.h>
 #include <arch/faddr.h>
 #include <arch/atomic.h>
 #include <memstr.h>
 #include <print.h>
+#include <mm/slab.h>
+#include <debug.h>
 
 char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */
 
 SPINLOCK_INITIALIZE(threads_lock); /**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
 LIST_INITIALIZE(threads_head);     /**< List of all threads. */
 
 SPINLOCK_INITIALIZE(tidlock);
 __u32 last_tid = 0;
 
+static slab_cache_t *thread_slab;
+
 
 /** Thread wrapper
  *
  * This wrapper is provided to ensure that every thread
  * makes a call to thread_exit() when its implementing
@@ Line 85 (Rev 786) | Line 89 (Rev 787) @@
         f(arg);
         thread_exit();
         /* not reached */
 }
 
+/** Initialization and allocation for thread_t structure */
+static int thr_constructor(void *obj, int kmflags)
+{
+        thread_t *t = (thread_t *)obj;
+
+        spinlock_initialize(&t->lock, "thread_t_lock");
+        link_initialize(&t->rq_link);
+        link_initialize(&t->wq_link);
+        link_initialize(&t->th_link);
+        link_initialize(&t->threads_link);
+
+        t->kstack = (__u8 *)frame_alloc(ONE_FRAME, FRAME_KA | kmflags);
+        if (!t->kstack)
+                return -1;
+
+        return 0;
+}
+
+/** Destruction of thread_t object */
+static int thr_destructor(void *obj)
+{
+        thread_t *t = (thread_t *)obj;
+
+        frame_free((__address) t->kstack);
+        return 1; /* One page freed */
+}
 
 /** Initialize threads
  *
  * Initialize kernel threads support.
  *
  */
 void thread_init(void)
 {
         THREAD = NULL;
         atomic_set(&nrdy,0);
+        thread_slab = slab_cache_create("thread_slab",
+                                        sizeof(thread_t),0,
+                                        thr_constructor, thr_destructor, 0);
 }
 
 
 /** Make thread ready
  *
@@ Line 141 (Rev 786) | Line 174 (Rev 787) @@
 
         interrupts_restore(ipl);
 }
 
 
+/** Destroy thread memory structure
+ *
+ * Detach thread from all queues, cpus etc. and destroy it.
+ *
+ * Assume thread->lock is held!!
+ */
+void thread_destroy(thread_t *t)
+{
+        ASSERT(t->state == Exiting);
+        ASSERT(t->task);
+        ASSERT(t->cpu);
+
+        spinlock_lock(&t->cpu->lock);
+        if(t->cpu->fpu_owner==t)
+                t->cpu->fpu_owner=NULL;
+        spinlock_unlock(&t->cpu->lock);
+
+        if (t->ustack)
+                frame_free((__address) t->ustack);
+
+        /*
+         * Detach from the containing task.
+         */
+        spinlock_lock(&t->task->lock);
+        list_remove(&t->th_link);
+        spinlock_unlock(&t->task->lock);
+
+        spinlock_unlock(&t->lock);
+
+        spinlock_lock(&threads_lock);
+        list_remove(&t->threads_link);
+        spinlock_unlock(&threads_lock);
+
+        slab_free(thread_slab, t);
+}
+
+
 /** Create new thread
  *
  * Create a new thread.
  *
  * @param func Thread's implementing function.
@@ Line 156 (Rev 786) | Line 226 (Rev 787) @@
  *
  */
 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
 {
         thread_t *t;
-        __address frame_ks, frame_us = NULL;
+        __address frame_us = NULL;
 
-        t = (thread_t *) malloc(sizeof(thread_t));
+        t = (thread_t *) slab_alloc(thread_slab, 0);
         if (t) {
                 ipl_t ipl;
 
-                spinlock_initialize(&t->lock, "thread_t_lock");
-
-                frame_ks = frame_alloc(ONE_FRAME, FRAME_KA);
                 if (THREAD_USER_STACK & flags) {
                         frame_us = frame_alloc(ONE_FRAME, FRAME_KA);
                 }
 
+                /* Not needed, but good for debugging */
+                memsetb((__address)t->kstack, THREAD_STACK_SIZE, 0);
+
                 ipl = interrupts_disable();
                 spinlock_lock(&tidlock);
                 t->tid = ++last_tid;
                 spinlock_unlock(&tidlock);
                 interrupts_restore(ipl);
 
-                memsetb(frame_ks, THREAD_STACK_SIZE, 0);
-                link_initialize(&t->rq_link);
-                link_initialize(&t->wq_link);
-                link_initialize(&t->th_link);
-                link_initialize(&t->threads_link);
-                t->kstack = (__u8 *) frame_ks;
                 t->ustack = (__u8 *) frame_us;
 
                 context_save(&t->saved_context);
                 context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);
 
@@ Line 216 (Rev 786) | Line 280 (Rev 787) @@
                 t->fpu_context_engaged=0;
 
                 /*
                  * Register this thread in the system-wide list.
                  */
                 ipl = interrupts_disable();
                 spinlock_lock(&threads_lock);
                 list_append(&t->threads_link, &threads_head);
                 spinlock_unlock(&threads_lock);
 
                 /*
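
Note on the change: taken together, the hunks above replace the old malloc()/frame_alloc() path in thread_create() with a slab cache whose constructor pre-allocates the kernel stack and pre-initializes the links and lock. A minimal sketch of the resulting allocation lifecycle follows; it uses only the calls visible in this diff (slab_cache_create(), slab_alloc(), slab_free()), and the comments and surrounding structure are illustrative, not part of the revision itself.

/* Sketch only; mirrors the hunks above. */

/* Boot time (thread_init): create the cache once. Each thread_t handed
 * out later already owns a kernel stack frame and initialized links and
 * lock, set up by thr_constructor(). */
thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
                                thr_constructor, thr_destructor, 0);

/* Per thread (thread_create): take an object from the cache. Only the
 * per-instance state (tid, ustack, saved context, task linkage) still
 * needs to be filled in. */
thread_t *t = (thread_t *) slab_alloc(thread_slab, 0);

/* Thread teardown (thread_destroy): return the object to the cache.
 * The kernel stack is intentionally not freed here; it stays with the
 * cached object until the slab layer reclaims it via thr_destructor(). */
slab_free(thread_slab, t);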