/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>


/** Thread states */
char *thread_states[] = {
    "Invalid",
    "Running",
    "Sleeping",
    "Ready",
    "Entering",
    "Exiting",
    "Undead"
};

/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
SPINLOCK_INITIALIZE(threads_lock);

/** B+tree of all threads.
 *
 * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
 * as the threads_lock is held.
 */
btree_t threads_btree;

SPINLOCK_INITIALIZE(tidlock);
uint32_t last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
    void (*f)(void *) = THREAD->thread_code;
    void *arg = THREAD->thread_arg;

    /* this is where each thread wakes up after its creation */
    spinlock_unlock(&THREAD->lock);
    interrupts_enable();

    f(arg);
    thread_exit();
    /* not reached */
}
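
/*
 * Note that a new thread does not enter cushion() via an ordinary call.
 * thread_create() below uses context_set(&t->saved_context, FADDR(cushion),
 * (uintptr_t) t->kstack, THREAD_STACK_SIZE), so the first time the scheduler
 * switches to the thread, execution resumes directly at cushion() with the
 * thread's lock held and interrupts disabled, which is why cushion() begins
 * by releasing both.
 */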

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
    thread_t *t = (thread_t *) obj;

    spinlock_initialize(&t->lock, "thread_t_lock");
    link_initialize(&t->rq_link);
    link_initialize(&t->wq_link);
    link_initialize(&t->th_link);

    /* call the architecture-specific part of the constructor */
    thr_constructor_arch(t);

#ifdef ARCH_HAS_FPU
#  ifdef CONFIG_FPU_LAZY
    t->saved_fpu_context = NULL;
#  else
    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
    if (!t->saved_fpu_context)
        return -1;
#  endif
#endif

    t->kstack = frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
    if (!t->kstack) {
#ifdef ARCH_HAS_FPU
        if (t->saved_fpu_context)
            slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
        return -1;
    }

    return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
    thread_t *t = (thread_t *) obj;

    /* call the architecture-specific part of the destructor */
    thr_destructor_arch(t);

    frame_free(KA2PA(t->kstack));
#ifdef ARCH_HAS_FPU
    if (t->saved_fpu_context)
        slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
    return 1; /* One page freed */
}
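
/*
 * A note on the slab contract assumed here: thr_constructor() returns 0 on
 * success and -1 on failure, and thr_destructor() returns the number of
 * frames it reclaimed (one, for the kernel stack). Constructed objects are
 * cached between uses, so the kernel stack and FPU context allocated here
 * can be recycled across thread_create()/thread_destroy() cycles.
 */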

/** Initialize threads
 *
 * Initialize kernel thread support.
 *
 */
void thread_init(void)
{
    THREAD = NULL;
    atomic_set(&nrdy, 0);
    thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
                    thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
    fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
                         FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

    btree_create(&threads_btree);
}
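
/*
 * thread_init() is expected to run once, early during kernel startup: every
 * later thread_create() allocates from thread_slab and registers the new
 * thread in threads_btree, both of which are set up here.
 */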

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
    cpu_t *cpu;
    runq_t *r;
    ipl_t ipl;
    int i, avg;

    ipl = interrupts_disable();

    spinlock_lock(&t->lock);

    ASSERT(!(t->state == Ready));

    i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

    cpu = CPU;
    if (t->flags & THREAD_FLAG_WIRED) {
        cpu = t->cpu;
    }
    t->state = Ready;
    spinlock_unlock(&t->lock);

    /*
     * Append t to respective ready queue on respective processor.
     */
    r = &cpu->rq[i];
    spinlock_lock(&r->lock);
    list_append(&t->rq_link, &r->rq_head);
    r->n++;
    spinlock_unlock(&r->lock);

    atomic_inc(&nrdy);
    avg = atomic_get(&nrdy) / config.cpu_active;
    atomic_inc(&cpu->nrdy);

    interrupts_restore(ipl);
}
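
/*
 * Example of the priority bump above: a thread that last ran at priority 3
 * is requeued on rq[4], one queue less favored, while a thread already at
 * RQ_COUNT - 1 stays there. A freshly created thread has priority -1, so its
 * first thread_ready() places it on rq[0], the most favored queue.
 */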

/** Destroy thread memory structure
 *
 * Detach the thread from all queues, CPUs etc. and destroy it.
 *
 * Assume thread->lock is held!!
 */
void thread_destroy(thread_t *t)
{
    bool destroy_task = false;

    ASSERT(t->state == Exiting || t->state == Undead);
    ASSERT(t->task);
    ASSERT(t->cpu);

    spinlock_lock(&t->cpu->lock);
    if (t->cpu->fpu_owner == t)
        t->cpu->fpu_owner = NULL;
    spinlock_unlock(&t->cpu->lock);

    spinlock_unlock(&t->lock);

    spinlock_lock(&threads_lock);
    btree_remove(&threads_btree, (btree_key_t) ((uintptr_t) t), NULL);
    spinlock_unlock(&threads_lock);

    /*
     * Detach from the containing task.
     */
    spinlock_lock(&t->task->lock);
    list_remove(&t->th_link);
    if (--t->task->refcount == 0) {
        t->task->accept_new_threads = false;
        destroy_task = true;
    }
    spinlock_unlock(&t->task->lock);

    if (destroy_task)
        task_destroy(t->task);

    slab_free(thread_slab, t);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name  Symbolic name.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
{
    thread_t *t;
    ipl_t ipl;

    t = (thread_t *) slab_alloc(thread_slab, 0);
    if (!t)
        return NULL;

    /* Not needed, but good for debugging */
    memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);

    ipl = interrupts_disable();
    spinlock_lock(&tidlock);
    t->tid = ++last_tid;
    spinlock_unlock(&tidlock);
    interrupts_restore(ipl);

    context_save(&t->saved_context);
    context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, THREAD_STACK_SIZE);

    the_initialize((the_t *) t->kstack);

    ipl = interrupts_disable();
    t->saved_context.ipl = interrupts_read();
    interrupts_restore(ipl);

    memcpy(t->name, name, THREAD_NAME_BUFLEN);

    t->thread_code = func;
    t->thread_arg = arg;
    t->ticks = -1;
    t->cycles = 0;
    t->priority = -1;       /* start in rq[0] */
    t->cpu = NULL;
    t->flags = flags;
    t->state = Entering;
    t->call_me = NULL;
    t->call_me_with = NULL;

    timeout_initialize(&t->sleep_timeout);
    t->sleep_interruptible = false;
    t->sleep_queue = NULL;
    t->timeout_pending = 0;

    t->in_copy_from_uspace = false;
    t->in_copy_to_uspace = false;

    t->interrupted = false;
    t->join_type = None;
    t->detached = false;
    waitq_initialize(&t->join_wq);

    t->rwlock_holder_type = RWLOCK_NONE;

    t->task = task;

    t->fpu_context_exists = 0;
    t->fpu_context_engaged = 0;

    thread_create_arch(t);      /* might depend on previous initialization */

    /*
     * Attach to the containing task.
     */
    ipl = interrupts_disable();
    spinlock_lock(&task->lock);
    if (!task->accept_new_threads) {
        spinlock_unlock(&task->lock);
        slab_free(thread_slab, t);
        interrupts_restore(ipl);
        return NULL;
    }
    list_append(&t->th_link, &task->th_head);
    if (task->refcount++ == 0)
        task->main_thread = t;
    spinlock_unlock(&task->lock);

    /*
     * Register this thread in the system-wide list.
     */
    spinlock_lock(&threads_lock);
    btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, NULL);
    spinlock_unlock(&threads_lock);

    interrupts_restore(ipl);

    return t;
}
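
/*
 * A minimal usage sketch (hypothetical caller; `worker' is not part of this
 * file): create a kernel thread in the current task and hand it to the
 * scheduler.
 *
 *     static void worker(void *arg)
 *     {
 *         // ... do work, then return; cushion() then calls thread_exit()
 *     }
 *
 *     thread_t *t = thread_create(worker, NULL, TASK, 0, "worker");
 *     if (t)
 *         thread_ready(t);
 *
 * Until thread_ready() is called, the thread stays in the Entering state and
 * never runs.
 */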

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
    ipl_t ipl;

restart:
    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
        goto restart;
    }
    THREAD->state = Exiting;
    spinlock_unlock(&THREAD->lock);
    scheduler();

    /* Not reached */
    while (1)
        ;
}


/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
    thread_usleep(sec * 1000000);
}

/** Wait for another thread to exit.
 *
 * @param t Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */
int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
{
    ipl_t ipl;
    int rc;

    if (t == THREAD)
        return EINVAL;

    /*
     * Since thread join can only be called once on an undetached thread,
     * the thread pointer is guaranteed to be still valid.
     */

    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);

    rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

    return rc;
}
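
/*
 * Join/detach discipline, as implied by the asserts here and in
 * thread_detach(): every undetached thread must be joined exactly once, or
 * detached exactly once, never both. A sketch of the joining side (the
 * timeout and flag constants are assumed from synch.h):
 *
 *     rc = thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *     // if the join succeeded, release the Undead thread's resources:
 *     thread_detach(t);
 *
 * The final thread_detach() on the already-Undead thread is what triggers
 * thread_destroy().
 */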

/** Detach thread.
 *
 * Mark the thread as detached; if the thread is already in the Undead state,
 * deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t)
{
    ipl_t ipl;

    /*
     * Since the thread is expected not to be already detached,
     * the pointer to it must be still valid.
     */
    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    if (t->state == Undead) {
        thread_destroy(t);  /* unlocks &t->lock */
        interrupts_restore(ipl);
        return;
    } else {
        t->detached = true;
    }
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
    waitq_t wq;

    waitq_initialize(&wq);

    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}
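
/*
 * The wait queue above is private to this stack frame and nobody ever calls
 * waitq_wakeup() on it, so the only way the sleep can end is the timeout
 * firing after usec microseconds, which is exactly the desired delay.
 */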

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me      Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    THREAD->call_me = call_me;
    THREAD->call_me_with = call_me_with;
    spinlock_unlock(&THREAD->lock);
    interrupts_restore(ipl);
}

/** Print list of threads debug info */
void thread_print_list(void)
{
    link_t *cur;
    ipl_t ipl;

    /* Messing with thread structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);

    printf("tid    name       address    state    task       ctx code       stack      cycles     cpu  kst        wq\n");
    printf("------ ---------- ---------- -------- ---------- --- ---------- ---------- ---------- ---- ---------- ----------\n");

    for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            thread_t *t;
            uint64_t cycles;
            char suffix;

            t = (thread_t *) node->value[i];

            if (t->cycles > 1000000000000000000LL) {
                cycles = t->cycles / 1000000000000000000LL;
                suffix = 'E';
            } else if (t->cycles > 1000000000000LL) {
                cycles = t->cycles / 1000000000000LL;
                suffix = 'T';
            } else if (t->cycles > 1000000LL) {
                cycles = t->cycles / 1000000LL;
                suffix = 'M';
            } else {
                cycles = t->cycles;
                suffix = ' ';
            }

            printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ",
                t->tid, t->name, t, thread_states[t->state], t->task,
                t->task->context, t->thread_code, t->kstack, cycles, suffix);

            if (t->cpu)
                printf("%-4zd", t->cpu->id);
            else
                printf("none");

            if (t->state == Sleeping)
                printf(" %#10zx %#10zx", t->kstack, t->sleep_queue);

            printf("\n");
        }
    }

    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}
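
/*
 * The suffix scaling in thread_print_list() keeps the cycles column within
 * its 10-character budget: e.g. t->cycles == 3500000 prints as "3M",
 * 2000000000000 prints as "2T", and values up to a million are printed
 * verbatim with a blank suffix.
 */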

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
    btree_node_t *leaf;

    return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t), &leaf) != NULL;
}
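
/*
 * threads_btree is keyed by the thread's own address (see the btree_insert()
 * call in thread_create()), so thread_exists() is effectively a validation
 * that a possibly stale thread_t pointer still refers to a live thread.
 */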

/** Update accounting of current thread.
 *
 * Note that thread_lock on THREAD must be already held and
 * interrupts must be already disabled.
 *
 */
void thread_update_accounting(void)
{
    uint64_t time = get_cycle();
    THREAD->cycles += time - THREAD->last_cycle;
    THREAD->last_cycle = time;
}
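
/*
 * Worked example: if THREAD->last_cycle was 1000 at the previous update and
 * get_cycle() now returns 4500, the thread is charged 3500 cycles and
 * last_cycle becomes 4500, so each interval between updates is counted
 * exactly once.
 */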

/** Process syscall to create new thread.
 *
 */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
    thread_t *t;
    char namebuf[THREAD_NAME_BUFLEN];
    uspace_arg_t *kernel_uarg;
    uint32_t tid;
    int rc;

    rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
    if (rc != 0)
        return (unative_t) rc;

    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
    if (rc != 0) {
        free(kernel_uarg);
        return (unative_t) rc;
    }

    if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf))) {
        tid = t->tid;
        thread_ready(t);
        return (unative_t) tid;
    } else {
        free(kernel_uarg);
    }

    return (unative_t) ENOMEM;
}
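
/*
 * Note that t->tid is saved into the local tid before thread_ready(): once
 * the new thread is made ready it can run, exit and be destroyed at any
 * moment, after which dereferencing t would be unsafe. The thread begins in
 * uinit(), the kernel-side trampoline that consumes kernel_uarg to enter
 * userspace.
 */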

/** Process syscall to terminate thread.
 *
 */
unative_t sys_thread_exit(int uspace_status)
{
    thread_exit();
    /* Unreachable */
    return 0;
}

/** @}
 */