Subversion Repositories HelenOS

Rev 2436 → Rev 2440

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>

/** Thread states */
char *thread_states[] = {
    "Invalid",
    "Running",
    "Sleeping",
    "Ready",
    "Entering",
    "Exiting",
    "Undead"
};

/** Lock protecting the threads_btree B+tree.
 *
 * For the locking rules, see its declaration.
 */
SPINLOCK_INITIALIZE(threads_lock);

/** B+tree of all threads.
 *
 * When a thread is found in the threads_btree B+tree, it is guaranteed to
 * exist as long as the threads_lock is held.
 */
btree_t threads_btree;

SPINLOCK_INITIALIZE(tidlock);
thread_id_t last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
    void (*f)(void *) = THREAD->thread_code;
    void *arg = THREAD->thread_arg;
    THREAD->last_cycle = get_cycle();

    /* This is where each thread wakes up after its creation */
    spinlock_unlock(&THREAD->lock);
    interrupts_enable();

    f(arg);

    /* Accumulate accounting to the task */
    ipl_t ipl = interrupts_disable();

    spinlock_lock(&THREAD->lock);
    if (!THREAD->uncounted) {
        thread_update_accounting();
        uint64_t cycles = THREAD->cycles;
        THREAD->cycles = 0;
        spinlock_unlock(&THREAD->lock);

        spinlock_lock(&TASK->lock);
        TASK->cycles += cycles;
        spinlock_unlock(&TASK->lock);
    } else
        spinlock_unlock(&THREAD->lock);

    interrupts_restore(ipl);

    thread_exit();
    /* not reached */
}
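
/*
 * Example (illustrative sketch; worker is a hypothetical function): a thread
 * body run under cushion() may simply return. Control then falls back into
 * cushion(), which settles the accounting and calls thread_exit() for it.
 *
 *     static void worker(void *arg)
 *     {
 *         (void) arg;
 *     }
 */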

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
    thread_t *t = (thread_t *) obj;

    spinlock_initialize(&t->lock, "thread_t_lock");
    link_initialize(&t->rq_link);
    link_initialize(&t->wq_link);
    link_initialize(&t->th_link);

    /* call the architecture-specific part of the constructor */
    thr_constructor_arch(t);

#ifdef ARCH_HAS_FPU
#ifdef CONFIG_FPU_LAZY
    t->saved_fpu_context = NULL;
#else
    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
    if (!t->saved_fpu_context)
        return -1;
#endif
#endif

    t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
    if (!t->kstack) {
#ifdef ARCH_HAS_FPU
        if (t->saved_fpu_context)
            slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
        return -1;
    }

    return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
    thread_t *t = (thread_t *) obj;

    /* call the architecture-specific part of the destructor */
    thr_destructor_arch(t);

    frame_free(KA2PA(t->kstack));
#ifdef ARCH_HAS_FPU
    if (t->saved_fpu_context)
        slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
    return 1; /* One page freed */
}
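
/*
 * Note: thr_constructor() and thr_destructor() follow the slab allocator's
 * contract: the constructor returns 0 on success and -1 on failure, while
 * the destructor reports how many frames it freed (the 1 above, for the
 * kernel stack).
 */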

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
    THREAD = NULL;
    atomic_set(&nrdy, 0);
    thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
        thr_constructor, thr_destructor, 0);

#ifdef ARCH_HAS_FPU
    fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
        FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

    btree_create(&threads_btree);
}

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
    cpu_t *cpu;
    runq_t *r;
    ipl_t ipl;
    int i, avg;

    ipl = interrupts_disable();

    spinlock_lock(&t->lock);

    ASSERT(!(t->state == Ready));

    i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

    cpu = CPU;
    if (t->flags & THREAD_FLAG_WIRED) {
        ASSERT(t->cpu != NULL);
        cpu = t->cpu;
    }
    t->state = Ready;
    spinlock_unlock(&t->lock);

    /*
     * Append t to respective ready queue on respective processor.
     */
    r = &cpu->rq[i];
    spinlock_lock(&r->lock);
    list_append(&t->rq_link, &r->rq_head);
    r->n++;
    spinlock_unlock(&r->lock);

    atomic_inc(&nrdy);
    avg = atomic_get(&nrdy) / config.cpu_active;
    atomic_inc(&cpu->nrdy);

    interrupts_restore(ipl);
}
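
/*
 * Example (illustrative sketch; worker is a hypothetical function): the usual
 * kernel pattern pairs thread_create() with thread_ready():
 *
 *     thread_t *t = thread_create(worker, NULL, TASK, 0, "worker", false);
 *     if (t)
 *         thread_ready(t);
 */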

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func      Thread's implementing function.
 * @param arg       Thread's implementing function argument.
 * @param task      Task to which the thread belongs.
 * @param flags     Thread flags.
 * @param name      Symbolic name.
 * @param uncounted Thread's accounting doesn't affect accumulated task
 *          accounting.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
    int flags, char *name, bool uncounted)
{
    thread_t *t;
    ipl_t ipl;

    t = (thread_t *) slab_alloc(thread_slab, 0);
    if (!t)
        return NULL;

    /* Not needed, but good for debugging */
    memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
        0);

    ipl = interrupts_disable();
    spinlock_lock(&tidlock);
    t->tid = ++last_tid;
    spinlock_unlock(&tidlock);
    interrupts_restore(ipl);

    context_save(&t->saved_context);
    context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
        THREAD_STACK_SIZE);

    the_initialize((the_t *) t->kstack);

    ipl = interrupts_disable();
    t->saved_context.ipl = interrupts_read();
    interrupts_restore(ipl);

    memcpy(t->name, name, THREAD_NAME_BUFLEN);

    t->thread_code = func;
    t->thread_arg = arg;
    t->ticks = -1;
    t->cycles = 0;
    t->uncounted = uncounted;
    t->priority = -1;       /* start in rq[0] */
    t->cpu = NULL;
    t->flags = flags;
    t->state = Entering;
    t->call_me = NULL;
    t->call_me_with = NULL;

    timeout_initialize(&t->sleep_timeout);
    t->sleep_interruptible = false;
    t->sleep_queue = NULL;
    t->timeout_pending = 0;

    t->in_copy_from_uspace = false;
    t->in_copy_to_uspace = false;

    t->interrupted = false;
    t->join_type = None;
    t->detached = false;
    waitq_initialize(&t->join_wq);

    t->rwlock_holder_type = RWLOCK_NONE;

    t->task = task;

    t->fpu_context_exists = 0;
    t->fpu_context_engaged = 0;

    /* might depend on previous initialization */
    thread_create_arch(t);

    ipl = interrupts_disable();
    spinlock_lock(&task->lock);
    if (!task->accept_new_threads) {
        spinlock_unlock(&task->lock);
        slab_free(thread_slab, t);
        interrupts_restore(ipl);
        return NULL;
    } else {
        /*
         * Bump the reference count so that this task cannot be
         * destroyed while the new thread is being attached to it.
         */
        task->refcount++;
    }
    spinlock_unlock(&task->lock);
    interrupts_restore(ipl);

    if (!(flags & THREAD_FLAG_NOATTACH))
        thread_attach(t, task);

    return t;
}
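
/*
 * Example (illustrative sketch; worker is a hypothetical function): with
 * THREAD_FLAG_NOATTACH the caller receives a fully initialized but still
 * invisible thread and publishes it explicitly once the remaining setup has
 * succeeded. sys_thread_create() below uses this very pattern.
 *
 *     thread_t *t = thread_create(worker, NULL, TASK,
 *         THREAD_FLAG_NOATTACH, "worker", false);
 *     if (t) {
 *         thread_attach(t, TASK);
 *         thread_ready(t);
 *     }
 */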

/** Destroy thread memory structure
 *
 * Detach the thread from all queues, CPUs etc. and destroy it.
 *
 * Assumes thread->lock is held!
 */
void thread_destroy(thread_t *t)
{
    bool destroy_task = false;

    ASSERT(t->state == Exiting || t->state == Undead);
    ASSERT(t->task);
    ASSERT(t->cpu);

    spinlock_lock(&t->cpu->lock);
    if (t->cpu->fpu_owner == t)
        t->cpu->fpu_owner = NULL;
    spinlock_unlock(&t->cpu->lock);

    spinlock_unlock(&t->lock);

    spinlock_lock(&threads_lock);
    btree_remove(&threads_btree, (btree_key_t) ((uintptr_t) t), NULL);
    spinlock_unlock(&threads_lock);

    /*
     * Detach from the containing task.
     */
    spinlock_lock(&t->task->lock);
    list_remove(&t->th_link);
    if (--t->task->refcount == 0) {
        t->task->accept_new_threads = false;
        destroy_task = true;
    }
    spinlock_unlock(&t->task->lock);

    if (destroy_task)
        task_destroy(t->task);

    /*
     * If the thread had a userspace context, free up its kernel_uarg
     * structure.
     */
    if (t->flags & THREAD_FLAG_USPACE) {
        ASSERT(t->thread_arg);
        free(t->thread_arg);
    }

    slab_free(thread_slab, t);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * threads_btree.
 *
 * @param t    Thread to be attached to the task.
 * @param task Task to which the thread is to be attached.
 */
void thread_attach(thread_t *t, task_t *task)
{
    ipl_t ipl;

    /*
     * Attach to the current task.
     */
    ipl = interrupts_disable();
    spinlock_lock(&task->lock);
    ASSERT(task->refcount);
    list_append(&t->th_link, &task->th_head);
    if (task->refcount == 1)
        task->main_thread = t;
    spinlock_unlock(&task->lock);

    /*
     * Register this thread in the system-wide list.
     */
    spinlock_lock(&threads_lock);
    btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t,
        NULL);
    spinlock_unlock(&threads_lock);

    interrupts_restore(ipl);
}
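
/*
 * Note: thread_attach() expects the caller (normally thread_create()) to have
 * already bumped task->refcount, hence the ASSERT above. The matching
 * decrement happens in thread_destroy() when the thread is detached from the
 * task again.
 */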

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state. All pending
 * timeouts are executed.
 */
void thread_exit(void)
{
    ipl_t ipl;

restart:
    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    if (THREAD->timeout_pending) {
        /* busy waiting for timeouts in progress */
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
        goto restart;
    }
    THREAD->state = Exiting;
    spinlock_unlock(&THREAD->lock);
    scheduler();

    /* Not reached */
    while (1)
        ;
}

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
    thread_usleep(sec * 1000000);
}

/** Wait for another thread to exit.
 *
 * @param t     Thread to join on exit.
 * @param usec  Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */
int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
{
    ipl_t ipl;
    int rc;

    if (t == THREAD)
        return EINVAL;

    /*
     * Since thread join can only be called once on an undetached thread,
     * the thread pointer is guaranteed to be still valid.
     */

    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);

    rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

    return rc;
}
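
/*
 * Example (illustrative sketch, assuming the SYNCH_NO_TIMEOUT and
 * SYNCH_FLAGS_NONE constants declared in synch.h): join a hypothetical
 * undetached thread t with no timeout and then let thread_detach() reap it:
 *
 *     rc = thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *     thread_detach(t);
 */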

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already in the Undead state,
 * deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t)
{
    ipl_t ipl;

    /*
     * Since the thread is expected not to be already detached,
     * the pointer to it must still be valid.
     */
    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    if (t->state == Undead) {
        thread_destroy(t);  /* unlocks &t->lock */
        interrupts_restore(ipl);
        return;
    } else {
        t->detached = true;
    }
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
    waitq_t wq;

    waitq_initialize(&wq);

    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}
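
/*
 * Note: the wait queue above lives on the stack and nobody ever wakes it,
 * so the call returns only once the usec timeout has expired.
 */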

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me      Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    THREAD->call_me = call_me;
    THREAD->call_me_with = call_me_with;
    spinlock_unlock(&THREAD->lock);
    interrupts_restore(ipl);
}

/** Print list of threads debug info */
void thread_print_list(void)
{
    link_t *cur;
    ipl_t ipl;

    /* Messing with thread structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);

    printf("tid    name       address    state    task       ctx code    "
        "   stack      cycles     cpu  kstack     waitqueue\n");
    printf("------ ---------- ---------- -------- ---------- --- --------"
        "-- ---------- ---------- ---- ---------- ----------\n");

    for (cur = threads_btree.leaf_head.next;
        cur != &threads_btree.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            thread_t *t;

            t = (thread_t *) node->value[i];

            uint64_t cycles;
            char suffix;
            order(t->cycles, &cycles, &suffix);

            printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx "
                "%#10zx %9llu%c ", t->tid, t->name, t,
                thread_states[t->state], t->task, t->task->context,
                t->thread_code, t->kstack, cycles, suffix);

            if (t->cpu)
                printf("%-4zd", t->cpu->id);
            else
                printf("none");

            if (t->state == Sleeping)
                printf(" %#10zx %#10zx", t->kstack,
                    t->sleep_queue);

            printf("\n");
        }
    }

    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
    btree_node_t *leaf;

    return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t),
        &leaf) != NULL;
}
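
/*
 * Note: the thread's own address doubles as its key in threads_btree (see
 * thread_attach() above), which is why a raw pointer can be validated here;
 * the answer is only meaningful while threads_lock is held.
 */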

/** Update accounting of current thread.
 *
 * Note that thread_lock on THREAD must be already held and
 * interrupts must be already disabled.
 *
 */
void thread_update_accounting(void)
{
    uint64_t time = get_cycle();
    THREAD->cycles += time - THREAD->last_cycle;
    THREAD->last_cycle = time;
}
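
/*
 * Each call charges the current thread for the cycles elapsed since the last
 * checkpoint. cushion() relies on this to fold the final THREAD->cycles value
 * into TASK->cycles when the thread finishes.
 */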

/** Process syscall to create new thread.
 *
 */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
    thread_id_t *uspace_thread_id)
{
    thread_t *t;
    char namebuf[THREAD_NAME_BUFLEN];
    uspace_arg_t *kernel_uarg;
    int rc;

    rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
    if (rc != 0)
        return (unative_t) rc;

    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
    if (rc != 0) {
        free(kernel_uarg);
        return (unative_t) rc;
    }

    t = thread_create(uinit, kernel_uarg, TASK,
        THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
    if (t) {
        if (uspace_thread_id != NULL) {
            int rc;

            rc = copy_to_uspace(uspace_thread_id, &t->tid,
                sizeof(t->tid));
            if (rc != 0) {
                ipl_t ipl;

                /*
                 * We have encountered a failure, but the thread
                 * has already been created. We need to undo its
                 * creation now.
                 */

                /*
                 * The new thread structure is initialized,
                 * but is still not visible to the system.
                 * We can safely deallocate it.
                 */
                slab_free(thread_slab, t);
                free(kernel_uarg);

                /*
                 * Now we need to decrement the task reference
                 * counter. Because we are running within the
                 * same task, thread t is not the last thread
                 * in the task, so it is safe to merely
                 * decrement the counter.
                 */
                ipl = interrupts_disable();
                spinlock_lock(&TASK->lock);
                TASK->refcount--;
                spinlock_unlock(&TASK->lock);
                interrupts_restore(ipl);

                return (unative_t) rc;
            }
        }
        thread_attach(t, TASK);
        thread_ready(t);

        return 0;
    } else
        free(kernel_uarg);

    return (unative_t) ENOMEM;
}
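
/*
 * Note the ordering above: the thread ID is copied out and any error resolved
 * while the thread is still unattached (THREAD_FLAG_NOATTACH), i.e. before
 * thread_attach() makes it visible to the system; this is what makes the
 * plain slab_free() undo path safe.
 */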

/** Process syscall to terminate thread.
 *
 */
unative_t sys_thread_exit(int uspace_status)
{
    thread_exit();
    /* Unreachable */
    return 0;
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 * current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
{
    /*
     * No need to acquire lock on THREAD because tid
     * remains constant for the lifespan of the thread.
     */
    return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
        sizeof(THREAD->tid));
}

/** @}
 */