/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file    thread.c
 * @brief   Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>


/** Thread states */
char *thread_states[] = {
    "Invalid",
    "Running",
    "Sleeping",
    "Ready",
    "Entering",
    "Exiting",
    "Undead"
};

/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
SPINLOCK_INITIALIZE(threads_lock);

/** B+tree of all threads.
 *
 * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
 * as the threads_lock is held.
 */
btree_t threads_btree;

SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * Interrupts are assumed to be disabled on entry
 * (i.e. interrupts_disable() has already been called).
 *
 */
static void cushion(void)
{
    void (*f)(void *) = THREAD->thread_code;
    void *arg = THREAD->thread_arg;

    /* this is where each thread wakes up after its creation */
    spinlock_unlock(&THREAD->lock);
    interrupts_enable();

    f(arg);
    thread_exit();
    /* not reached */
}

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
    thread_t *t = (thread_t *)obj;
    pfn_t pfn;
    int status;

    spinlock_initialize(&t->lock, "thread_t_lock");
    link_initialize(&t->rq_link);
    link_initialize(&t->wq_link);
    link_initialize(&t->th_link);

#ifdef ARCH_HAS_FPU
#  ifdef CONFIG_FPU_LAZY
    t->saved_fpu_context = NULL;
#  else
    t->saved_fpu_context = slab_alloc(fpu_context_slab,kmflags);
    if (!t->saved_fpu_context)
        return -1;
#  endif
#endif

    pfn = frame_alloc_rc(STACK_FRAMES, FRAME_KA | kmflags,&status);
    if (status) {
#ifdef ARCH_HAS_FPU
        if (t->saved_fpu_context)
            slab_free(fpu_context_slab,t->saved_fpu_context);
#endif
        return -1;
    }
    t->kstack = (__u8 *)PA2KA(PFN2ADDR(pfn));

    return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
    thread_t *t = (thread_t *)obj;

    frame_free(ADDR2PFN(KA2PA(t->kstack)));
#ifdef ARCH_HAS_FPU
    if (t->saved_fpu_context)
        slab_free(fpu_context_slab,t->saved_fpu_context);
#endif
    return 1; /* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
    THREAD = NULL;
    atomic_set(&nrdy,0);
    thread_slab = slab_cache_create("thread_slab",
                    sizeof(thread_t),0,
                    thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
    fpu_context_slab = slab_cache_create("fpu_slab",
                         sizeof(fpu_context_t),
                         FPU_CONTEXT_ALIGN,
                         NULL, NULL, 0);
#endif

    btree_create(&threads_btree);
}

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
    cpu_t *cpu;
    runq_t *r;
    ipl_t ipl;
    int i, avg;

    ipl = interrupts_disable();

    spinlock_lock(&t->lock);

    ASSERT(! (t->state == Ready));

    i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority;

    cpu = CPU;
    if (t->flags & X_WIRED) {
        cpu = t->cpu;
    }
    t->state = Ready;
    spinlock_unlock(&t->lock);

    /*
     * Append t to respective ready queue on respective processor.
     */
    r = &cpu->rq[i];
    spinlock_lock(&r->lock);
    list_append(&t->rq_link, &r->rq_head);
    r->n++;
    spinlock_unlock(&r->lock);

    atomic_inc(&nrdy);
    avg = atomic_get(&nrdy) / config.cpu_active;
    atomic_inc(&cpu->nrdy);

    interrupts_restore(ipl);
}

/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * Assume thread->lock is held!!
 */
void thread_destroy(thread_t *t)
{
    bool destroy_task = false;

    ASSERT(t->state == Exiting || t->state == Undead);
    ASSERT(t->task);
    ASSERT(t->cpu);

    spinlock_lock(&t->cpu->lock);
    if(t->cpu->fpu_owner==t)
        t->cpu->fpu_owner=NULL;
    spinlock_unlock(&t->cpu->lock);

    spinlock_unlock(&t->lock);

    spinlock_lock(&threads_lock);
    btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL);
    spinlock_unlock(&threads_lock);

    /*
     * Detach from the containing task.
     */
    spinlock_lock(&t->task->lock);
    list_remove(&t->th_link);
    if (--t->task->refcount == 0) {
        t->task->accept_new_threads = false;
        destroy_task = true;
    }
    spinlock_unlock(&t->task->lock);

    if (destroy_task)
        task_destroy(t->task);

    slab_free(thread_slab, t);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name  Symbolic name.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
{
    thread_t *t;
    ipl_t ipl;

    t = (thread_t *) slab_alloc(thread_slab, 0);
    if (!t)
        return NULL;

    thread_create_arch(t);

    /* Not needed, but good for debugging */
    memsetb((__address)t->kstack, THREAD_STACK_SIZE * 1<<STACK_FRAMES, 0);

    ipl = interrupts_disable();
    spinlock_lock(&tidlock);
    t->tid = ++last_tid;
    spinlock_unlock(&tidlock);
    interrupts_restore(ipl);

    context_save(&t->saved_context);
    context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);

    the_initialize((the_t *) t->kstack);

    ipl = interrupts_disable();
    t->saved_context.ipl = interrupts_read();
    interrupts_restore(ipl);

    memcpy(t->name, name, THREAD_NAME_BUFLEN);

    t->thread_code = func;
    t->thread_arg = arg;
    t->ticks = -1;
    t->priority = -1;       /* start in rq[0] */
    t->cpu = NULL;
    t->flags = 0;
    t->state = Entering;
    t->call_me = NULL;
    t->call_me_with = NULL;

    timeout_initialize(&t->sleep_timeout);
    t->sleep_interruptible = false;
    t->sleep_queue = NULL;
    t->timeout_pending = 0;

    t->in_copy_from_uspace = false;
    t->in_copy_to_uspace = false;

    t->interrupted = false;
    t->join_type = None;
    t->detached = false;
    waitq_initialize(&t->join_wq);

    t->rwlock_holder_type = RWLOCK_NONE;

    t->task = task;

    t->fpu_context_exists = 0;
    t->fpu_context_engaged = 0;

    /*
     * Attach to the containing task.
     */
    spinlock_lock(&task->lock);
    if (!task->accept_new_threads) {
        spinlock_unlock(&task->lock);
        slab_free(thread_slab, t);
        return NULL;
    }
    list_append(&t->th_link, &task->th_head);
    if (task->refcount++ == 0)
        task->main_thread = t;
    spinlock_unlock(&task->lock);

    /*
     * Register this thread in the system-wide list.
     */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);
    btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
    spinlock_unlock(&threads_lock);

    interrupts_restore(ipl);

    return t;
}
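
/*
 * Illustrative usage (editor's sketch, not part of the original sources):
 * a typical kernel caller pairs thread_create() with thread_ready() to start
 * the new thread. The worker() function, its NULL argument and the "worker"
 * name below are hypothetical.
 *
 *  static void worker(void *arg)
 *  {
 *      do some work; cushion() calls thread_exit() when worker() returns
 *  }
 *
 *  thread_t *t = thread_create(worker, NULL, TASK, 0, "worker");
 *  if (t)
 *      thread_ready(t);
 */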

/** Make thread exiting
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
    ipl_t ipl;

restart:
    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
        goto restart;
    }
    THREAD->state = Exiting;
    spinlock_unlock(&THREAD->lock);
    scheduler();

    /* Not reached */
    while (1)
        ;
}


/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(__u32 sec)
{
    thread_usleep(sec*1000000);
}

/** Wait for another thread to exit.
 *
 * @param t Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */
int thread_join_timeout(thread_t *t, __u32 usec, int flags)
{
    ipl_t ipl;
    int rc;

    if (t == THREAD)
        return EINVAL;

    /*
     * Since thread join can only be called once on an undetached thread,
     * the thread pointer is guaranteed to be still valid.
     */

    ipl = interrupts_disable();
    spinlock_lock(&t->lock);

    ASSERT(!t->detached);

    (void) waitq_sleep_prepare(&t->join_wq);
    spinlock_unlock(&t->lock);

    rc = waitq_sleep_timeout_unsafe(&t->join_wq, usec, flags);

    waitq_sleep_finish(&t->join_wq, rc, ipl);

    return rc;
}
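
/*
 * Illustrative usage (editor's sketch; the SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE
 * and ESYNCH_OK_* constants are assumed to come from synch.h): wait for t to
 * exit and become Undead, then reap it with thread_detach().
 *
 *  int rc = thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *  if (rc == ESYNCH_OK_BLOCKED || rc == ESYNCH_OK_ATOMIC)
 *      thread_detach(t);   deallocates the Undead thread's resources
 */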

/** Detach thread.
 *
 * Mark the thread as detached; if the thread is already in the Undead state,
 * deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t)
{
    ipl_t ipl;

    /*
     * Since the thread is expected not to be detached yet,
     * the pointer to it must still be valid.
     */

    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    if (t->state == Undead) {
        thread_destroy(t);  /* unlocks &t->lock */
        interrupts_restore(ipl);
        return;
    } else {
        t->detached = true;
    }
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(__u32 usec)
{
    waitq_t wq;

    waitq_initialize(&wq);

    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me      Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    THREAD->call_me = call_me;
    THREAD->call_me_with = call_me_with;
    spinlock_unlock(&THREAD->lock);
    interrupts_restore(ipl);
}

/** Print list of threads debug info */
void thread_print_list(void)
{
    link_t *cur;
    ipl_t ipl;

    /* Messing with thread structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);

    for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            thread_t *t;

            t = (thread_t *) node->value[i];
            printf("%s: address=%#zX, tid=%zd, state=%s, task=%#zX, code=%#zX, stack=%#zX, cpu=",
                t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack);
            if (t->cpu)
                printf("cpu%zd", t->cpu->id);
            else
                printf("none");
            if (t->state == Sleeping) {
                printf(", kst=%#zX", t->kstack);
                printf(", wq=%#zX", t->sleep_queue);
            }
            printf("\n");
        }
    }

    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
    btree_node_t *leaf;

    return btree_search(&threads_btree, (btree_key_t) ((__address) t), &leaf) != NULL;
}
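
/*
 * Illustrative locking protocol for thread_exists() (editor's sketch; the
 * surrounding caller is hypothetical): interrupts must be disabled and
 * threads_lock held, and t is only guaranteed to stay valid while the lock
 * is held.
 *
 *  ipl_t ipl = interrupts_disable();
 *  spinlock_lock(&threads_lock);
 *  if (thread_exists(t)) {
 *      ... t is guaranteed to exist while threads_lock is held ...
 *  }
 *  spinlock_unlock(&threads_lock);
 *  interrupts_restore(ipl);
 */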

/** Process syscall to create new thread.
 *
 */
__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
    thread_t *t;
    char namebuf[THREAD_NAME_BUFLEN];
    uspace_arg_t *kernel_uarg;
    __u32 tid;
    int rc;

    rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
    if (rc != 0)
        return (__native) rc;

    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
    if (rc != 0) {
        free(kernel_uarg);
        return (__native) rc;
    }

    if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
        tid = t->tid;
        thread_ready(t);
        return (__native) tid;
    } else {
        free(kernel_uarg);
    }

    return (__native) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
__native sys_thread_exit(int uspace_status)
{
    thread_exit();
    /* Unreachable */
    return 0;
}