/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>

/** Thread states */
char *thread_states[] = {
    "Invalid",
    "Running",
    "Sleeping",
    "Ready",
    "Entering",
    "Exiting",
    "Undead"
};

/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
SPINLOCK_INITIALIZE(threads_lock);

/** B+tree of all threads.
 *
 * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
 * as the threads_lock is held.
 */
btree_t threads_btree;

SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
    void (*f)(void *) = THREAD->thread_code;
    void *arg = THREAD->thread_arg;

    /* this is where each thread wakes up after its creation */
    spinlock_unlock(&THREAD->lock);
    interrupts_enable();

    f(arg);
    thread_exit();
    /* not reached */
}

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
    thread_t *t = (thread_t *) obj;
    pfn_t pfn;
    int status;

    spinlock_initialize(&t->lock, "thread_t_lock");
    link_initialize(&t->rq_link);
    link_initialize(&t->wq_link);
    link_initialize(&t->th_link);

#ifdef ARCH_HAS_FPU
#  ifdef CONFIG_FPU_LAZY
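    /*
     * With CONFIG_FPU_LAZY, the FPU context is presumably allocated only
     * once the thread actually uses the FPU (lazy context switching).
     */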
    t->saved_fpu_context = NULL;
#  else
    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
    if (!t->saved_fpu_context)
        return -1;
#  endif
#endif

    pfn = frame_alloc_rc(STACK_FRAMES, FRAME_KA | kmflags, &status);
    if (status) {
#ifdef ARCH_HAS_FPU
        if (t->saved_fpu_context)
            slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
        return -1;
    }
    t->kstack = (__u8 *) PA2KA(PFN2ADDR(pfn));

    return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
    thread_t *t = (thread_t *) obj;

    frame_free(ADDR2PFN(KA2PA(t->kstack)));
#ifdef ARCH_HAS_FPU
    if (t->saved_fpu_context)
        slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
    return 1; /* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
    THREAD = NULL;
    atomic_set(&nrdy, 0);
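
    /*
     * Create the slab cache for thread_t structures; thr_constructor and
     * thr_destructor take care of the kernel stack and the FPU context.
     */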
    thread_slab = slab_cache_create("thread_slab",
                    sizeof(thread_t), 0,
                    thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
    fpu_context_slab = slab_cache_create("fpu_slab",
                         sizeof(fpu_context_t),
                         FPU_CONTEXT_ALIGN,
                         NULL, NULL, 0);
#endif

    btree_create(&threads_btree);
}

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
    cpu_t *cpu;
    runq_t *r;
    ipl_t ipl;
    int i, avg;

    ipl = interrupts_disable();

    spinlock_lock(&t->lock);

    ASSERT(!(t->state == Ready));
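
    /*
     * Each time the thread is made ready, it is moved one run queue down
     * (up to the last queue), which appears to implement a simple form of
     * priority aging.
     */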
    i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

    cpu = CPU;
    if (t->flags & X_WIRED) {
        cpu = t->cpu;
    }
    t->state = Ready;
    spinlock_unlock(&t->lock);

    /*
     * Append t to respective ready queue on respective processor.
     */
    r = &cpu->rq[i];
    spinlock_lock(&r->lock);
    list_append(&t->rq_link, &r->rq_head);
    r->n++;
    spinlock_unlock(&r->lock);

    atomic_inc(&nrdy);
    avg = atomic_get(&nrdy) / config.cpu_active;
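    /*
     * avg is computed but not used further in this revision; it looks
     * like a leftover from, or preparation for, run queue load balancing.
     */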
    atomic_inc(&cpu->nrdy);

    interrupts_restore(ipl);
}

/** Destroy thread memory structure
 *
 * Detach the thread from all queues, CPUs, etc., and destroy it.
 *
 * Assumes that t->lock is held on entry!
 */
void thread_destroy(thread_t *t)
{
    bool destroy_task = false;

    ASSERT(t->state == Exiting || t->state == Undead);
    ASSERT(t->task);
    ASSERT(t->cpu);

    spinlock_lock(&t->cpu->lock);
    if (t->cpu->fpu_owner == t)
        t->cpu->fpu_owner = NULL;
    spinlock_unlock(&t->cpu->lock);

    spinlock_unlock(&t->lock);
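
    /*
     * Remove the thread from the system-wide B+tree of threads so that
     * thread_exists() stops reporting it; threads_lock serializes this
     * with all lookups.
     */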
    spinlock_lock(&threads_lock);
    btree_remove(&threads_btree, (btree_key_t) ((__address) t), NULL);
    spinlock_unlock(&threads_lock);

    /*
     * Detach from the containing task.
     */
    spinlock_lock(&t->task->lock);
    list_remove(&t->th_link);
    if (--t->task->refcount == 0) {
        t->task->accept_new_threads = false;
        destroy_task = true;
    }
    spinlock_unlock(&t->task->lock);

    if (destroy_task)
        task_destroy(t->task);

    slab_free(thread_slab, t);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name  Symbolic name.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
{
    thread_t *t;
    ipl_t ipl;

    t = (thread_t *) slab_alloc(thread_slab, 0);
    if (!t)
        return NULL;

    thread_create_arch(t);

    /* Not needed, but good for debugging */
    memsetb((__address) t->kstack, THREAD_STACK_SIZE * (1 << STACK_FRAMES), 0);

    ipl = interrupts_disable();
    spinlock_lock(&tidlock);
    t->tid = ++last_tid;
    spinlock_unlock(&tidlock);
    interrupts_restore(ipl);
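
    /*
     * Prepare the initial context: when the scheduler switches to this
     * thread for the first time, it will start executing in cushion() on
     * its own kernel stack; cushion() then calls the implementing function.
     */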
    context_save(&t->saved_context);
    context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);

    the_initialize((the_t *) t->kstack);

    ipl = interrupts_disable();
    t->saved_context.ipl = interrupts_read();
    interrupts_restore(ipl);

    memcpy(t->name, name, THREAD_NAME_BUFLEN);

    t->thread_code = func;
    t->thread_arg = arg;
    t->ticks = -1;
    t->priority = -1;       /* start in rq[0] */
    t->cpu = NULL;
    t->flags = 0;
    t->state = Entering;
    t->call_me = NULL;
    t->call_me_with = NULL;

    timeout_initialize(&t->sleep_timeout);
    t->sleep_interruptible = false;
    t->sleep_queue = NULL;
    t->timeout_pending = 0;

    t->in_copy_from_uspace = false;
    t->in_copy_to_uspace = false;

    t->interrupted = false;
    t->join_type = None;
    t->detached = false;
    waitq_initialize(&t->join_wq);

    t->rwlock_holder_type = RWLOCK_NONE;

    t->task = task;

    t->fpu_context_exists = 0;
    t->fpu_context_engaged = 0;

    /*
     * Attach to the containing task.
     */
    ipl = interrupts_disable();
    spinlock_lock(&task->lock);
    if (!task->accept_new_threads) {
        spinlock_unlock(&task->lock);
        slab_free(thread_slab, t);
        interrupts_restore(ipl);
        return NULL;
    }
    list_append(&t->th_link, &task->th_head);
    if (task->refcount++ == 0)
        task->main_thread = t;
    spinlock_unlock(&task->lock);

    /*
     * Register this thread in the system-wide B+tree of threads.
     */
    spinlock_lock(&threads_lock);
    btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
    spinlock_unlock(&threads_lock);

    interrupts_restore(ipl);

    return t;
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
    ipl_t ipl;

restart:
    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
        goto restart;
    }
    THREAD->state = Exiting;
    spinlock_unlock(&THREAD->lock);
    scheduler();

    /* Not reached */
    while (1)
        ;
}

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(__u32 sec)
{
    thread_usleep(sec * 1000000);
}

/** Wait for another thread to exit.
 *
 * @param t Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */
int thread_join_timeout(thread_t *t, __u32 usec, int flags)
{
    ipl_t ipl;
    int rc;

    if (t == THREAD)
        return EINVAL;

    /*
     * Since thread join can only be called once on an undetached thread,
     * the thread pointer is guaranteed to be still valid.
     */

    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);
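
    /*
     * Sleep on the thread's join wait queue; the sleeper is presumably
     * woken up when t enters the Undead state.
     */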
    rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

    return rc;
}

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already in the Undead
 * state, deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t)
{
    ipl_t ipl;

    /*
     * Since the thread is expected not to be detached yet,
     * the pointer to it must still be valid.
     */
    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    if (t->state == Undead) {
        thread_destroy(t);  /* unlocks &t->lock */
        interrupts_restore(ipl);
        return;
    } else {
        t->detached = true;
    }
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(__u32 usec)
{
    waitq_t wq;

    waitq_initialize(&wq);
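
    /*
     * Sleep on a private wait queue that nobody ever wakes up;
     * the timeout alone terminates the sleep.
     */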
    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me      Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    THREAD->call_me = call_me;
    THREAD->call_me_with = call_me_with;
    spinlock_unlock(&THREAD->lock);
    interrupts_restore(ipl);
}

/** Print debugging information about all threads */
void thread_print_list(void)
{
    link_t *cur;
    ipl_t ipl;

    /* Messing with thread structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);

    for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            thread_t *t;

            t = (thread_t *) node->value[i];
            printf("%s: address=%#zx, tid=%zd, state=%s, task=%#zx, code=%#zx, stack=%#zx, cpu=",
                t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack);
            if (t->cpu)
                printf("cpu%zd", t->cpu->id);
            else
                printf("none");
            if (t->state == Sleeping) {
                printf(", kst=%#zx", t->kstack);
                printf(", wq=%#zx", t->sleep_queue);
            }
            printf("\n");
        }
    }

    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
    btree_node_t *leaf;
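
    /* Threads are keyed in the B+tree by the kernel address of their thread_t. */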
    return btree_search(&threads_btree, (btree_key_t) ((__address) t), &leaf) != NULL;
}

/** Process syscall to create new thread.
 *
 */
__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
    thread_t *t;
    char namebuf[THREAD_NAME_BUFLEN];
    uspace_arg_t *kernel_uarg;
    __u32 tid;
    int rc;
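
    /*
     * Copy the thread name and the userspace argument structure into
     * kernel memory before using either of them.
     */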
    rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
    if (rc != 0)
        return (__native) rc;

    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
    if (rc != 0) {
        free(kernel_uarg);
        return (__native) rc;
    }

    if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
        tid = t->tid;
        thread_ready(t);
        return (__native) tid;
    } else {
        free(kernel_uarg);
    }

    return (__native) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
__native sys_thread_exit(int uspace_status)
{
    thread_exit();
    /* Unreachable */
    return 0;
}

/** @}
 */