Subversion Repositories HelenOS-historic

Rev 405 → Rev 413
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <list.h>

/** Initialize wait queue
 *
 * Initialize wait queue.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
    spinlock_initialize(&wq->lock);
    list_initialize(&wq->head);
    wq->missed_wakeups = 0;
}
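
/*
 * Illustrative sketch, not part of the original file: a wait queue is
 * typically embedded in a larger synchronization object and initialized
 * once before first use. The example type below is hypothetical.
 */
#if 0
typedef struct {
    spinlock_t lock;    /* protects the object's private state */
    waitq_t wq;         /* threads blocked on this object */
} example_sync_t;

static void example_sync_initialize(example_sync_t *s)
{
    spinlock_initialize(&s->lock);
    waitq_initialize(&s->wq);
}
#endif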
/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It tries to remove 'its' thread from the wait queue; it can
 * fail to do so if the timeout and a regular wakeup overlap.
 * In that case it behaves just as though there was no timeout
 * at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_interrupted_sleep(void *data)
{
    thread_t *t = (thread_t *) data;
    waitq_t *wq;
    int do_wakeup = 0;

    spinlock_lock(&threads_lock);
    if (!list_member(&t->threads_link, &threads_head))
        goto out;

grab_locks:
    spinlock_lock(&t->lock);
    if ((wq = t->sleep_queue)) {
        if (!spinlock_trylock(&wq->lock)) {
            spinlock_unlock(&t->lock);
            goto grab_locks; /* avoid deadlock */
        }

        list_remove(&t->wq_link);
        t->saved_context = t->sleep_timeout_context;
        do_wakeup = 1;

        spinlock_unlock(&wq->lock);
        t->sleep_queue = NULL;
    }

    t->timeout_pending = 0;
    spinlock_unlock(&t->lock);

    if (do_wakeup) thread_ready(t);

out:
    spinlock_unlock(&threads_lock);
}

/** Sleep until either wakeup or timeout occurs
 *
 * This is a sleep implementation which allows itself to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in FIFO fashion in a structure called a wait queue.
 *
 * This function is really basic in that other functions such as
 * waitq_sleep() and all the *_timeout() functions use it.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param nonblocking Blocking vs. non-blocking operation mode switch.
 *
 * If usec is greater than zero, regardless of the value of nonblocking,
 * the call will not return until either timeout or wakeup comes.
 *
 * If usec is zero and nonblocking is zero (false), the call
 * will not return until wakeup comes.
 *
 * If usec is zero and nonblocking is non-zero (true), the call will
 * immediately return, reporting either success or failure.
 *
 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 *         ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
 *
 * ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
 * of the call there was no pending wakeup.
 *
 * ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
{
-    volatile pri_t pri; /* must be live after context_restore() */
+    volatile ipl_t ipl; /* must be live after context_restore() */

restart:
-    pri = cpu_priority_high();
+    ipl = interrupts_disable();

    /*
     * Busy waiting for a delayed timeout.
     * This is an important fix for the race condition between
     * a delayed timeout and the next call to waitq_sleep_timeout().
     * Simply, the thread is not allowed to go to sleep if
     * there are timeouts in progress.
     */
    spinlock_lock(&THREAD->lock);
    if (THREAD->timeout_pending) {
        spinlock_unlock(&THREAD->lock);
-        cpu_priority_restore(pri);
+        interrupts_restore(ipl);
        goto restart;
    }
    spinlock_unlock(&THREAD->lock);

    spinlock_lock(&wq->lock);

    /* check whether to go to sleep at all */
    if (wq->missed_wakeups) {
        wq->missed_wakeups--;
        spinlock_unlock(&wq->lock);
-        cpu_priority_restore(pri);
+        interrupts_restore(ipl);
        return ESYNCH_OK_ATOMIC;
    }
    else {
        if (nonblocking && (usec == 0)) {
            /* return immediately instead of going to sleep */
            spinlock_unlock(&wq->lock);
-            cpu_priority_restore(pri);
+            interrupts_restore(ipl);
            return ESYNCH_WOULD_BLOCK;
        }
    }

    /*
     * Now we are firmly decided to go to sleep.
     */
    spinlock_lock(&THREAD->lock);
    if (usec) {
        /* We use the timeout variant. */
        if (!context_save(&THREAD->sleep_timeout_context)) {
            /*
             * Short emulation of scheduler() return code.
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
-            cpu_priority_restore(pri);
+            interrupts_restore(ipl);
            return ESYNCH_TIMEOUT;
        }
        THREAD->timeout_pending = 1;
        timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
    }

    list_append(&THREAD->wq_link, &wq->head);

    /*
     * Suspend execution.
     */
    THREAD->state = Sleeping;
    THREAD->sleep_queue = wq;

    spinlock_unlock(&THREAD->lock);

    scheduler();    /* wq->lock is released in scheduler_separated_stack() */
-    cpu_priority_restore(pri);
+    interrupts_restore(ipl);

    return ESYNCH_OK_BLOCKED;
}
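
/*
 * Illustrative sketch, not part of the original file: the three calling
 * modes documented above, using literal argument values.
 */
#if 0
static void example_sleep_modes(waitq_t *wq)
{
    int rc;

    /* usec == 0, nonblocking == 0: block until a wakeup arrives. */
    rc = waitq_sleep_timeout(wq, 0, 0);

    /* usec > 0: block until wakeup, but for at most 1000 microseconds. */
    rc = waitq_sleep_timeout(wq, 1000, 0);
    if (rc == ESYNCH_TIMEOUT) {
        /* no wakeup arrived within 1000 us */
    }

    /* usec == 0, nonblocking != 0: only consume a pending wakeup. */
    rc = waitq_sleep_timeout(wq, 0, 1);
    if (rc == ESYNCH_WOULD_BLOCK) {
        /* there was no pending wakeup at the time of the call */
    }
}
#endif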


/** Wake up first thread sleeping in a wait queue
 *
 * Wake up first thread sleeping in a wait queue.
 * This is the SMP- and IRQ-safe wrapper meant for
 * general use.
 *
 * Besides its 'normal' wakeup operation, it attempts
 * to unregister a possible timeout.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 *        will be woken up and missed count will be zeroed.
 */
void waitq_wakeup(waitq_t *wq, int all)
{
-    pri_t pri;
+    ipl_t ipl;

-    pri = cpu_priority_high();
+    ipl = interrupts_disable();
    spinlock_lock(&wq->lock);

    _waitq_wakeup_unsafe(wq, all);

    spinlock_unlock(&wq->lock);
-    cpu_priority_restore(pri);
+    interrupts_restore(ipl);
}
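
/*
 * Illustrative sketch, not part of the original file: pairing
 * waitq_sleep_timeout() with waitq_wakeup(). A wakeup issued while no
 * thread sleeps in the queue is recorded in missed_wakeups and consumed
 * by the next sleeper, so the wakeup is not lost.
 */
#if 0
static waitq_t example_wq;  /* assume waitq_initialize(&example_wq) ran at boot */

static void example_consumer(void)
{
    waitq_sleep_timeout(&example_wq, 0, 0); /* wait for the event */
}

static void example_producer(void)
{
    waitq_wakeup(&example_wq, 0); /* wake up one sleeper, if any */
}
#endif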
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version
 * of waitq_wakeup(). It assumes wq->lock is already
 * locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 *        will be woken up and missed count will be zeroed.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, int all)
{
    thread_t *t;

loop:
    if (list_empty(&wq->head)) {
        wq->missed_wakeups++;
        if (all) wq->missed_wakeups = 0;
        return;
    }

    t = list_get_instance(wq->head.next, thread_t, wq_link);

    list_remove(&t->wq_link);
    spinlock_lock(&t->lock);
    if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
        t->timeout_pending = 0;
    t->sleep_queue = NULL;
    spinlock_unlock(&t->lock);

    thread_ready(t);

    if (all) goto loop;
}
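
/*
 * Illustrative sketch, not part of the original file: the unsafe variant
 * is meant for callers that already hold wq->lock with interrupts
 * disabled; the required discipline mirrors what waitq_wakeup() does.
 */
#if 0
static void example_broadcast(waitq_t *wq)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&wq->lock);
    _waitq_wakeup_unsafe(wq, 1); /* wake all sleepers, zero missed count */
    spinlock_unlock(&wq->lock);
    interrupts_restore(ipl);
}
#endif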