/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Reader/Writer locks
 */

/*
 * These locks are not recursive.
 * Neither readers nor writers will suffer starvation.
 *
 * If there is a writer followed by a reader waiting for the rwlock
 * and the writer times out, all leading readers are automatically woken up
 * and allowed in.
 */
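
/*
 * Usage sketch (illustrative only). The non-timeout convenience
 * wrappers rwlock_read_lock() and rwlock_write_lock() are assumed to
 * be provided by synch/rwlock.h on top of the _timeout variants
 * defined below; only initialization and the unlock operations are
 * defined in this file.
 *
 *     rwlock_t rwl;
 *
 *     rwlock_initialize(&rwl);
 *
 *     // Reader side: any number of readers may hold the lock at once.
 *     rwlock_read_lock(&rwl);
 *     // ... read the protected data ...
 *     rwlock_read_unlock(&rwl);
 *
 *     // Writer side: exclusive access.
 *     rwlock_write_lock(&rwl);
 *     // ... modify the protected data ...
 *     rwlock_write_unlock(&rwl);
 */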

/*
 * NOTE ON rwlock_holder_type
 * This field is set on an attempt to acquire the exclusive mutex
 * to the respective value depending on whether the caller is a reader
 * or a writer. The field is examined only if the thread had been
 * previously blocked on the exclusive mutex. Thus it is safe
 * to store the rwlock type in the thread structure, because
 * each thread can block on only one rwlock at a time.
 */
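
/*
 * For illustration, the declarations the note above refers to are
 * assumed to look roughly as follows; the authoritative versions live
 * in synch/rwlock.h and proc/thread.h.
 *
 *     typedef enum {
 *         RWLOCK_NONE,
 *         RWLOCK_READER,
 *         RWLOCK_WRITER
 *     } rwlock_type_t;
 *
 *     struct thread {
 *         ...
 *         rwlock_type_t rwlock_holder_type;  // meaningful only while
 *         ...                                // blocked on a rwlock
 *     };
 */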

#include <synch/synch.h>
#include <synch/rwlock.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <synch/waitq.h>

#include <list.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <arch.h>
#include <proc/thread.h>
#include <panic.h>
66
#define ALLOW_ALL       0
67
#define ALLOW_READERS_ONLY  1
68
 
69
static void let_others_in(rwlock_t *rwl, int readers_only);
70
static void release_spinlock(void *arg);
71
 
72
void rwlock_initialize(rwlock_t *rwl) {
73
    spinlock_initialize(&rwl->lock);
74
    mutex_initialize(&rwl->exclusive);
75
    rwl->readers_in = 0;
76
}
77
 

int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
    pri_t pri;
    int rc;

    pri = cpu_priority_high();
    spinlock_lock(&THREAD->lock);
    THREAD->rwlock_holder_type = RWLOCK_WRITER;
    spinlock_unlock(&THREAD->lock);
    cpu_priority_restore(pri);

    /*
     * Writers take the easy part.
     * They just need to acquire the exclusive mutex.
     */
    rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
    if (SYNCH_FAILED(rc)) {

        /*
         * Lock operation timed out.
         * The state of rwl is UNKNOWN at this point.
         * No claims about its holder can be made.
         */

        pri = cpu_priority_high();
        spinlock_lock(&rwl->lock);
        /*
         * Now that rwl is locked, we can inspect it again.
         * If it is held by some readers already, we can let
         * readers from the head of the wait queue in.
         */
        if (rwl->readers_in)
            let_others_in(rwl, ALLOW_READERS_ONLY);
        spinlock_unlock(&rwl->lock);
        cpu_priority_restore(pri);
    }

    return rc;
}
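
/*
 * Example (illustrative sketch): a writer willing to wait at most
 * 50 ms (50000 us) for an initialized rwlock_t rwl. The precise
 * semantics of the trylock argument are assumed to be defined in
 * synch/synch.h; 0 is taken here to mean a normal, blocking attempt.
 *
 *     int rc;
 *
 *     rc = _rwlock_write_lock_timeout(&rwl, 50000, 0);
 *     if (SYNCH_FAILED(rc)) {
 *         // Timed out. Per the cleanup path above, leading readers
 *         // in the wait queue have already been let in.
 *         return;
 *     }
 *     // ... exclusive access to the protected data ...
 *     rwlock_write_unlock(&rwl);
 */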

int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
    int rc;
    pri_t pri;

    pri = cpu_priority_high();
    spinlock_lock(&THREAD->lock);
    THREAD->rwlock_holder_type = RWLOCK_READER;
    spinlock_unlock(&THREAD->lock);

    spinlock_lock(&rwl->lock);

    /*
     * Find out whether we can get what we want without blocking.
     */
    rc = mutex_trylock(&rwl->exclusive);
    if (SYNCH_FAILED(rc)) {

        /*
         * 'exclusive' mutex is being held by someone else.
         * If the holder is a reader and there is no one
         * else waiting for it, we can enter the critical
         * section.
         */

        if (rwl->readers_in) {
            spinlock_lock(&rwl->exclusive.sem.wq.lock);
            if (list_empty(&rwl->exclusive.sem.wq.head)) {
                /*
                 * We can enter.
                 */
                spinlock_unlock(&rwl->exclusive.sem.wq.lock);
                goto shortcut;
            }
            spinlock_unlock(&rwl->exclusive.sem.wq.lock);
        }

        /*
         * In order to prevent a race condition in which a reader
         * could block another reader at the head of the waitq,
         * we register a function to unlock rwl->lock
         * after this thread is put asleep.
         */
        thread_register_call_me(release_spinlock, &rwl->lock);

        rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
        switch (rc) {
            case ESYNCH_WOULD_BLOCK:
                /*
                 * release_spinlock() wasn't called
                 */
                thread_register_call_me(NULL, NULL);
                spinlock_unlock(&rwl->lock);
                /* Fall through. */
            case ESYNCH_TIMEOUT:
                /*
                 * The sleep timed out.
                 * We just restore the CPU priority.
                 */
                /* Fall through. */
            case ESYNCH_OK_BLOCKED:
                /*
                 * We were woken with rwl->readers_in already incremented.
                 * Note that this arrangement avoids race condition between
                 * two concurrent readers. (Race is avoided if 'exclusive' is
                 * locked at the same time as 'readers_in' is incremented.
                 * Same time means both events happen atomically when
                 * rwl->lock is held.)
                 */
                cpu_priority_restore(pri);
                break;
            case ESYNCH_OK_ATOMIC:
                panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC");
                break;
            default:
                panic("invalid ESYNCH");
                break;
        }
        return rc;
    }

shortcut:

    /*
     * We can increment readers_in only if we didn't go to sleep.
     * For sleepers, let_others_in() will do the job.
     */
    rwl->readers_in++;

    spinlock_unlock(&rwl->lock);
    cpu_priority_restore(pri);

    return ESYNCH_OK_ATOMIC;
}
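
/*
 * Example (illustrative sketch, same assumptions as the writer example
 * above): a reader that tolerates a 50 ms wait.
 *
 *     int rc;
 *
 *     rc = _rwlock_read_lock_timeout(&rwl, 50000, 0);
 *     if (!SYNCH_FAILED(rc)) {
 *         // ... read the protected data, possibly concurrently with
 *         // other readers ...
 *         rwlock_read_unlock(&rwl);
 *     }
 */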

void rwlock_write_unlock(rwlock_t *rwl)
{
    pri_t pri;

    pri = cpu_priority_high();
    spinlock_lock(&rwl->lock);
    let_others_in(rwl, ALLOW_ALL);
    spinlock_unlock(&rwl->lock);
    cpu_priority_restore(pri);
}

void rwlock_read_unlock(rwlock_t *rwl)
{
    pri_t pri;

    pri = cpu_priority_high();
    spinlock_lock(&rwl->lock);
    if (!--rwl->readers_in)
        let_others_in(rwl, ALLOW_ALL);
    spinlock_unlock(&rwl->lock);
    cpu_priority_restore(pri);
}

/*
 * Must be called with rwl->lock held and with elevated CPU priority
 * (i.e. after cpu_priority_high()).
 *
 * If readers_only is false (unlock scenario):
 * Let the first sleeper on the 'exclusive' mutex in, no matter
 * whether it is a reader or a writer. If there are more leading
 * readers in line, let each of them in.
 *
 * Otherwise (timeout scenario):
 * Let all leading readers in.
 */
static void let_others_in(rwlock_t *rwl, int readers_only)
{
    rwlock_type_t type = RWLOCK_NONE;
    thread_t *t = NULL;
    int one_more = 1;

    spinlock_lock(&rwl->exclusive.sem.wq.lock);

    if (!list_empty(&rwl->exclusive.sem.wq.head))
        t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
    do {
        if (t) {
            spinlock_lock(&t->lock);
            type = t->rwlock_holder_type;
            spinlock_unlock(&t->lock);
        }

        /*
         * If readers_only is true, we wake all leading readers
         * if and only if rwl is locked by another reader.
         * Assumption: readers_only ==> rwl->readers_in
         */
        if (readers_only && (type != RWLOCK_READER))
            break;

        if (type == RWLOCK_READER) {
            /*
             * Waking up a reader.
             * We are responsible for incrementing rwl->readers_in for it.
             */
            rwl->readers_in++;
        }

        /*
         * Only the last iteration through this loop can increment
         * rwl->exclusive.sem.wq.missed_wakeups. All preceding
         * iterations will wake up a thread.
         */
        /*
         * We call the internal version of waitq_wakeup, which
         * relies on the fact that the waitq is already locked.
         */
        _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);

        t = NULL;
        if (!list_empty(&rwl->exclusive.sem.wq.head)) {
            t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
            if (t) {
                spinlock_lock(&t->lock);
                if (t->rwlock_holder_type != RWLOCK_READER)
                    one_more = 0;
                spinlock_unlock(&t->lock);
            }
        }
    } while ((type == RWLOCK_READER) && t && one_more);

    spinlock_unlock(&rwl->exclusive.sem.wq.lock);
}
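
/*
 * Worked example for let_others_in() (hypothetical queue contents):
 * suppose the 'exclusive' wait queue holds, head first, R1, R2, W1, R3,
 * where R* are readers and W* are writers.
 *
 * - ALLOW_ALL (unlock scenario): R1 and R2 are woken and
 *   rwl->readers_in is incremented once for each; one_more is cleared
 *   when W1 is found at the head, so W1 and R3 keep sleeping. Had a
 *   writer been first in line, only that writer would have been woken.
 *
 * - ALLOW_READERS_ONLY (timeout scenario): R1 and R2 are let in, but
 *   only because some reader already holds rwl; a leading writer would
 *   stop the loop before any wakeup.
 */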
307
 
308
void release_spinlock(void *arg)
309
{
310
    spinlock_unlock((spinlock_t *) arg);
311
}