Subversion Repositories HelenOS

Rev 4633 → Rev 4635 (lines removed in rev 4635 are prefixed with -)

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <adt/list.h>
#include <fibril.h>
#include <thread.h>
#include <tls.h>
#include <malloc.h>
#include <unistd.h>
#include <stdio.h>
#include <libarch/faddr.h>
#include <futex.h>
#include <assert.h>
#include <async.h>

#ifndef FIBRIL_INITIAL_STACK_PAGES_NO
#define FIBRIL_INITIAL_STACK_PAGES_NO   1
#endif

/**
 * This futex serializes access to ready_list, serialized_list and manager_list.
 */
static atomic_t fibril_futex = FUTEX_INITIALIZER;

static LIST_INITIALIZE(ready_list);
static LIST_INITIALIZE(serialized_list);
static LIST_INITIALIZE(manager_list);

static void fibril_main(void);

/** Number of threads that are executing a manager fibril. */
static int threads_in_manager;
/** Number of threads that are executing a manager fibril and are serialized. */
static int serialized_threads;  /* Protected by async_futex */
/** Fibril-local count of serialization. If > 0, we must not preempt */
static fibril_local int serialization_count;

/** Setup fibril information into TCB structure */
fibril_t *fibril_setup(void)
{
    fibril_t *f;
    tcb_t *tcb;

    tcb = __make_tls();
    if (!tcb)
        return NULL;

    f = malloc(sizeof(fibril_t));
    if (!f) {
        __free_tls(tcb);
        return NULL;
    }

    tcb->fibril_data = f;
    f->tcb = tcb;

    f->func = NULL;
    f->arg = NULL;
    f->stack = NULL;
    f->clean_after_me = NULL;
    f->retval = 0;
    f->flags = 0;

    return f;
}

void fibril_teardown(fibril_t *f)
{
    __free_tls(f->tcb);
    free(f);
}

/** Function that spans the whole life-cycle of a fibril.
 *
 * Each fibril begins execution in this function. Then the function implementing
 * the fibril logic is called. After its return, the return value is saved.
 * The fibril then switches to another fibril, which cleans up after it.
 */
void fibril_main(void)
{
    fibril_t *f = __tcb_get()->fibril_data;

    /* Call the implementing function. */
    f->retval = f->func(f->arg);

    fibril_switch(FIBRIL_FROM_DEAD);
    /* not reached */
}

/** Switch from the current fibril.
 *
 * If calling with FIBRIL_TO_MANAGER parameter, the async_futex should be
 * held. (A usage sketch of this convention follows the function.)
 *
 * @param stype     Switch type. One of FIBRIL_PREEMPT, FIBRIL_TO_MANAGER,
 *          FIBRIL_FROM_MANAGER, FIBRIL_FROM_DEAD. The parameter
 *          describes the circumstances of the switch.
 * @return      Return 0 if there is no ready fibril,
 *          return 1 otherwise.
 */
int fibril_switch(fibril_switch_type_t stype)
{
    fibril_t *srcf, *dstf;
    int retval = 0;

    futex_down(&fibril_futex);

    if (stype == FIBRIL_PREEMPT && list_empty(&ready_list))
        goto ret_0;

    if (stype == FIBRIL_FROM_MANAGER) {
        if (list_empty(&ready_list) && list_empty(&serialized_list))
            goto ret_0;
        /*
         * Do not preempt if there are not enough threads to run the
         * ready fibrils which are not serialized.
         */
        if (list_empty(&serialized_list) &&
            threads_in_manager <= serialized_threads) {
            goto ret_0;
        }
    }
    /* If we are going to manager and none exists, create it */
    if (stype == FIBRIL_TO_MANAGER || stype == FIBRIL_FROM_DEAD) {
        while (list_empty(&manager_list)) {
            futex_up(&fibril_futex);
            async_create_manager();
            futex_down(&fibril_futex);
        }
    }

    srcf = __tcb_get()->fibril_data;
    if (stype != FIBRIL_FROM_DEAD) {
        /* Save current state */
        if (!context_save(&srcf->ctx)) {
-           /*
-            * Make sure to reload srcf with the current fibril
-            * address. Its value may be invalid after
-            * context_restore() due to e.g. register recycling.
-            */
-           srcf = __tcb_get()->fibril_data;
            if (serialization_count)
                srcf->flags &= ~FIBRIL_SERIALIZED;
            if (srcf->clean_after_me) {
                /*
                 * Cleanup after the dead fibril from which we
                 * restored context here.
                 */
                void *stack = srcf->clean_after_me->stack;
                if (stack) {
                    /*
                     * This check is necessary because a
                     * thread could have exited like a
                     * normal fibril using the
                     * FIBRIL_FROM_DEAD switch type. In that
                     * case, its fibril will not have the
                     * stack member filled.
                     */
                    free(stack);
                }
                fibril_teardown(srcf->clean_after_me);
                srcf->clean_after_me = NULL;
            }
            return 1;   /* futex_up already done here */
        }

        /* Save myself to the correct run list */
        if (stype == FIBRIL_PREEMPT)
            list_append(&srcf->link, &ready_list);
        else if (stype == FIBRIL_FROM_MANAGER) {
            list_append(&srcf->link, &manager_list);
            threads_in_manager--;
        } else {
            /*
             * If stype == FIBRIL_TO_MANAGER, don't put ourselves to
             * any list, we should already be somewhere, or we will
             * be lost.
             */
        }
    }

    /* Choose a new fibril to run */
    if (stype == FIBRIL_TO_MANAGER || stype == FIBRIL_FROM_DEAD) {
        dstf = list_get_instance(manager_list.next, fibril_t, link);
        if (serialization_count && stype == FIBRIL_TO_MANAGER) {
            serialized_threads++;
            srcf->flags |= FIBRIL_SERIALIZED;
        }
        threads_in_manager++;

        if (stype == FIBRIL_FROM_DEAD)
            dstf->clean_after_me = srcf;
    } else {
        if (!list_empty(&serialized_list)) {
            dstf = list_get_instance(serialized_list.next, fibril_t,
                link);
            serialized_threads--;
        } else {
            dstf = list_get_instance(ready_list.next, fibril_t,
                link);
        }
    }
    list_remove(&dstf->link);

    futex_up(&fibril_futex);
    context_restore(&dstf->ctx);
    /* not reached */

ret_0:
    futex_up(&fibril_futex);
    return retval;
}
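
/*
 * Usage sketch for the FIBRIL_TO_MANAGER convention documented at
 * fibril_switch() above: a fibril that needs to block takes async_futex,
 * records itself for a later wakeup and switches to a manager fibril.
 * The example_record_t type, its fields and example_block() are illustrative
 * placeholders; async_futex is the async framework's futex mentioned in the
 * comments above, and the assumption that it is released on the fibril's
 * behalf after the switch follows the async framework's usage rather than
 * anything guaranteed by this file alone.
 */
#if 0
typedef struct {
    fid_t waiter;   /* fibril to be woken up via fibril_add_ready() */
    int done;       /* condition flag set by the waker */
} example_record_t;

static void example_block(example_record_t *rec)
{
    futex_down(&async_futex);
    if (!rec->done) {
        /* Record who is waiting so the waker knows whom to resume. */
        rec->waiter = fibril_get_id();
        /* async_futex must be held when switching to the manager. */
        fibril_switch(FIBRIL_TO_MANAGER);
        /* Running again: async_futex was released on our behalf. */
    } else
        futex_up(&async_futex);
}
#endif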

/** Create a new fibril.
 *
 * @param func      Implementing function of the new fibril.
 * @param arg       Argument to pass to func.
 *
 * @return      Return 0 on failure or fibril ID of the new fibril.
 */
fid_t fibril_create(int (*func)(void *), void *arg)
{
    fibril_t *f;

    f = fibril_setup();
    if (!f)
        return 0;
    f->stack = (char *) malloc(FIBRIL_INITIAL_STACK_PAGES_NO *
        getpagesize());
    if (!f->stack) {
        fibril_teardown(f);
        return 0;
    }

    f->func = func;
    f->arg = arg;

    context_save(&f->ctx);
    context_set(&f->ctx, FADDR(fibril_main), f->stack,
        FIBRIL_INITIAL_STACK_PAGES_NO * getpagesize(), f->tcb);

    return (fid_t) f;
}
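
/*
 * Usage sketch: creating a fibril and making it runnable. Only
 * fibril_create() and fibril_add_ready() come from this file; worker(),
 * start_worker() and the argument they pass around are illustrative.
 * fibril_add_ready() does not switch; the new fibril first runs when some
 * fibril or manager switches to it.
 */
#if 0
static int worker(void *arg)
{
    printf("worker fibril got %d\n", *(int *) arg);
    return 0;   /* stored as f->retval, see fibril_main() */
}

static int start_worker(void)
{
    /* Static so the argument outlives start_worker() itself. */
    static int value = 42;

    fid_t fid = fibril_create(worker, &value);
    if (fid == 0)
        return -1;  /* out of memory */

    fibril_add_ready(fid);
    return 0;
}
#endif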

/** Add a fibril to the ready list.
 *
 * @param fid       Pointer to the fibril structure of the fibril to be
 *          added.
 */
void fibril_add_ready(fid_t fid)
{
    fibril_t *f;

    f = (fibril_t *) fid;
    futex_down(&fibril_futex);
    if ((f->flags & FIBRIL_SERIALIZED))
        list_append(&f->link, &serialized_list);
    else
        list_append(&f->link, &ready_list);
    futex_up(&fibril_futex);
}

/** Add a fibril to the manager list.
 *
 * @param fid       Pointer to the fibril structure of the fibril to be
 *          added.
 */
void fibril_add_manager(fid_t fid)
{
    fibril_t *f;

    f = (fibril_t *) fid;

    futex_down(&fibril_futex);
    list_append(&f->link, &manager_list);
    futex_up(&fibril_futex);
}

/** Remove one manager from the manager list. */
void fibril_remove_manager(void)
{
    futex_down(&fibril_futex);
    if (list_empty(&manager_list)) {
        futex_up(&fibril_futex);
        return;
    }
    list_remove(manager_list.next);
    futex_up(&fibril_futex);
}
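
/*
 * Usage sketch: registering a manager fibril. In HelenOS this is normally
 * done by the async framework (see async_create_manager(), called from
 * fibril_switch() above); example_manager() and register_manager() are
 * simplified placeholders, not the real implementation.
 */
#if 0
static int example_manager(void *arg)
{
    while (1) {
        /*
         * Run ready/serialized fibrils. fibril_switch() returns 0 when
         * there is nothing to run; a real manager would then wait for
         * IPC instead of spinning.
         */
        fibril_switch(FIBRIL_FROM_MANAGER);
    }
    /* not reached */
    return 0;
}

static void register_manager(void)
{
    fid_t fid = fibril_create(example_manager, NULL);
    if (fid != 0)
        fibril_add_manager(fid);
}
#endif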

/** Return fibril id of the currently running fibril.
 *
 * @return fibril ID of the currently running fibril.
 *
 */
fid_t fibril_get_id(void)
{
    return (fid_t) __tcb_get()->fibril_data;
}

/** Disable preemption.
 *
 * If the fibril wants to send several messages in a row and does not want to
 * be preempted, it should call async_serialize_start() at the beginning of the
 * communication and async_serialize_end() at the end. If it is a true
 * multithreaded application, it should protect the communication channel by a
 * futex as well.
 *
 */
void fibril_inc_sercount(void)
{
    serialization_count++;
}

/** Restore the preemption counter to the previous state. */
void fibril_dec_sercount(void)
{
    serialization_count--;
}
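
/*
 * Usage sketch for the serialization comment above: sending several messages
 * without being preempted in between. async_serialize_start() and
 * async_serialize_end() are the async framework wrappers named in that
 * comment and are assumed to boil down to fibril_inc_sercount() and
 * fibril_dec_sercount(); send_part() and the phone argument are illustrative
 * placeholders.
 */
#if 0
extern void send_part(int phone, int part);

static void send_batch(int phone)
{
    async_serialize_start();    /* do not preempt between the sends */
    send_part(phone, 1);
    send_part(phone, 2);
    send_part(phone, 3);
    async_serialize_end();      /* preemption allowed again */
}
#endif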

/** @}
 */
349