/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <adt/list.h>
#include <fibril.h>
#include <thread.h>
#include <tls.h>
#include <malloc.h>
#include <unistd.h>
#include <stdio.h>
#include <libarch/faddr.h>
#include <futex.h>
#include <assert.h>
#include <async.h>

#ifndef FIBRIL_INITIAL_STACK_PAGES_NO
#define FIBRIL_INITIAL_STACK_PAGES_NO 1
#endif

/**
 * This futex serializes access to ready_list, serialized_list and manager_list.
 */
static atomic_t fibril_futex = FUTEX_INITIALIZER;

static LIST_INITIALIZE(ready_list);
static LIST_INITIALIZE(serialized_list);
static LIST_INITIALIZE(manager_list);

static void fibril_main(void);

/** Number of threads that are executing a manager fibril. */
static int threads_in_manager;
/** Number of threads that are executing a manager fibril and are serialized. */
static int serialized_threads;	/* Protected by async_futex */
/** Fibril-local count of serialization. If > 0, we must not preempt */
static fibril_local int serialization_count;

/** Set up the fibril information in the TCB structure. */
fibril_t *fibril_setup(void)
{
	fibril_t *f;
	tcb_t *tcb;

	tcb = __make_tls();
	if (!tcb)
		return NULL;

	f = malloc(sizeof(fibril_t));
	if (!f) {
		__free_tls(tcb);
		return NULL;
	}

	tcb->fibril_data = f;
	f->tcb = tcb;

	f->func = NULL;
	f->arg = NULL;
	f->stack = NULL;
	f->clean_after_me = NULL;
	f->retval = 0;
	f->flags = 0;

	return f;
}

void fibril_teardown(fibril_t *f)
{
	__free_tls(f->tcb);
	free(f);
}

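/*
 * Usage sketch (not part of the original file): every thread that wants to
 * run fibrils needs a fibril_t bound to its TCB. A thread entry wrapper
 * would typically pair fibril_setup() with fibril_teardown(); the wrapper
 * name and the exact __tcb_set() placement below are illustrative assumptions.
 *
 *	void example_thread_main(void)
 *	{
 *		fibril_t *f = fibril_setup();
 *		if (!f)
 *			return;
 *		__tcb_set(f->tcb);
 *		// ... do the thread's work, possibly switching fibrils ...
 *		fibril_teardown(f);
 *	}
 */
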
/** Function that spans the whole life-cycle of a fibril.
 *
 * Each fibril begins execution in this function. Then the function implementing
 * the fibril logic is called. After its return, the return value is saved.
 * The fibril then switches to another fibril, which cleans up after it.
 */
void fibril_main(void)
{
	fibril_t *f = __tcb_get()->fibril_data;

	/* Call the implementing function. */
	f->retval = f->func(f->arg);

	fibril_switch(FIBRIL_FROM_DEAD);
	/* not reached */
}

/** Switch from the current fibril.
 *
 * If called with the FIBRIL_TO_MANAGER parameter, the async_futex should be
 * held.
 *
 * @param stype		Switch type. One of FIBRIL_PREEMPT, FIBRIL_TO_MANAGER,
 *			FIBRIL_FROM_MANAGER, FIBRIL_FROM_DEAD. The parameter
 *			describes the circumstances of the switch.
 * @return		0 if there is no ready fibril, 1 otherwise.
 */
int fibril_switch(fibril_switch_type_t stype)
{
	fibril_t *srcf, *dstf;
	int retval = 0;

	futex_down(&fibril_futex);

	if (stype == FIBRIL_PREEMPT && list_empty(&ready_list))
		goto ret_0;

	if (stype == FIBRIL_FROM_MANAGER) {
		if (list_empty(&ready_list) && list_empty(&serialized_list))
			goto ret_0;
		/*
		 * Do not preempt if there are not enough threads to run the
		 * ready fibrils that are not serialized.
		 */
		if (list_empty(&serialized_list) &&
		    threads_in_manager <= serialized_threads) {
			goto ret_0;
		}
	}
	/* If we are going to the manager and none exists, create it. */
	if (stype == FIBRIL_TO_MANAGER || stype == FIBRIL_FROM_DEAD) {
		while (list_empty(&manager_list)) {
			futex_up(&fibril_futex);
			async_create_manager();
			futex_down(&fibril_futex);
		}
	}

	srcf = __tcb_get()->fibril_data;
	if (stype != FIBRIL_FROM_DEAD) {
		/* Save current state */
		if (!context_save(&srcf->ctx)) {
			if (serialization_count)
				srcf->flags &= ~FIBRIL_SERIALIZED;
			if (srcf->clean_after_me) {
				/*
				 * Clean up after the dead fibril from which
				 * we restored the context here.
				 */
				void *stack = srcf->clean_after_me->stack;
				if (stack) {
					/*
					 * This check is necessary because a
					 * thread could have exited like a
					 * normal fibril using the
					 * FIBRIL_FROM_DEAD switch type. In that
					 * case, its fibril will not have the
					 * stack member filled.
					 */
					free(stack);
				}
				fibril_teardown(srcf->clean_after_me);
				srcf->clean_after_me = NULL;
			}
			return 1;	/* futex_up already done here */
		}

		/* Save myself to the correct run list */
		if (stype == FIBRIL_PREEMPT)
			list_append(&srcf->link, &ready_list);
		else if (stype == FIBRIL_FROM_MANAGER) {
			list_append(&srcf->link, &manager_list);
			threads_in_manager--;
		} else {
			/*
			 * If stype == FIBRIL_TO_MANAGER, we don't put
			 * ourselves on any list; we should already be
			 * somewhere, or we will be lost.
			 */
		}
	}

	/* Choose a new fibril to run */
	if (stype == FIBRIL_TO_MANAGER || stype == FIBRIL_FROM_DEAD) {
		dstf = list_get_instance(manager_list.next, fibril_t, link);
		if (serialization_count && stype == FIBRIL_TO_MANAGER) {
			serialized_threads++;
			srcf->flags |= FIBRIL_SERIALIZED;
		}
		threads_in_manager++;

		if (stype == FIBRIL_FROM_DEAD)
			dstf->clean_after_me = srcf;
	} else {
		if (!list_empty(&serialized_list)) {
			dstf = list_get_instance(serialized_list.next, fibril_t,
			    link);
			serialized_threads--;
		} else {
			dstf = list_get_instance(ready_list.next, fibril_t,
			    link);
		}
	}
	list_remove(&dstf->link);

	futex_up(&fibril_futex);
	context_restore(&dstf->ctx);
	/* not reached */

ret_0:
	futex_up(&fibril_futex);
	return retval;
}

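/*
 * Usage sketch (an assumption modelled on the async framework, not part of
 * the original file): a fibril that must wait for an event takes async_futex,
 * remembers its own ID on some wait queue and hands control to a manager
 * fibril; another fibril later wakes it with fibril_add_ready(). The wait
 * queue itself is hypothetical, and when async_futex is released again is
 * governed by the async framework's conventions, not by this sketch.
 *
 *	futex_down(&async_futex);
 *	fid_t fid = fibril_get_id();
 *	// record fid on a wait queue so the event handler can find us
 *	fibril_switch(FIBRIL_TO_MANAGER);
 *	// we continue here once somebody has called fibril_add_ready(fid)
 */
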
/** Create a new fibril.
 *
 * @param func		Implementing function of the new fibril.
 * @param arg		Argument to pass to func.
 *
 * @return		0 on failure, fibril ID of the new fibril on success.
 */
fid_t fibril_create(int (*func)(void *), void *arg)
{
	fibril_t *f;

	f = fibril_setup();
	if (!f)
		return 0;
	f->stack = (char *) malloc(FIBRIL_INITIAL_STACK_PAGES_NO *
	    getpagesize());
	if (!f->stack) {
		fibril_teardown(f);
		return 0;
	}

	f->func = func;
	f->arg = arg;

	context_save(&f->ctx);
	context_set(&f->ctx, FADDR(fibril_main), f->stack,
	    FIBRIL_INITIAL_STACK_PAGES_NO * getpagesize(), f->tcb);

	return (fid_t) f;
}

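/*
 * Usage sketch (not part of the original file): create a fibril running a
 * caller-supplied function and make it runnable. hello_fibril() and its
 * argument are hypothetical names used only for illustration.
 *
 *	static int hello_fibril(void *arg)
 *	{
 *		printf("%s\n", (const char *) arg);
 *		return 0;
 *	}
 *
 *	fid_t fid = fibril_create(hello_fibril, (void *) "hello from a fibril");
 *	if (fid)
 *		fibril_add_ready(fid);
 */
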
/** Add a fibril to the ready list.
 *
 * @param fid		Pointer to the fibril structure of the fibril to be
 *			added.
 */
void fibril_add_ready(fid_t fid)
{
	fibril_t *f;

	f = (fibril_t *) fid;
	futex_down(&fibril_futex);
	if ((f->flags & FIBRIL_SERIALIZED))
		list_append(&f->link, &serialized_list);
	else
		list_append(&f->link, &ready_list);
	futex_up(&fibril_futex);
}

/** Add a fibril to the manager list.
 *
 * @param fid		Pointer to the fibril structure of the fibril to be
 *			added.
 */
void fibril_add_manager(fid_t fid)
{
	fibril_t *f;

	f = (fibril_t *) fid;

	futex_down(&fibril_futex);
	list_append(&f->link, &manager_list);
	futex_up(&fibril_futex);
}

/** Remove one manager from the manager list. */
void fibril_remove_manager(void)
{
	futex_down(&fibril_futex);
	if (list_empty(&manager_list)) {
		futex_up(&fibril_futex);
		return;
	}
	list_remove(manager_list.next);
	futex_up(&fibril_futex);
}

/** Return the fibril ID of the currently running fibril.
 *
 * @return		Fibril ID of the currently running fibril.
 */
fid_t fibril_get_id(void)
{
	return (fid_t) __tcb_get()->fibril_data;
}

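/*
 * Usage sketch (assumption, not part of the original file): fibril_get_id()
 * is typically used to remember which fibril is waiting for an event, so that
 * whoever handles the event can wake the waiter later. The wait_entry record
 * is hypothetical.
 *
 *	// waiting side
 *	wait_entry.fid = fibril_get_id();
 *	// ... block, e.g. via fibril_switch(FIBRIL_TO_MANAGER) ...
 *
 *	// waking side
 *	fibril_add_ready(wait_entry.fid);
 */
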
/** Disable preemption.
 *
 * If the fibril wants to send several messages in a row and does not want to
 * be preempted, it should call async_serialize_start() at the beginning of
 * the communication and async_serialize_end() at the end. If it is a true
 * multithreaded application, it should protect the communication channel by a
 * futex as well.
 */
void fibril_inc_sercount(void)
{
	serialization_count++;
}

/** Restore the preemption counter to the previous state. */
void fibril_dec_sercount(void)
{
	serialization_count--;
}

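/*
 * Usage sketch (not part of the original file): a fibril that must not be
 * preempted between two related messages brackets the exchange with the async
 * serialization calls mentioned above. The two requests themselves are
 * hypothetical.
 *
 *	async_serialize_start();
 *	// send the first message of the exchange
 *	// send the second, dependent message of the exchange
 *	async_serialize_end();
 */
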
/** @}
 */