| Rev | Author | Line No. | Line |
|---|---|---|---|
| 1113 | palkovsky | 1 | /* |
| 2071 | jermar | 2 | * Copyright (c) 2006 Ondrej Palkovsky |
| 2481 | jermar | 3 | * Copyright (c) 2007 Jakub Jermar |
| 1113 | palkovsky | 4 | * All rights reserved. |
| | | 5 | * |
| | | 6 | * Redistribution and use in source and binary forms, with or without |
| | | 7 | * modification, are permitted provided that the following conditions |
| | | 8 | * are met: |
| | | 9 | * |
| | | 10 | * - Redistributions of source code must retain the above copyright |
| | | 11 | * notice, this list of conditions and the following disclaimer. |
| | | 12 | * - Redistributions in binary form must reproduce the above copyright |
| | | 13 | * notice, this list of conditions and the following disclaimer in the |
| | | 14 | * documentation and/or other materials provided with the distribution. |
| | | 15 | * - The name of the author may not be used to endorse or promote products |
| | | 16 | * derived from this software without specific prior written permission. |
| | | 17 | * |
| | | 18 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| | | 19 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| | | 20 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| | | 21 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| | | 22 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| | | 23 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| | | 24 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| | | 25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| | | 26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| | | 27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| | | 28 | */ |
| | | 29 | |
| 1719 | decky | 30 | /** @addtogroup libc |
| 1653 | cejka | 31 | * @{ |
| | | 32 | */ |
| | | 33 | /** @file |
| | | 34 | */ |
| | | 35 | |
| 1113 | palkovsky | 36 | #include <libadt/list.h> |
| 2482 | jermar | 37 | #include <fibril.h> |
| 1113 | palkovsky | 38 | #include <malloc.h> |
| | | 39 | #include <unistd.h> |
| | | 40 | #include <thread.h> |
| | | 41 | #include <stdio.h> |
| 1781 | jermar | 42 | #include <libarch/faddr.h> |
| 1392 | palkovsky | 43 | #include <futex.h> |
| | | 44 | #include <assert.h> |
| 1407 | palkovsky | 45 | #include <async.h> |
| 1113 | palkovsky | 46 | |
| 2482 | jermar | 47 | #ifndef FIBRIL_INITIAL_STACK_PAGES_NO |
| | | 48 | #define FIBRIL_INITIAL_STACK_PAGES_NO 1 |
| 1155 | vana | 49 | #endif |
| | | 50 | |
| 1113 | palkovsky | 51 | static LIST_INITIALIZE(ready_list); |
| 1610 | palkovsky | 52 | static LIST_INITIALIZE(serialized_list); |
| 1392 | palkovsky | 53 | static LIST_INITIALIZE(manager_list); |
| 1113 | palkovsky | 54 | |
| 2482 | jermar | 55 | static void fibril_main(void); |
| 1125 | jermar | 56 | |
| 2482 | jermar | 57 | static atomic_t fibril_futex = FUTEX_INITIALIZER; |
| | | 58 | /** Number of threads that are in async_serialized mode */ |
| 2481 | jermar | 59 | static int serialized_threads; /* Protected by async_futex */ |
| 1610 | palkovsky | 60 | /** Thread-local count of serialization. If >0, we must not preempt */ |
| 1614 | palkovsky | 61 | static __thread int serialization_count; |
| 2482 | jermar | 62 | /** Counter for fibrils residing in async_manager */ |
| | | 63 | static int fibrils_in_manager; |
| 1392 | palkovsky | 64 | |
| 2482 | jermar | 65 | /** Set up fibril information in the TCB structure */ |
| | | 66 | fibril_t *fibril_setup(void) |
| 1129 | palkovsky | 67 | { |
| 2482 | jermar | 68 | fibril_t *f; |
| 1392 | palkovsky | 69 | tcb_t *tcb; |
| 1129 | palkovsky | 70 | |
| 1392 | palkovsky | 71 | tcb = __make_tls(); |
| | | 72 | if (!tcb) |
| | | 73 | return NULL; |
| | | 74 | |
| 2482 | jermar | 75 | f = malloc(sizeof(*f)); |
| | | 76 | if (!f) { |
| 1392 | palkovsky | 77 | __free_tls(tcb); |
| 1129 | palkovsky | 78 | return NULL; |
| | | 79 | } |
| | | 80 | |
| 2482 | jermar | 81 | tcb->fibril_data = f; |
| | | 82 | f->tcb = tcb; |
| 1129 | palkovsky | 83 | |
| 2482 | jermar | 84 | return f; |
| 1129 | palkovsky | 85 | } |
| | | 86 | |
| 2482 | jermar | 87 | void fibril_teardown(fibril_t *f) |
| 1129 | palkovsky | 88 | { |
| 2482 | jermar | 89 | __free_tls(f->tcb); |
| | | 90 | free(f); |
| 1129 | palkovsky | 91 | } |
| | | 92 | |
| 2482 | jermar | 93 | /** Function that spans the whole life-cycle of a fibril. |
| 2481 | jermar | 94 | * |
| 2482 | jermar | 95 | * Each fibril begins execution in this function. Then the function |
| | | 96 | * implementing the fibril logic is called. After its return, the return value |
| | | 97 | * is saved for a potential joiner. If the joiner exists, it is woken up. The |
| | | 98 | * fibril then switches to another fibril, which cleans up after it. |
| 2481 | jermar | 99 | */ |
| 2482 | jermar | 100 | void fibril_main(void) |
| 1113 | palkovsky | 101 | { |
| 2482 | jermar | 102 | fibril_t *f = __tcb_get()->fibril_data; |
| 1129 | palkovsky | 103 | |
| 2482 | jermar | 104 | f->retval = f->func(f->arg); |
| 1113 | palkovsky | 105 | |
| 2481 | jermar | 106 | /* |
| | | 107 | * If there is a joiner, wake it up and save our return value. |
| | | 108 | */ |
| 2482 | jermar | 109 | if (f->joiner) { |
| | | 110 | list_append(&f->joiner->link, &ready_list); |
| | | 111 | f->joiner->joinee_retval = f->retval; |
| 2481 | jermar | 112 | } |
| 1113 | palkovsky | 113 | |
| 2482 | jermar | 114 | fibril_schedule_next_adv(FIBRIL_FROM_DEAD); |
| 2481 | jermar | 115 | /* not reached */ |
| 1113 | palkovsky | 116 | } |
| | | 117 | |
| 2482 | jermar | 118 | /** Schedule next fibril. |
| 1128 | jermar | 119 | * |
| 2482 | jermar | 120 | * If called with the FIBRIL_TO_MANAGER parameter, the async_futex should be |
| 1427 | palkovsky | 121 | * held. |
| | | 122 | * |
| 2482 | jermar | 123 | * @param stype One of FIBRIL_SLEEP, FIBRIL_PREEMPT, FIBRIL_TO_MANAGER, |
| | | 124 | * FIBRIL_FROM_MANAGER, FIBRIL_FROM_DEAD. The parameter |
| | | 125 | * describes the circumstances of the switch. |
| | | 126 | * @return Return 0 if there is no ready fibril, |
| 2481 | jermar | 127 | * return 1 otherwise. |
| 1128 | jermar | 128 | */ |
| 2482 | jermar | 129 | int fibril_schedule_next_adv(fibril_switch_type_t stype) |
| 1113 | palkovsky | 130 | { |
| 2482 | jermar | 131 | fibril_t *srcf, *dstf; |
| 1392 | palkovsky | 132 | int retval = 0; |
| | | 133 | |
| 2482 | jermar | 134 | futex_down(&fibril_futex); |
| 1113 | palkovsky | 135 | |
| 2482 | jermar | 136 | if (stype == FIBRIL_PREEMPT && list_empty(&ready_list)) |
| 1392 | palkovsky | 137 | goto ret_0; |
| 2482 | jermar | 138 | if (stype == FIBRIL_SLEEP) { |
| 2481 | jermar | 139 | if (list_empty(&ready_list) && list_empty(&serialized_list)) |
| | | 140 | goto ret_0; |
| | | 141 | } |
| 1113 | palkovsky | 142 | |
| 2482 | jermar | 143 | if (stype == FIBRIL_FROM_MANAGER) { |
| 1610 | palkovsky | 144 | if (list_empty(&ready_list) && list_empty(&serialized_list)) |
| | | 145 | goto ret_0; |
| 2481 | jermar | 146 | /* |
| | | 147 | * Do not preempt if there is not a sufficient number of |
| | | 148 | * manager fibrils. |
| | | 149 | */ |
| 2482 | jermar | 150 | if (list_empty(&serialized_list) && fibrils_in_manager <= |
| 2481 | jermar | 151 | serialized_threads) { |
| 1610 | palkovsky | 152 | goto ret_0; |
| | | 153 | } |
| 1392 | palkovsky | 154 | } |
| 1407 | palkovsky | 155 | /* If we are going to manager and none exists, create it */ |
| 2482 | jermar | 156 | if (stype == FIBRIL_TO_MANAGER || stype == FIBRIL_FROM_DEAD) { |
| 1610 | palkovsky | 157 | while (list_empty(&manager_list)) { |
| 2482 | jermar | 158 | futex_up(&fibril_futex); |
| 1610 | palkovsky | 159 | async_create_manager(); |
| 2482 | jermar | 160 | futex_down(&fibril_futex); |
| 1610 | palkovsky | 161 | } |
| 1427 | palkovsky | 162 | } |
| 1610 | palkovsky | 163 | |
| 2482 | jermar | 164 | srcf = __tcb_get()->fibril_data; |
| | | 165 | if (stype != FIBRIL_FROM_DEAD) { |
| 1610 | palkovsky | 166 | /* Save current state */ |
| 2482 | jermar | 167 | if (!context_save(&srcf->ctx)) { |
| 1610 | palkovsky | 168 | if (serialization_count) |
| 2482 | jermar | 169 | srcf->flags &= ~FIBRIL_SERIALIZED; |
| | | 170 | if (srcf->clean_after_me) { |
| 2481 | jermar | 171 | /* |
| 2482 | jermar | 172 | * Clean up after the dead fibril from which we |
| | | 173 | * restored context here. |
| 2481 | jermar | 174 | */ |
| 2482 | jermar | 175 | free(srcf->clean_after_me->stack); |
| | | 176 | fibril_teardown(srcf->clean_after_me); |
| | | 177 | srcf->clean_after_me = NULL; |
| 2481 | jermar | 178 | } |
| | | 179 | return 1; /* futex_up already done here */ |
| 1610 | palkovsky | 180 | } |
| 1392 | palkovsky | 181 | |
| 2481 | jermar | 182 | /* Save myself to the correct run list */ |
| 2482 | jermar | 183 | if (stype == FIBRIL_PREEMPT) |
| | | 184 | list_append(&srcf->link, &ready_list); |
| | | 185 | else if (stype == FIBRIL_FROM_MANAGER) { |
| | | 186 | list_append(&srcf->link, &manager_list); |
| | | 187 | fibrils_in_manager--; |
| 2481 | jermar | 188 | } else { |
| | | 189 | /* |
| 2482 | jermar | 190 | * If stype == FIBRIL_TO_MANAGER, don't put ourselves on |
| 2481 | jermar | 191 | * any list, we should already be somewhere, or we will |
| | | 192 | * be lost. |
| | | 193 | * |
| 2482 | jermar | 194 | * The stype == FIBRIL_SLEEP case is similar. The fibril |
| | | 195 | * has an external reference which can be used to wake it |
| | | 196 | * up once that time has come. |
| 2481 | jermar | 197 | */ |
| | | 198 | } |
| | | 199 | } |
| 1392 | palkovsky | 200 | |
| 2482 | jermar | 201 | /* Choose a new fibril to run */ |
| | | 202 | if (stype == FIBRIL_TO_MANAGER || stype == FIBRIL_FROM_DEAD) { |
| | | 203 | dstf = list_get_instance(manager_list.next, fibril_t, link); |
| | | 204 | if (serialization_count && stype == FIBRIL_TO_MANAGER) { |
| 1610 | palkovsky | 205 | serialized_threads++; |
| 2482 | jermar | 206 | srcf->flags |= FIBRIL_SERIALIZED; |
| 1610 | palkovsky | 207 | } |
| 2482 | jermar | 208 | fibrils_in_manager++; |
| 2481 | jermar | 209 | |
| 2482 | jermar | 210 | if (stype == FIBRIL_FROM_DEAD) |
| | | 211 | dstf->clean_after_me = srcf; |
| 1610 | palkovsky | 212 | } else { |
| | | 213 | if (!list_empty(&serialized_list)) { |
| 2482 | jermar | 214 | dstf = list_get_instance(serialized_list.next, fibril_t, |
| | | 215 | link); |
| 1610 | palkovsky | 216 | serialized_threads--; |
| 2481 | jermar | 217 | } else { |
| 2482 | jermar | 218 | dstf = list_get_instance(ready_list.next, fibril_t, |
| | | 219 | link); |
| 2481 | jermar | 220 | } |
| 1610 | palkovsky | 221 | } |
| 2482 | jermar | 222 | list_remove(&dstf->link); |
| 1113 | palkovsky | 223 | |
| 2482 | jermar | 224 | futex_up(&fibril_futex); |
| | | 225 | context_restore(&dstf->ctx); |
| 2481 | jermar | 226 | /* not reached */ |
| 1392 | palkovsky | 227 | |
| | | 228 | ret_0: |
| 2482 | jermar | 229 | futex_up(&fibril_futex); |
| 1392 | palkovsky | 230 | return retval; |
| 1113 | palkovsky | 231 | } |
| | | 232 | |
| 2482 | jermar | 233 | /** Wait for fibril to finish. |
| 1128 | jermar | 234 | * |
| 2482 | jermar | 235 | * Each fibril can only be joined by one other fibril. Moreover, the joiner must |
| | | 236 | * be from the same thread as the joinee. |
| 1128 | jermar | 237 | * |
| 2482 | jermar | 238 | * @param fid Fibril to join. |
| 2481 | jermar | 239 | * |
| 2482 | jermar | 240 | * @return Value returned by the completed fibril. |
| 1128 | jermar | 241 | */ |
| 2482 | jermar | 242 | int fibril_join(fid_t fid) |
| 1113 | palkovsky | 243 | { |
| 2482 | jermar | 244 | fibril_t *f; |
| | | 245 | fibril_t *cur; |
| 1113 | palkovsky | 246 | |
| 2482 | jermar | 247 | /* Handle fid = Kernel address -> it is wait for call */ |
| | | 248 | f = (fibril_t *) fid; |
| 1113 | palkovsky | 249 | |
| 2481 | jermar | 250 | /* |
| | | 251 | * The joiner is running so the joinee isn't. |
| | | 252 | */ |
| 2482 | jermar | 253 | cur = __tcb_get()->fibril_data; |
| | | 254 | f->joiner = cur; |
| | | 255 | fibril_schedule_next_adv(FIBRIL_SLEEP); |
| 1610 | palkovsky | 256 | |
| 2481 | jermar | 257 | /* |
| | | 258 | * The joinee fills in the return value. |
| | | 259 | */ |
| | | 260 | return cur->joinee_retval; |
| 1113 | palkovsky | 261 | } |
| | | 262 | |
| 2482 | jermar | 263 | /** Create a new fibril. |
| 1113 | palkovsky | 264 | * |
| 2482 | jermar | 265 | * @param func Implementing function of the new fibril. |
| 2481 | jermar | 266 | * @param arg Argument to pass to func. |
| 1128 | jermar | 267 | * |
| 2482 | jermar | 268 | * @return Return 0 on failure or the fibril ID of the new fibril. |
| 1113 | palkovsky | 269 | */ |
| 2482 | jermar | 270 | fid_t fibril_create(int (*func)(void *), void *arg) |
| 1113 | palkovsky | 271 | { |
| 2482 | jermar | 272 | fibril_t *f; |
| 1113 | palkovsky | 273 | |
| 2482 | jermar | 274 | f = fibril_setup(); |
| | | 275 | if (!f) |
| 1129 | palkovsky | 276 | return 0; |
| 2482 | jermar | 277 | f->stack = (char *) malloc(FIBRIL_INITIAL_STACK_PAGES_NO * |
| 2481 | jermar | 278 | getpagesize()); |
| 1113 | palkovsky | 279 | |
| 2482 | jermar | 280 | if (!f->stack) { |
| | | 281 | fibril_teardown(f); |
| 1113 | palkovsky | 282 | return 0; |
| | | 283 | } |
| | | 284 | |
| 2482 | jermar | 285 | f->arg = arg; |
| | | 286 | f->func = func; |
| | | 287 | f->clean_after_me = NULL; |
| | | 288 | f->joiner = NULL; |
| | | 289 | f->joinee_retval = 0; |
| | | 290 | f->retval = 0; |
| | | 291 | f->flags = 0; |
| 1113 | palkovsky | 292 | |
| 2482 | jermar | 293 | context_save(&f->ctx); |
| | | 294 | context_set(&f->ctx, FADDR(fibril_main), f->stack, |
| | | 295 | FIBRIL_INITIAL_STACK_PAGES_NO * getpagesize(), f->tcb); |
| 1113 | palkovsky | 296 | |
| 2482 | jermar | 297 | return (fid_t) f; |
| 1392 | palkovsky | 298 | } |
| | | 299 | |
| 2482 | jermar | 300 | /** Add a fibril to the ready list. |
| 2481 | jermar | 301 | * |
| 2482 | jermar | 302 | * @param fid Pointer to the fibril structure of the fibril to be added. |
| 2481 | jermar | 303 | */ |
| 2482 | jermar | 304 | void fibril_add_ready(fid_t fid) |
| 1392 | palkovsky | 305 | { |
| 2482 | jermar | 306 | fibril_t *f; |
| 1392 | palkovsky | 307 | |
| 2482 | jermar | 308 | f = (fibril_t *) fid; |
| | | 309 | futex_down(&fibril_futex); |
| | | 310 | if ((f->flags & FIBRIL_SERIALIZED)) |
| | | 311 | list_append(&f->link, &serialized_list); |
| 1610 | palkovsky | 312 | else |
| 2482 | jermar | 313 | list_append(&f->link, &ready_list); |
| | | 314 | futex_up(&fibril_futex); |
| 1392 | palkovsky | 315 | } |
| 1113 | palkovsky | 316 | |
| 2482 | jermar | 317 | /** Add a fibril to the manager list. |
| 2481 | jermar | 318 | * |
| 2482 | jermar | 319 | * @param fid Pointer to the fibril structure of the fibril to be added. |
| 2481 | jermar | 320 | */ |
| 2482 | jermar | 321 | void fibril_add_manager(fid_t fid) |
| 1392 | palkovsky | 322 | { |
| 2482 | jermar | 323 | fibril_t *f; |
| 1392 | palkovsky | 324 | |
| 2482 | jermar | 325 | f = (fibril_t *) fid; |
| 1392 | palkovsky | 326 | |
| 2482 | jermar | 327 | futex_down(&fibril_futex); |
| | | 328 | list_append(&f->link, &manager_list); |
| | | 329 | futex_up(&fibril_futex); |
| 1113 | palkovsky | 330 | } |
| 1392 | palkovsky | 331 | |
| 2482 | jermar | 332 | /** Remove one manager from the manager list. */ |
| | | 333 | void fibril_remove_manager(void) |
| 1392 | palkovsky | 334 | { |
| 2482 | jermar | 335 | futex_down(&fibril_futex); |
| 1392 | palkovsky | 336 | if (list_empty(&manager_list)) { |
| 2482 | jermar | 337 | futex_up(&fibril_futex); |
| 1392 | palkovsky | 338 | return; |
| | | 339 | } |
| | | 340 | list_remove(manager_list.next); |
| 2482 | jermar | 341 | futex_up(&fibril_futex); |
| 1392 | palkovsky | 342 | } |
| 1427 | palkovsky | 343 | |
| 2482 | jermar | 344 | /** Return fibril id of the currently running fibril. |
| 2481 | jermar | 345 | * |
| 2482 | jermar | 346 | * @return Fibril ID of the currently running fibril. |
| 2481 | jermar | 347 | */ |
| 2482 | jermar | 348 | fid_t fibril_get_id(void) |
| 1427 | palkovsky | 349 | { |
| 2482 | jermar | 350 | return (fid_t) __tcb_get()->fibril_data; |
| 1427 | palkovsky | 351 | } |
| 1610 | palkovsky | 352 | |
| | | 353 | /** Disable preemption |
| | | 354 | * |
| 2482 | jermar | 355 | * If the fibril wants to send several messages in a row and does not want to be |
| 2481 | jermar | 356 | * preempted, it should call async_serialize_start() at the beginning of |
| | | 357 | * the communication and async_serialize_end() at the end. If it is a true |
| | | 358 | * multithreaded application, it should protect the communication channel by a |
| | | 359 | * futex as well. Interrupt messages can still be preempted. |
| 1610 | palkovsky | 360 | */ |
| 2482 | jermar | 361 | void fibril_inc_sercount(void) |
| 1610 | palkovsky | 362 | { |
| | | 363 | serialization_count++; |
| | | 364 | } |
| | | 365 | |
| 2481 | jermar | 366 | /** Restore the preemption counter to the previous state. */ |
| 2482 | jermar | 367 | void fibril_dec_sercount(void) |
| 1610 | palkovsky | 368 | { |
| | | 369 | serialization_count--; |
| | | 370 | } |
| 1653 | cejka | 371 | |
| 1719 | decky | 372 | /** @} |
| 1653 | cejka | 373 | */ |
| 2481 | jermar | 374 | |
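
For orientation, the sketch below shows how client code might drive the API listed above: create a fibril, put it on the ready list, and join it. It is a minimal illustration, not code from the repository; the `worker()` function and its argument are hypothetical, and it assumes the program already runs inside the libc fibril/async machinery so that switching to the manager (e.g. when the joined fibril finishes) works as described in `fibril_main()` and `fibril_schedule_next_adv()`.

```c
#include <fibril.h>
#include <stdio.h>

/* Hypothetical fibril body; its return value is stored for the joiner. */
static int worker(void *arg)
{
	printf("worker running, arg=%p\n", arg);
	return 42;
}

int main(void)
{
	/* fibril_create() allocates the fibril_t, its TLS and its stack. */
	fid_t fid = fibril_create(worker, NULL);
	if (!fid)
		return 1;

	/* Make the new fibril runnable on this thread. */
	fibril_add_ready(fid);

	/*
	 * fibril_join() records us as the joiner and sleeps via
	 * fibril_schedule_next_adv(FIBRIL_SLEEP); fibril_main() later
	 * wakes us up with worker()'s return value.
	 */
	int rc = fibril_join(fid);
	printf("worker returned %d\n", rc);
	return 0;
}
```

As the `fibril_join()` documentation above notes, the joiner and the joinee must belong to the same thread, and each fibril may have at most one joiner.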