Diff of Rev 2421 against Rev 2450

@@ Rev 2421: line 1 / Rev 2450: line 1 @@
 /*
  * Copyright (C) 2001-2004 Jakub Jermar
+ * Copyright (C) 2007 Vojtech Mencl
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ Rev 2421: line 53 / Rev 2450: line 54 @@
 #include <proc/thread.h>
 #include <sysinfo/sysinfo.h>
 #include <arch/barrier.h>
 #include <mm/frame.h>
 #include <ddi/ddi.h>
+#if defined CONFIG_TIMEOUT_AVL_TREE || defined CONFIG_TIMEOUT_EXTAVL_TREE
+#include <arch/asm.h>
+#include <arch/types.h>
+#include <panic.h>
 #endif
 /* Pointer to variable with uptime */
 uptime_t *uptime;
 
 /** Physical memory area of the real time clock */
 static parea_t clock_parea;
@@ Rev 2421: line 136 / Rev 2450: line 141 @@
 {
     timeout_t *h;
     timeout_handler_t f;
     void *arg;
     count_t missed_clock_ticks = CPU->missed_clock_ticks;
-    uint64_t *i = &(CPU->timeout_active_tree.base);
-    uint64_t absolute_clock_ticks = *i + missed_clock_ticks;
+    uint64_t i = CPU->timeout_active_tree.base;
+    uint64_t last_clock_tick = i + missed_clock_ticks;
     avltree_node_t *expnode;
 
     /*
      * To avoid lock ordering problems,
      * run all expired timeouts as you visit them.
      */
 
-    for (; *i <= absolute_clock_ticks; (*i)++) {
-        /*
-         * Basetime is encreased by missed clock ticks + 1 !!
-         */
-
+
+    for (; i <= last_clock_tick; i++) {
         clock_update_counters();
         spinlock_lock(&CPU->timeoutlock);
 
-
         /*
          * Check whether first timeout (with the smallest key in the tree) time out. If so perform
          * callback function and try next timeout (more timeouts can have same timeout).
          */
         while ((expnode = avltree_find_min(&CPU->timeout_active_tree)) != NULL) {
             h = avltree_get_instance(expnode,timeout_t,node);
             spinlock_lock(&h->lock);
-            if (expnode->key != *i) {
+            if (expnode->key != i) {
+                /*
+                 * Base is increased every for cycle.
+                 */
+                (CPU->timeout_active_tree.base)++;
                 spinlock_unlock(&h->lock);
                 break;
             }
 
             /*
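The change above swaps the old scheme, where the loop variable was a pointer into the per-CPU tree's base and the for-loop header itself advanced that shared counter, for a local tick counter plus an explicit `(CPU->timeout_active_tree.base)++` once the smallest pending key is not yet due. A minimal, self-contained sketch of that tick-walking pattern follows; it assumes a sorted singly linked queue in place of the kernel's AVL tree, drops all spinlock handling, and uses made-up names (tq_t, tq_node_t, tq_expire) rather than HelenOS APIs.

/*
 * Minimal sketch of the expiry walk introduced in Rev 2450, under the
 * assumption of a sorted singly linked queue instead of the kernel's
 * AVL tree, and with no locking.  tq_t, tq_node_t and tq_expire() are
 * illustrative names, not HelenOS interfaces.
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct tq_node {
    uint64_t key;                   /* absolute tick at which to fire */
    void (*handler)(void *arg);
    void *arg;
    struct tq_node *next;           /* next node in ascending key order */
} tq_node_t;

typedef struct {
    uint64_t base;                  /* first tick not processed yet */
    tq_node_t *head;                /* node with the smallest key, or NULL */
} tq_t;

static void tq_expire(tq_t *tq, uint64_t missed_ticks)
{
    uint64_t i = tq->base;
    uint64_t last_tick = i + missed_ticks;

    /* Walk every tick we are responsible for, including missed ones. */
    for (; i <= last_tick; i++) {
        tq_node_t *n;

        /* Fire all timeouts whose key equals the current tick. */
        while ((n = tq->head) != NULL && n->key == i) {
            tq->head = n->next;
            n->handler(n->arg);
            free(n);
        }

        /* Tick i is fully processed; advance the stored base past it. */
        tq->base = i + 1;
    }
}

With the index kept local, the loop header no longer mutates shared per-CPU state, and the comparison expnode->key != i is made against the tick actually being processed rather than against the live base; that separation appears to be the point of the Rev 2450 rewrite.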
@@ Rev 2421: line 227 / Rev 2450: line 232 @@
 {
     timeout_t *h;
     timeout_handler_t f;
     void *arg;
     count_t missed_clock_ticks = CPU->missed_clock_ticks;
-    uint64_t *i = &(CPU->timeout_active_tree.base);
-    uint64_t absolute_clock_ticks = *i + missed_clock_ticks;
+    uint64_t i = CPU->timeout_active_tree.base;
+    uint64_t last_clock_tick = i + missed_clock_ticks;
     extavltree_node_t *expnode;
+    //ipl_t ipl;
 
     /*
      * To avoid lock ordering problems,
      * run all expired timeouts as you visit them.
      */
 
-    for (; *i <= absolute_clock_ticks; (*i)++) {
-        /*
-         * Basetime is encreased by missed clock ticks + 1 !!
-         */
-
+    for (; i <= last_clock_tick; i++) {
         clock_update_counters();
         spinlock_lock(&CPU->timeoutlock);
 
         /*
          * Check whether first timeout in list time out. If so perform callback function and try
          * next timeout (more timeouts can have same timeout).
          */
         while ((expnode = CPU->timeout_active_tree.head.next) != &(CPU->timeout_active_tree.head)) {
             h = extavltree_get_instance(expnode,timeout_t,node);
             spinlock_lock(&h->lock);
-            if (expnode->key != *i) {
+            if (expnode->key != i) {
+                /*
+                 * Base is increased every for cycle.
+                 */
+                (CPU->timeout_active_tree.base)++;
                 spinlock_unlock(&h->lock);
                 break;
             }
 
             /*
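The same rewrite is applied here to the extended-AVL-tree variant; the structural difference is that the pending timeout with the smallest key is read straight from CPU->timeout_active_tree.head.next instead of being located with avltree_find_min(). The toy program below illustrates why a sorted, sentinel-headed, circularly linked list gives constant-time access to that minimum; its node_t and helper functions are stand-ins written for this sketch, not the kernel's extavltree_t interface.

/*
 * Toy illustration: if nodes are kept on a sorted circular list with a
 * sentinel head, head.next is always the node with the smallest key,
 * so no tree descent is needed to find the next timeout to expire.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct node {
    uint64_t key;
    struct node *prev, *next;
} node_t;

static void list_init(node_t *head)
{
    head->prev = head->next = head;
}

/* Insert in ascending key order, walking from the tail backwards. */
static void list_insert_sorted(node_t *head, node_t *n)
{
    node_t *pos = head->prev;

    while (pos != head && pos->key > n->key)
        pos = pos->prev;
    n->prev = pos;
    n->next = pos->next;
    pos->next->prev = n;
    pos->next = n;
}

int main(void)
{
    node_t head;
    node_t a = { .key = 30 }, b = { .key = 10 }, c = { .key = 20 };

    list_init(&head);
    list_insert_sorted(&head, &a);
    list_insert_sorted(&head, &b);
    list_insert_sorted(&head, &c);

    /* head.next holds the smallest key, mirroring timeout_active_tree.head.next. */
    printf("minimum key: %" PRIu64 "\n", head.next->key);
    return 0;
}

In the kernel the list is threaded through the tree nodes themselves, so, assuming insertions keep it ordered by key, the clock handler can pop expired timeouts without descending the tree on every tick.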