Rev 1433 | Rev 1438 | Go to most recent revision | Show entire file | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1433 | Rev 1434 | ||
---|---|---|---|
Line 46... | Line 46... | ||
46 | #include <cpu.h> |
46 | #include <cpu.h> |
47 | #include <arch.h> |
47 | #include <arch.h> |
48 | #include <adt/list.h> |
48 | #include <adt/list.h> |
49 | #include <atomic.h> |
49 | #include <atomic.h> |
50 | #include <proc/thread.h> |
50 | #include <proc/thread.h> |
- | 51 | #include <sysinfo/sysinfo.h> |
|
- | 52 | #include <arch/barrier.h> |
|
- | 53 | ||
/* Pointers to public variables with time */
struct ptime {
	__native seconds;	/* whole seconds since clock_counter_init() */
	__native useconds;	/* microsecond fraction of the current second */
	__native useconds2;	/* copy of useconds, written last with barriers;
				 * presumably lets readers detect a torn read by
				 * comparing useconds == useconds2 — TODO confirm
				 * against the userspace reader. */
};
/* Kernel-side mapping of the page shared with userspace (set up in
 * clock_counter_init(); its physical address is published via sysinfo
 * as "clock.faddr"). */
struct ptime *public_time;
/* Variable holding fragment of second, so that we would update
 * seconds correctly
 */
static __native secfrag = 0;
|
- | 65 | ||
- | 66 | /** Initialize realtime clock counter |
|
- | 67 | * |
|
- | 68 | * The applications (and sometimes kernel) need to access accurate |
|
- | 69 | * information about realtime data. We allocate 1 page with these |
|
- | 70 | * data and update it periodically. |
|
- | 71 | * |
|
- | 72 | * |
|
- | 73 | */ |
|
- | 74 | void clock_counter_init(void) |
|
- | 75 | { |
|
- | 76 | void *faddr; |
|
- | 77 | ||
- | 78 | faddr = (void *)PFN2ADDR(frame_alloc(0, FRAME_ATOMIC)); |
|
- | 79 | if (!faddr) |
|
- | 80 | panic("Cannot allocate page for clock"); |
|
- | 81 | ||
- | 82 | public_time = (struct ptime *)PA2KA(faddr); |
|
- | 83 | ||
- | 84 | /* TODO: We would need some arch dependent settings here */ |
|
- | 85 | public_time->seconds = 0; |
|
- | 86 | public_time->useconds = 0; |
|
- | 87 | ||
- | 88 | sysinfo_set_item_val("clock.faddr", NULL, (__native)faddr); |
|
- | 89 | } |
|
- | 90 | ||
- | 91 | ||
/** Update public counters
 *
 * Update it only on first processor
 * TODO: Do we really need so many write barriers?
 *
 * Writer side of a lock-free publication protocol: useconds is
 * written first, then (after a write barrier) useconds2 is set to the
 * same value. Presumably a reader loads seconds/useconds and accepts
 * them only if useconds == useconds2, retrying otherwise — TODO
 * confirm against the userspace reader. Because of that ordering,
 * the exact sequence of stores and barriers below must not be
 * rearranged.
 */
static void clock_update_counters(void)
{
	/* Only CPU 0 advances the shared counters, so there is a single
	 * writer and no writer-side locking is needed. */
	if (CPU->id == 0) {
		/* One tick is 1000000/HZ microseconds. */
		secfrag += 1000000/HZ;
		if (secfrag >= 1000000) {
			/* A full second elapsed: reset the fraction and
			 * bump seconds. useconds is zeroed before seconds
			 * is incremented, separated by a barrier, so a
			 * reader never sees the new second paired with the
			 * old (almost-full) microsecond count. */
			public_time->useconds = 0;
			write_barrier();
			public_time->seconds++;
			secfrag = 0;
		} else
			public_time->useconds += 1000000/HZ;
		/* Publish: useconds2 is updated last so it only matches
		 * useconds once the whole update above is visible. */
		write_barrier();
		public_time->useconds2 = public_time->useconds;
		write_barrier();
	}
}
|
51 | 113 | ||
52 | /** Clock routine |
114 | /** Clock routine |
53 | * |
115 | * |
54 | * Clock routine executed from clock interrupt handler |
116 | * Clock routine executed from clock interrupt handler |
55 | * (assuming interrupts_disable()'d). Runs expired timeouts |
117 | * (assuming interrupts_disable()'d). Runs expired timeouts |
Line 67... | Line 129... | ||
67 | /* |
129 | /* |
68 | * To avoid lock ordering problems, |
130 | * To avoid lock ordering problems, |
69 | * run all expired timeouts as you visit them. |
131 | * run all expired timeouts as you visit them. |
70 | */ |
132 | */ |
71 | for (i = 0; i <= CPU->missed_clock_ticks; i++) { |
133 | for (i = 0; i <= CPU->missed_clock_ticks; i++) { |
- | 134 | clock_update_counters(); |
|
72 | spinlock_lock(&CPU->timeoutlock); |
135 | spinlock_lock(&CPU->timeoutlock); |
73 | while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) { |
136 | while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) { |
74 | h = list_get_instance(l, timeout_t, link); |
137 | h = list_get_instance(l, timeout_t, link); |
75 | spinlock_lock(&h->lock); |
138 | spinlock_lock(&h->lock); |
76 | if (h->ticks-- != 0) { |
139 | if (h->ticks-- != 0) { |