/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup time
 * @{
 */

/**
 * @file
 * @brief   High-level clock interrupt handler.
 *
 * This file contains the clock() function, which is the source
 * of preemption. It is also responsible for executing expired
 * timeouts.
 */

#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <func.h>
#include <proc/scheduler.h>
#include <cpu.h>
#include <arch.h>
#include <adt/list.h>
#include <atomic.h>
#include <proc/thread.h>
#include <sysinfo/sysinfo.h>
#include <arch/barrier.h>
#include <mm/frame.h>
#include <ddi/ddi.h>

/** Pointer to the shared uptime structure. */
uptime_t *uptime;

/** Physical memory area of the real time clock. */
static parea_t clock_parea;

/** Fraction of a second accumulated across clock ticks,
 * used to update the seconds counters correctly.
 */
static unative_t secfrag = 0;

/** Initialize the realtime clock counter.
 *
 * Applications (and sometimes the kernel) need access to accurate
 * realtime information. We allocate one page for this data and
 * update it periodically.
 */
void clock_counter_init(void)
{
    void *faddr;

    faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
    if (!faddr)
        panic("Cannot allocate page for clock");

    uptime = (uptime_t *) PA2KA(faddr);

    uptime->seconds1 = 0;
    uptime->seconds2 = 0;
    uptime->useconds = 0;

    clock_parea.pbase = (uintptr_t) faddr;
    clock_parea.vbase = (uintptr_t) uptime;
    clock_parea.frames = 1;
    clock_parea.cacheable = true;
    ddi_parea_register(&clock_parea);

    /*
     * Prepare information for the userspace so that it can successfully
     * physmem_map() the clock_parea.
     */
    sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
    sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}
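
/*
 * For illustration only: a minimal sketch of the userspace side that
 * consumes the sysinfo items set above. It assumes the libc wrappers
 * sysinfo_value(), as_get_mappable_page() and physmem_map() with the
 * signatures shown; consult the actual libc for the authoritative
 * interface.
 *
 *     uintptr_t faddr = (uintptr_t) sysinfo_value("clock.faddr");
 *     int flags = AS_AREA_READ;
 *     if (sysinfo_value("clock.cacheable"))
 *         flags |= AS_AREA_CACHEABLE;
 *     uptime_t *uptime = (uptime_t *) as_get_mappable_page(PAGE_SIZE);
 *     if (physmem_map((void *) faddr, (void *) uptime, 1, flags) != 0)
 *         uptime = NULL;    hypothetical error handling
 */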

/** Update the public uptime counters.
 *
 * The counters are updated only on the first processor.
 * TODO: Do we really need so many write barriers?
 */
static void clock_update_counters(void)
{
    if (CPU->id == 0) {
        secfrag += 1000000 / HZ;
        if (secfrag >= 1000000) {
            secfrag -= 1000000;
            uptime->seconds1++;
            write_barrier();
            uptime->useconds = secfrag;
            write_barrier();
            uptime->seconds2 = uptime->seconds1;
        } else
            uptime->useconds += 1000000 / HZ;
    }
}
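
/*
 * The seconds1 / write_barrier() / seconds2 sequence above is a lock-free
 * publication protocol: a reader that observes seconds1 == seconds2 is
 * guaranteed a consistent (seconds, useconds) pair. A minimal reader
 * sketch, for illustration only (the real consumer is userspace code
 * that mapped the page via physmem_map()):
 *
 *     unative_t s1, s2, us;
 *     do {
 *         s2 = uptime->seconds2;
 *         read_barrier();
 *         us = uptime->useconds;
 *         read_barrier();
 *         s1 = uptime->seconds1;
 *     } while (s1 != s2);
 *     at this point (s1, us) is a consistent snapshot
 */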

#ifdef CONFIG_TIMEOUT_EXTAVL_TREE

/** Clock routine.
 *
 * Clock routine executed from the clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and performs preemptive scheduling.
 *
 */
void clock(void)
{
    timeout_t *h;
    timeout_handler_t f;
    void *arg;
    count_t missed_clock_ticks = CPU->missed_clock_ticks;
    uint64_t *i = &(CPU->timeout_active_tree.basetime);
    uint64_t absolute_clock_ticks = *i + missed_clock_ticks;
    extavltree_node_t *head = &(CPU->timeout_active_tree.head);
    extavltree_node_t *expnode;

    /*
     * To avoid lock ordering problems,
     * run all expired timeouts as you visit them.
     */

    for (; *i <= absolute_clock_ticks; (*i)++) {
        clock_update_counters();
        spinlock_lock(&CPU->timeoutlock);

        while ((expnode = head->next) != head) {
            h = extavltree_get_instance(expnode, timeout_t, node);
            spinlock_lock(&h->lock);
            if (expnode->key != *i) {
                spinlock_unlock(&h->lock);
                break;
            }

            extavltree_delete_min(&CPU->timeout_active_tree);

            f = h->handler;
            arg = h->arg;
            timeout_reinitialize(h);
            spinlock_unlock(&h->lock);
            spinlock_unlock(&CPU->timeoutlock);

            f(arg);

            spinlock_lock(&CPU->timeoutlock);
        }
        spinlock_unlock(&CPU->timeoutlock);
    }

    CPU->missed_clock_ticks = 0;

    /*
     * Do CPU usage accounting and find out whether to preempt THREAD.
     */
    if (THREAD) {
        uint64_t ticks;

        spinlock_lock(&CPU->lock);
        CPU->needs_relink += 1 + missed_clock_ticks;
        spinlock_unlock(&CPU->lock);

        spinlock_lock(&THREAD->lock);
        if ((ticks = THREAD->ticks)) {
            if (ticks >= 1 + missed_clock_ticks)
                THREAD->ticks -= 1 + missed_clock_ticks;
            else
                THREAD->ticks = 0;
        }
        spinlock_unlock(&THREAD->lock);

        if (!ticks && !PREEMPTION_DISABLED) {
            scheduler();
        }
    }
}


#else


/** Clock routine.
 *
 * Clock routine executed from the clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and performs preemptive scheduling.
 *
 */
void clock(void)
{
    link_t *l;
    timeout_t *h;
    timeout_handler_t f;
    void *arg;
    count_t missed_clock_ticks = CPU->missed_clock_ticks;
    int i;

    /*
     * To avoid lock ordering problems,
     * run all expired timeouts as you visit them.
     */
    for (i = 0; i <= missed_clock_ticks; i++) {
        clock_update_counters();
        spinlock_lock(&CPU->timeoutlock);
        while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
            h = list_get_instance(l, timeout_t, link);
            spinlock_lock(&h->lock);
            if (h->ticks-- != 0) {
                spinlock_unlock(&h->lock);
                break;
            }
            list_remove(l);
            f = h->handler;
            arg = h->arg;
            timeout_reinitialize(h);
            spinlock_unlock(&h->lock);
            spinlock_unlock(&CPU->timeoutlock);

            f(arg);

            spinlock_lock(&CPU->timeoutlock);
        }
        spinlock_unlock(&CPU->timeoutlock);
    }
    CPU->missed_clock_ticks = 0;

    /*
     * Do CPU usage accounting and find out whether to preempt THREAD.
     */

    if (THREAD) {
        uint64_t ticks;

        spinlock_lock(&CPU->lock);
        CPU->needs_relink += 1 + missed_clock_ticks;
        spinlock_unlock(&CPU->lock);

        spinlock_lock(&THREAD->lock);
        if ((ticks = THREAD->ticks)) {
            if (ticks >= 1 + missed_clock_ticks)
                THREAD->ticks -= 1 + missed_clock_ticks;
            else
                THREAD->ticks = 0;
        }
        spinlock_unlock(&THREAD->lock);

        if (!ticks && !PREEMPTION_DISABLED) {
            scheduler();
        }
    }

}

#endif
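
/*
 * For reference, timeouts executed by either clock() variant above are
 * armed with timeout_register() (see time/timeout.c); the delay argument
 * is in microseconds. A minimal sketch, assuming a hypothetical handler
 * and wait queue wq, and assuming waitq_wakeup() takes a WAKEUP_FIRST
 * mode argument:
 *
 *     static void wakeup_handler(void *arg)
 *     {
 *         waitq_wakeup((waitq_t *) arg, WAKEUP_FIRST);
 *     }
 *
 *     timeout_t t;
 *     timeout_initialize(&t);
 *     timeout_register(&t, (uint64_t) 1000000, wakeup_handler, &wq);
 */
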
/** @}
 */