
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia32
 * @{
 */
/** @file
 */

#ifndef KERN_ia32_ATOMIC_H_
#define KERN_ia32_ATOMIC_H_

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>

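/** Atomic increment.
 *
 * The lock prefix is needed only on SMP, where other processors may
 * access the same memory concurrently; on uniprocessor builds a plain
 * incl is already atomic with respect to interrupts, as a single
 * instruction cannot be interrupted halfway through.
 *
 * @param val Variable to be incremented.
 */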
static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
    asm volatile (
        "lock incl %[count]\n"
        : [count] "+m" (val->count)
    );
#else
    asm volatile (
        "incl %[count]\n"
        : [count] "+m" (val->count)
    );
#endif /* CONFIG_SMP */
}

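/** Atomic decrement.
 *
 * @param val Variable to be decremented.
 */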
static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
    asm volatile (
        "lock decl %[count]\n"
        : [count] "+m" (val->count)
    );
#else
    asm volatile (
        "decl %[count]\n"
        : [count] "+m" (val->count)
    );
#endif /* CONFIG_SMP */
}

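/** Atomic post-increment.
 *
 * lock xaddl adds the register operand (here 1) to the memory operand
 * and stores the old memory value back into the register.
 *
 * @param val Variable to be incremented.
 *
 * @return Value of val before the increment.
 */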
static inline long atomic_postinc(atomic_t *val)
{
    long r = 1;

    asm volatile (
        "lock xaddl %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
}

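/** Atomic post-decrement.
 *
 * Implemented as an atomic addition of -1 via lock xaddl.
 *
 * @param val Variable to be decremented.
 *
 * @return Value of val before the decrement.
 */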
static inline long atomic_postdec(atomic_t *val)
{
    long r = -1;

    asm volatile (
        "lock xaddl %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
}

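/** Pre-increment and pre-decrement, derived from the post- variants by
 * compensating for the returned (old) value.
 */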
#define atomic_preinc(val)  (atomic_postinc(val) + 1)
#define atomic_predec(val)  (atomic_postdec(val) - 1)

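/** Atomic test-and-set.
 *
 * No lock prefix is needed: xchgl with a memory operand is implicitly
 * locked on ia32.
 *
 * @param val Variable to be tested and set to 1.
 *
 * @return Value of val before the exchange (zero means the caller won).
 */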
static inline uint32_t test_and_set(atomic_t *val)
{
    uint32_t v;

    asm volatile (
        "movl $1, %[v]\n"
        "xchgl %[v], %[count]\n"
        : [v] "=r" (v), [count] "+m" (val->count)
    );

    return v;
}

/** ia32 specific fast spinlock.
 *
 * Spins with plain reads until the lock word appears free, then
 * attempts the actual acquisition with an atomic xchgl.
 *
 * @param val Lock word; zero means unlocked.
 */
static inline void atomic_lock_arch(atomic_t *val)
{
    uint32_t tmp;

    preemption_disable();
    asm volatile (
        "0:\n"
#ifdef CONFIG_HT
        "pause\n"        /* relax the spin loop; benefits hyper-threaded Pentium 4s */
#endif
        "movl %[count], %[tmp]\n"
        "testl %[tmp], %[tmp]\n"
        "jnz 0b\n"       /* lightweight looping while the spinlock is held */

        "incl %[tmp]\n"  /* tmp is zero here; prepare the value 1 */
        "xchgl %[count], %[tmp]\n" /* atomically attempt to take the lock */
        "testl %[tmp], %[tmp]\n"
        "jnz 0b\n"       /* somebody else grabbed it first; spin again */
        : [count] "+m" (val->count), [tmp] "=&r" (tmp)
    );
    /*
     * Prevent code from the critical section from leaking out above
     * the lock acquisition.
     */
    CS_ENTER_BARRIER();
}
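
/*
 * Usage sketch (illustrative, not part of this header): a matching
 * unlock is expected to publish the critical section's stores with a
 * barrier and then clear the lock word, roughly:
 *
 *     atomic_lock_arch(&lock);   // spin until acquired
 *     // ... critical section ...
 *     CS_LEAVE_BARRIER();        // keep stores inside the section
 *     lock.count = 0;            // release the lock word
 *     preemption_enable();       // pairs with preemption_disable()
 */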

#endif

/** @}
 */