/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ia32_ATOMIC_H__
#define __ia32_ATOMIC_H__

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>

typedef struct { volatile __u32 count; } atomic_t;

static inline void atomic_set(atomic_t *val, __u32 i)
{
    val->count = i;
}

static inline __u32 atomic_get(atomic_t *val)
{
    return val->count;
}

static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
    __asm__ volatile ("lock incl %0\n" : "+m" (val->count));
#else
    __asm__ volatile ("incl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
    __asm__ volatile ("lock decl %0\n" : "+m" (val->count));
#else
    __asm__ volatile ("decl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

/** Atomically increment the counter and return its value before the increment */
static inline count_t atomic_postinc(atomic_t *val)
{
    count_t r;

    __asm__ volatile (
        "movl $1, %0\n"
        "lock xaddl %0, %1\n"
        : "=&r" (r), "+m" (val->count)
    );

    return r;
}

/** Atomically decrement the counter and return its value before the decrement */
static inline count_t atomic_postdec(atomic_t *val)
{
    count_t r;

    __asm__ volatile (
        "movl $-1, %0\n"
        "lock xaddl %0, %1\n"
        : "=&r" (r), "+m" (val->count)
    );

    return r;
}

#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)

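/*
 * Illustrative sketch only, not part of the original interface: the post/pre
 * variants are typically used for reference counting. The obj, refcount and
 * free_object() names below are hypothetical, chosen just for the example.
 *
 *     atomic_inc(&obj->refcount);                 // take a reference
 *     if (atomic_predec(&obj->refcount) == 0)     // drop a reference
 *         free_object(obj);                       // last holder frees the object
 */
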
/** Atomically set the counter to 1 and return its previous value */
static inline __u32 test_and_set(atomic_t *val) {
    __u32 v;

    __asm__ volatile (
        "movl $1, %0\n"
        "xchgl %0, %1\n"
        : "=&r" (v), "+m" (val->count)
    );

    return v;
}

/** Ia32-specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
    __u32 tmp;

    preemption_disable();
    __asm__ volatile (
        "0:;"
#ifdef CONFIG_HT
        "pause;" /* Pentium 4's HT benefits from this instruction */
#endif
        "mov %0, %1;"
        "testl %1, %1;"
        "jnz 0b;"       /* Lightweight looping while the spinlock is held */

        "incl %1;"      /* now use the atomic operation */
        "xchgl %0, %1;"
        "testl %1, %1;"
        "jnz 0b;"
        : "+m" (val->count), "=&r" (tmp)
    );
    /*
     * Prevent critical section code from bleeding out above this point.
     */
    CS_ENTER_BARRIER();
}

#endif
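/*
 * Illustrative usage sketch, not part of this header: one way a generic
 * spinlock could sit on top of atomic_lock_arch(). The spinlock_t wrapper,
 * the spinlock_lock()/spinlock_unlock() names, and the CS_LEAVE_BARRIER()
 * plus preemption_enable() pairing are assumptions made for the example only.
 *
 *     typedef struct { atomic_t val; } spinlock_t;
 *
 *     static inline void spinlock_lock(spinlock_t *sl)
 *     {
 *         atomic_lock_arch(&sl->val);   // disables preemption, spins until val is 0, then sets it
 *     }
 *
 *     static inline void spinlock_unlock(spinlock_t *sl)
 *     {
 *         CS_LEAVE_BARRIER();           // keep the critical section above the release
 *         atomic_set(&sl->val, 0);      // zero means unlocked for atomic_lock_arch()
 *         preemption_enable();          // balance the preemption_disable() done on lock
 *     }
 */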