/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64
 * @{
 */
/** @file
 */

#ifndef KERN_sparc64_ATOMIC_H_
#define KERN_sparc64_ATOMIC_H_

#include <arch/barrier.h>
#include <arch/types.h>

/** Atomic add operation.
 *
 * Use the atomic compare-and-swap (CASX) operation to atomically add
 * a signed value.
 *
 * @param val Atomic variable.
 * @param i   Signed value to be added.
 *
 * @return Value of the atomic variable as it existed before the addition.
 */
static inline long atomic_add(atomic_t *val, int i)
{
    uint64_t a, b;

    do {
        volatile uintptr_t x = (uint64_t) &val->count;

        a = *((uint64_t *) x);
        b = a + i;
        /* If the memory location still holds a, atomically replace it with b;
         * b receives the value that was actually in memory. */
        asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (b) : "r" (a));
    } while (a != b);   /* a != b means another CPU changed the value; retry */

    return a;
}
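
/*
 * Illustrative usage only (not part of the original header); assumes the
 * atomic_set() initializer provided by the arch-independent <atomic.h>:
 *
 *     atomic_t cnt;
 *     atomic_set(&cnt, 10);
 *     long old = atomic_add(&cnt, 5);   (old == 10, cnt.count == 15)
 */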

static inline long atomic_preinc(atomic_t *val)
{
    return atomic_add(val, 1) + 1;
}

static inline long atomic_postinc(atomic_t *val)
{
    return atomic_add(val, 1);
}

static inline long atomic_predec(atomic_t *val)
{
    return atomic_add(val, -1) - 1;
}

static inline long atomic_postdec(atomic_t *val)
{
    return atomic_add(val, -1);
}

static inline void atomic_inc(atomic_t *val)
{
    (void) atomic_add(val, 1);
}

static inline void atomic_dec(atomic_t *val)
{
    (void) atomic_add(val, -1);
}

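/** Atomic test-and-set.
 *
 * If the atomic variable is zero, atomically change it to 1 using CASX.
 *
 * @param val Atomic variable.
 *
 * @return Zero if the variable was successfully changed from 0 to 1,
 *         its previous non-zero value otherwise.
 */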
static inline long test_and_set(atomic_t *val)
{
    uint64_t v = 1;
    volatile uintptr_t x = (uint64_t) &val->count;

    asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0));

    return v;
}

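/** Busy-wait until the atomic variable can be locked.
 *
 * Try to change the variable from 0 to 1 using CASX. On failure, spin
 * reading the variable with LDX until it drops back to zero and then
 * retry the CASX.
 *
 * @param val Atomic variable acting as a lock.
 */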
static inline void atomic_lock_arch(atomic_t *val)
{
    uint64_t tmp1 = 1;
    uint64_t tmp2 = 0;

    volatile uintptr_t x = (uint64_t) &val->count;

    asm volatile (
    "0:\n"
        "casx %0, %3, %1\n"     /* if the lock word is 0, swap in tmp1 (1) */
        "brz %1, 2f\n"          /* previous value was 0: lock acquired */
        "nop\n"
    "1:\n"
        "ldx %0, %2\n"          /* spin: read the lock word */
        "brz %2, 0b\n"          /* it dropped to 0: retry the casx */
        "nop\n"
        "ba 1b\n"               /* still held: keep spinning */
        "nop\n"
    "2:\n"
        : "+m" (*((uint64_t *) x)), "+r" (tmp1), "+r" (tmp2) : "r" (0)
    );

    /*
     * Prevent code from the critical section from being reordered
     * above this point.
     */
    CS_ENTER_BARRIER();
}

#endif

/** @}
 */