/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ia64_ASM_H__
#define __ia64_ASM_H__

#include <arch/types.h>
#include <config.h>
#include <arch/register.h>

/** Return base address of current stack
 *
 * Return the base address of the current stack.
 * The stack is assumed to be STACK_SIZE bytes long.
 * The stack must start on a page boundary.
 */
static inline __address get_stack_base(void)
{
    __u64 v;

    __asm__ volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1)));

    return v;
}
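
/*
 * Note on the technique above (illustrative comment, not part of the original
 * header): r12 is the ia64 general register used as the stack pointer, so
 * masking it with ~(STACK_SIZE-1) rounds it down to the nearest STACK_SIZE
 * boundary. This yields the stack base only if STACK_SIZE is a power of two
 * and every stack is allocated with STACK_SIZE alignment.
 */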

/** Read IVA (Interruption Vector Address).
 *
 * @return Location of the interruption vector table.
 */
static inline __u64 iva_read(void)
{
    __u64 v;

    __asm__ volatile ("mov %0 = cr.iva\n" : "=r" (v));

    return v;
}

/** Write IVA (Interruption Vector Address) register.
 *
 * @param v New location of the interruption vector table.
 */
static inline void iva_write(__u64 v)
{
    __asm__ volatile ("mov cr.iva = %0\n" : : "r" (v));
}

/** Read IVR (External Interrupt Vector Register).
 *
 * @return Highest priority, pending, unmasked external interrupt vector.
 */
static inline __u64 ivr_read(void)
{
    __u64 v;

    __asm__ volatile ("mov %0 = cr.ivr\n" : "=r" (v));

    return v;
}

/** Write ITC (Interval Timer Counter) register.
 *
 * @param v New counter value.
 */
static inline void itc_write(__u64 v)
{
    __asm__ volatile ("mov ar.itc = %0\n" : : "r" (v));
}

/** Read ITC (Interval Timer Counter) register.
 *
 * @return Current counter value.
 */
static inline __u64 itc_read(void)
{
    __u64 v;

    __asm__ volatile ("mov %0 = ar.itc\n" : "=r" (v));

    return v;
}

/** Write ITM (Interval Timer Match) register.
 *
 * @param v New match value.
 */
static inline void itm_write(__u64 v)
{
    __asm__ volatile ("mov cr.itm = %0\n" : : "r" (v));
}

/** Read ITV (Interval Timer Vector) register.
 *
 * @return Current vector and mask bit.
 */
static inline __u64 itv_read(void)
{
    __u64 v;

    __asm__ volatile ("mov %0 = cr.itv\n" : "=r" (v));

    return v;
}

/** Write ITV (Interval Timer Vector) register.
 *
 * @param v New vector and mask bit.
 */
static inline void itv_write(__u64 v)
{
    __asm__ volatile ("mov cr.itv = %0\n" : : "r" (v));
}
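
/*
 * Illustrative sketch of how the timer accessors above fit together (not part
 * of the original header): the next timer interrupt is programmed by setting
 * ITM relative to the free-running ITC, with ITV selecting the interrupt
 * vector. TIMER_DELTA and TIMER_VECTOR are hypothetical names used only in
 * this example.
 *
 *     itm_write(itc_read() + TIMER_DELTA);   // fire when ITC reaches the match value
 *     itv_write(TIMER_VECTOR);               // vector to deliver, mask bit clear
 */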

/** Write EOI (End Of Interrupt) register.
 *
 * @param v This value is ignored.
 */
static inline void eoi_write(__u64 v)
{
    __asm__ volatile ("mov cr.eoi = %0\n" : : "r" (v));
}
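
/*
 * Illustrative external interrupt handling sequence built from ivr_read() and
 * eoi_write() (sketch only, not part of the original header; handle_interrupt
 * is a hypothetical dispatch routine):
 *
 *     __u64 vector = ivr_read();   // read and acknowledge the highest priority interrupt
 *     handle_interrupt(vector);    // dispatch to a handler
 *     eoi_write(0);                // signal end of interrupt; the written value is ignored
 */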

/** Read TPR (Task Priority Register).
 *
 * @return Current value of TPR.
 */
static inline __u64 tpr_read(void)
{
    __u64 v;

    __asm__ volatile ("mov %0 = cr.tpr\n" : "=r" (v));

    return v;
}

/** Write TPR (Task Priority Register).
 *
 * @param v New value of TPR.
 */
static inline void tpr_write(__u64 v)
{
    __asm__ volatile ("mov cr.tpr = %0\n" : : "r" (v));
}

/** Disable interrupts.
 *
 * Disable interrupts and return the previous
 * value of PSR.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_disable(void)
{
    __u64 v;

    __asm__ volatile (
        "mov %0 = psr\n"
        "rsm %1\n"
        : "=r" (v)
        : "i" (PSR_I_MASK)
    );

    return (ipl_t) v;
}

/** Enable interrupts.
 *
 * Enable interrupts and return the previous
 * value of PSR.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_enable(void)
{
    __u64 v;

    __asm__ volatile (
        "mov %0 = psr\n"
        "ssm %1\n"
        ";;\n"
        "srlz.d\n"
        : "=r" (v)
        : "i" (PSR_I_MASK)
    );

    return (ipl_t) v;
}

/** Restore interrupt priority level.
 *
 * Restore PSR.
 *
 * @param ipl Saved interrupt priority level.
 */
static inline void interrupts_restore(ipl_t ipl)
{
    if (ipl & PSR_I_MASK)
        (void) interrupts_enable();
    else
        (void) interrupts_disable();
}
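
/*
 * Typical usage of the three routines above (illustrative sketch, not part of
 * the original header): save and disable, do the critical work, then restore
 * whatever interrupt state the caller had.
 *
 *     ipl_t ipl = interrupts_disable();
 *     // ... critical section ...
 *     interrupts_restore(ipl);
 */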

/** Return interrupt priority level.
 *
 * @return PSR.
 */
static inline ipl_t interrupts_read(void)
{
    __u64 v;

    __asm__ volatile ("mov %0 = psr\n" : "=r" (v));

    return (ipl_t) v;
}

extern void cpu_halt(void);
extern void cpu_sleep(void);
extern void asm_delay_loop(__u32 t);

#endif