Subversion Repositories HelenOS-historic

Rev

Rev 919 | Rev 1488 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (C) 2005 Jakub Jermar
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29. #ifndef __ia64_ASM_H__
  30. #define __ia64_ASM_H__
  31.  
  32. #include <config.h>
  33. #include <arch/types.h>
  34. #include <arch/register.h>
  35.  
  36. /** Return base address of current stack
  37.  *
  38.  * Return the base address of the current stack.
  39.  * The stack is assumed to be STACK_SIZE long.
  40.  * The stack must start on page boundary.
  41.  */
  42. static inline __address get_stack_base(void)
  43. {
  44.     __u64 v;
  45.  
  46.     __asm__ volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1)));
  47.    
  48.     return v;
  49. }
  50.  
  51. /** Return Processor State Register.
  52.  *
  53.  * @return PSR.
  54.  */
  55. static inline __u64 psr_read(void)
  56. {
  57.     __u64 v;
  58.    
  59.     __asm__ volatile ("mov %0 = psr\n" : "=r" (v));
  60.    
  61.     return v;
  62. }
  63.  
  64. /** Read IVA (Interruption Vector Address).
  65.  *
  66.  * @return Return location of interruption vector table.
  67.  */
  68. static inline __u64 iva_read(void)
  69. {
  70.     __u64 v;
  71.    
  72.     __asm__ volatile ("mov %0 = cr.iva\n" : "=r" (v));
  73.    
  74.     return v;
  75. }
  76.  
  77. /** Write IVA (Interruption Vector Address) register.
  78.  *
  79.  * @param New location of interruption vector table.
  80.  */
  81. static inline void iva_write(__u64 v)
  82. {
  83.     __asm__ volatile ("mov cr.iva = %0\n" : : "r" (v));
  84. }
  85.  
  86.  
  87. /** Read IVR (External Interrupt Vector Register).
  88.  *
  89.  * @return Highest priority, pending, unmasked external interrupt vector.
  90.  */
  91. static inline __u64 ivr_read(void)
  92. {
  93.     __u64 v;
  94.    
  95.     __asm__ volatile ("mov %0 = cr.ivr\n" : "=r" (v));
  96.    
  97.     return v;
  98. }
  99.  
  100. /** Write ITC (Interval Timer Counter) register.
  101.  *
  102.  * @param New counter value.
  103.  */
  104. static inline void itc_write(__u64 v)
  105. {
  106.     __asm__ volatile ("mov ar.itc = %0\n" : : "r" (v));
  107. }
  108.  
  109. /** Read ITC (Interval Timer Counter) register.
  110.  *
  111.  * @return Current counter value.
  112.  */
  113. static inline __u64 itc_read(void)
  114. {
  115.     __u64 v;
  116.    
  117.     __asm__ volatile ("mov %0 = ar.itc\n" : "=r" (v));
  118.    
  119.     return v;
  120. }
  121.  
  122. /** Write ITM (Interval Timer Match) register.
  123.  *
  124.  * @param New match value.
  125.  */
  126. static inline void itm_write(__u64 v)
  127. {
  128.     __asm__ volatile ("mov cr.itm = %0\n" : : "r" (v));
  129. }
  130.  
  131. /** Read ITV (Interval Timer Vector) register.
  132.  *
  133.  * @return Current vector and mask bit.
  134.  */
  135. static inline __u64 itv_read(void)
  136. {
  137.     __u64 v;
  138.    
  139.     __asm__ volatile ("mov %0 = cr.itv\n" : "=r" (v));
  140.    
  141.     return v;
  142. }
  143.  
  144. /** Write ITV (Interval Timer Vector) register.
  145.  *
  146.  * @param New vector and mask bit.
  147.  */
  148. static inline void itv_write(__u64 v)
  149. {
  150.     __asm__ volatile ("mov cr.itv = %0\n" : : "r" (v));
  151. }
  152.  
  153. /** Write EOI (End Of Interrupt) register.
  154.  *
  155.  * @param This value is ignored.
  156.  */
  157. static inline void eoi_write(__u64 v)
  158. {
  159.     __asm__ volatile ("mov cr.eoi = %0\n" : : "r" (v));
  160. }
  161.  
  162. /** Read TPR (Task Priority Register).
  163.  *
  164.  * @return Current value of TPR.
  165.  */
  166. static inline __u64 tpr_read(void)
  167. {
  168.     __u64 v;
  169.  
  170.     __asm__ volatile ("mov %0 = cr.tpr\n"  : "=r" (v));
  171.    
  172.     return v;
  173. }
  174.  
  175. /** Write TPR (Task Priority Register).
  176.  *
  177.  * @param New value of TPR.
  178.  */
  179. static inline void tpr_write(__u64 v)
  180. {
  181.     __asm__ volatile ("mov cr.tpr = %0\n" : : "r" (v));
  182. }
  183.  
  184. /** Disable interrupts.
  185.  *
  186.  * Disable interrupts and return previous
  187.  * value of PSR.
  188.  *
  189.  * @return Old interrupt priority level.
  190.  */
  191. static ipl_t interrupts_disable(void)
  192. {
  193.     __u64 v;
  194.    
  195.     __asm__ volatile (
  196.         "mov %0 = psr\n"
  197.         "rsm %1\n"
  198.         : "=r" (v)
  199.         : "i" (PSR_I_MASK)
  200.     );
  201.    
  202.     return (ipl_t) v;
  203. }
  204.  
  205. /** Enable interrupts.
  206.  *
  207.  * Enable interrupts and return previous
  208.  * value of PSR.
  209.  *
  210.  * @return Old interrupt priority level.
  211.  */
  212. static ipl_t interrupts_enable(void)
  213. {
  214.     __u64 v;
  215.    
  216.     __asm__ volatile (
  217.         "mov %0 = psr\n"
  218.         "ssm %1\n"
  219.         ";;\n"
  220.         "srlz.d\n"
  221.         : "=r" (v)
  222.         : "i" (PSR_I_MASK)
  223.     );
  224.    
  225.     return (ipl_t) v;
  226. }
  227.  
  228. /** Restore interrupt priority level.
  229.  *
  230.  * Restore PSR.
  231.  *
  232.  * @param ipl Saved interrupt priority level.
  233.  */
  234. static inline void interrupts_restore(ipl_t ipl)
  235. {
  236.     if (ipl & PSR_I_MASK)
  237.         (void) interrupts_enable();
  238.     else
  239.         (void) interrupts_disable();
  240. }
  241.  
  242. /** Return interrupt priority level.
  243.  *
  244.  * @return PSR.
  245.  */
  246. static inline ipl_t interrupts_read(void)
  247. {
  248.     return (ipl_t) psr_read();
  249. }
  250.  
  251. /** Disable protection key checking. */
  252. static inline void pk_disable(void)
  253. {
  254.     __asm__ volatile ("rsm %0\n" : : "i" (PSR_PK_MASK));
  255. }
  256.  
/* Halt the processor; implemented in arch-specific assembly/C. */
extern void cpu_halt(void);
/* Put the processor into a low-power wait state. */
extern void cpu_sleep(void);
/* Busy-wait delay loop; t is the loop count (units defined by the
 * implementation — presumably calibrated ticks; confirm at call sites). */
extern void asm_delay_loop(__u32 t);

/* Transfer control to userspace: entry point, stack pointer, register
 * backing store pointer, uspace argument pointer, and the IPSR/RSC
 * values to install for the userspace context. */
extern void switch_to_userspace(__address entry, __address sp, __address bsp, __address uspace_uarg, __u64 ipsr, __u64 rsc);
  262.  
  263. #endif
  264.