Subversion Repositories HelenOS

Rev

Rev 3779 | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (c) 2005 Jakub Jermar
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29. /** @addtogroup ia64   
  30.  * @{
  31.  */
  32. /** @file
  33.  */
  34.  
  35. #ifndef KERN_ia64_ASM_H_
  36. #define KERN_ia64_ASM_H_
  37.  
  38. #include <config.h>
  39. #include <arch/types.h>
  40. #include <arch/register.h>
  41.  
  42. #define IA64_IOSPACE_ADDRESS 0xE001000000000000ULL
  43.  
  44. static inline void  outb(ioport_t port, uint8_t v)
  45. {
  46.     *((uint8_t *)(IA64_IOSPACE_ADDRESS +
  47.         ((port & 0xfff) | ((port >> 2) << 12)))) = v;
  48.  
  49.     asm volatile ("mf\n" ::: "memory");
  50. }
  51.  
  52. static inline void  outw(ioport_t port, uint16_t v)
  53. {
  54.     *((uint16_t *)(IA64_IOSPACE_ADDRESS +
  55.         ((port & 0xfff) | ((port >> 2) << 12)))) = v;
  56.  
  57.     asm volatile ("mf\n" ::: "memory");
  58. }
  59.  
  60. static inline void  outl(ioport_t port, uint32_t v)
  61. {
  62.     *((uint32_t *)(IA64_IOSPACE_ADDRESS +
  63.         ((port & 0xfff) | ((port >> 2) << 12)))) = v;
  64.  
  65.     asm volatile ("mf\n" ::: "memory");
  66. }
  67.  
  68. static inline uint8_t inb(ioport_t port)
  69. {
  70.     asm volatile ("mf\n" ::: "memory");
  71.  
  72.     return *((uint8_t *)(IA64_IOSPACE_ADDRESS +
  73.         ((port & 0xfff) | ((port >> 2) << 12))));
  74. }
  75.  
  76. static inline uint16_t inw(ioport_t port)
  77. {
  78.     asm volatile ("mf\n" ::: "memory");
  79.  
  80.     return *((uint16_t *)(IA64_IOSPACE_ADDRESS +
  81.         ((port & 0xffE) | ((port >> 2) << 12))));
  82. }
  83.  
  84. static inline uint32_t inl(ioport_t port)
  85. {
  86.     asm volatile ("mf\n" ::: "memory");
  87.  
  88.     return *((uint32_t *)(IA64_IOSPACE_ADDRESS +
  89.         ((port & 0xfff) | ((port >> 2) << 12))));
  90. }
  91.  
  92. /** Return base address of current stack
  93.  *
  94.  * Return the base address of the current stack.
  95.  * The stack is assumed to be STACK_SIZE long.
  96.  * The stack must start on page boundary.
  97.  */
  98. static inline uintptr_t get_stack_base(void)
  99. {
  100.     uint64_t v;
  101.  
  102.     //I'm not sure why but this code bad inlines in scheduler,
  103.     //so THE shifts about 16B and causes kernel panic
  104.     //asm volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1)));
  105.     //return v;
  106.    
  107.     //this code have the same meaning but inlines well
  108.     asm volatile ("mov %0 = r12" : "=r" (v)  );
  109.     return v & (~(STACK_SIZE-1));
  110. }
  111.  
  112. /** Return Processor State Register.
  113.  *
  114.  * @return PSR.
  115.  */
  116. static inline uint64_t psr_read(void)
  117. {
  118.     uint64_t v;
  119.    
  120.     asm volatile ("mov %0 = psr\n" : "=r" (v));
  121.    
  122.     return v;
  123. }
  124.  
  125. /** Read IVA (Interruption Vector Address).
  126.  *
  127.  * @return Return location of interruption vector table.
  128.  */
  129. static inline uint64_t iva_read(void)
  130. {
  131.     uint64_t v;
  132.    
  133.     asm volatile ("mov %0 = cr.iva\n" : "=r" (v));
  134.    
  135.     return v;
  136. }
  137.  
  138. /** Write IVA (Interruption Vector Address) register.
  139.  *
  140.  * @param v New location of interruption vector table.
  141.  */
  142. static inline void iva_write(uint64_t v)
  143. {
  144.     asm volatile ("mov cr.iva = %0\n" : : "r" (v));
  145. }
  146.  
  147.  
  148. /** Read IVR (External Interrupt Vector Register).
  149.  *
  150.  * @return Highest priority, pending, unmasked external interrupt vector.
  151.  */
  152. static inline uint64_t ivr_read(void)
  153. {
  154.     uint64_t v;
  155.    
  156.     asm volatile ("mov %0 = cr.ivr\n" : "=r" (v));
  157.    
  158.     return v;
  159. }
  160.  
  161. static inline uint64_t cr64_read(void)
  162. {
  163.     uint64_t v;
  164.    
  165.     asm volatile ("mov %0 = cr64\n" : "=r" (v));
  166.    
  167.     return v;
  168. }
  169.  
  170.  
  171. /** Write ITC (Interval Timer Counter) register.
  172.  *
  173.  * @param v New counter value.
  174.  */
  175. static inline void itc_write(uint64_t v)
  176. {
  177.     asm volatile ("mov ar.itc = %0\n" : : "r" (v));
  178. }
  179.  
  180. /** Read ITC (Interval Timer Counter) register.
  181.  *
  182.  * @return Current counter value.
  183.  */
  184. static inline uint64_t itc_read(void)
  185. {
  186.     uint64_t v;
  187.    
  188.     asm volatile ("mov %0 = ar.itc\n" : "=r" (v));
  189.    
  190.     return v;
  191. }
  192.  
  193. /** Write ITM (Interval Timer Match) register.
  194.  *
  195.  * @param v New match value.
  196.  */
  197. static inline void itm_write(uint64_t v)
  198. {
  199.     asm volatile ("mov cr.itm = %0\n" : : "r" (v));
  200. }
  201.  
  202. /** Read ITM (Interval Timer Match) register.
  203.  *
  204.  * @return Match value.
  205.  */
  206. static inline uint64_t itm_read(void)
  207. {
  208.     uint64_t v;
  209.    
  210.     asm volatile ("mov %0 = cr.itm\n" : "=r" (v));
  211.    
  212.     return v;
  213. }
  214.  
  215. /** Read ITV (Interval Timer Vector) register.
  216.  *
  217.  * @return Current vector and mask bit.
  218.  */
  219. static inline uint64_t itv_read(void)
  220. {
  221.     uint64_t v;
  222.    
  223.     asm volatile ("mov %0 = cr.itv\n" : "=r" (v));
  224.    
  225.     return v;
  226. }
  227.  
  228. /** Write ITV (Interval Timer Vector) register.
  229.  *
  230.  * @param v New vector and mask bit.
  231.  */
  232. static inline void itv_write(uint64_t v)
  233. {
  234.     asm volatile ("mov cr.itv = %0\n" : : "r" (v));
  235. }
  236.  
  237. /** Write EOI (End Of Interrupt) register.
  238.  *
  239.  * @param v This value is ignored.
  240.  */
  241. static inline void eoi_write(uint64_t v)
  242. {
  243.     asm volatile ("mov cr.eoi = %0\n" : : "r" (v));
  244. }
  245.  
  246. /** Read TPR (Task Priority Register).
  247.  *
  248.  * @return Current value of TPR.
  249.  */
  250. static inline uint64_t tpr_read(void)
  251. {
  252.     uint64_t v;
  253.  
  254.     asm volatile ("mov %0 = cr.tpr\n"  : "=r" (v));
  255.    
  256.     return v;
  257. }
  258.  
  259. /** Write TPR (Task Priority Register).
  260.  *
  261.  * @param v New value of TPR.
  262.  */
  263. static inline void tpr_write(uint64_t v)
  264. {
  265.     asm volatile ("mov cr.tpr = %0\n" : : "r" (v));
  266. }
  267.  
  268. /** Disable interrupts.
  269.  *
  270.  * Disable interrupts and return previous
  271.  * value of PSR.
  272.  *
  273.  * @return Old interrupt priority level.
  274.  */
  275. static ipl_t interrupts_disable(void)
  276. {
  277.     uint64_t v;
  278.    
  279.     asm volatile (
  280.         "mov %0 = psr\n"
  281.         "rsm %1\n"
  282.         : "=r" (v)
  283.         : "i" (PSR_I_MASK)
  284.     );
  285.    
  286.     return (ipl_t) v;
  287. }
  288.  
  289. /** Enable interrupts.
  290.  *
  291.  * Enable interrupts and return previous
  292.  * value of PSR.
  293.  *
  294.  * @return Old interrupt priority level.
  295.  */
  296. static ipl_t interrupts_enable(void)
  297. {
  298.     uint64_t v;
  299.    
  300.     asm volatile (
  301.         "mov %0 = psr\n"
  302.         "ssm %1\n"
  303.         ";;\n"
  304.         "srlz.d\n"
  305.         : "=r" (v)
  306.         : "i" (PSR_I_MASK)
  307.     );
  308.    
  309.     return (ipl_t) v;
  310. }
  311.  
  312. /** Restore interrupt priority level.
  313.  *
  314.  * Restore PSR.
  315.  *
  316.  * @param ipl Saved interrupt priority level.
  317.  */
  318. static inline void interrupts_restore(ipl_t ipl)
  319. {
  320.     if (ipl & PSR_I_MASK)
  321.         (void) interrupts_enable();
  322.     else
  323.         (void) interrupts_disable();
  324. }
  325.  
  326. /** Return interrupt priority level.
  327.  *
  328.  * @return PSR.
  329.  */
  330. static inline ipl_t interrupts_read(void)
  331. {
  332.     return (ipl_t) psr_read();
  333. }
  334.  
  335. /** Disable protection key checking. */
  336. static inline void pk_disable(void)
  337. {
  338.     asm volatile ("rsm %0\n" : : "i" (PSR_PK_MASK));
  339. }
  340.  
/* Out-of-line routines implemented in arch-specific assembly/C. */
extern void cpu_halt(void);
extern void cpu_sleep(void);
extern void asm_delay_loop(uint32_t t);

/* Drop to userspace; parameter meanings defined at the implementation. */
extern void switch_to_userspace(uintptr_t, uintptr_t, uintptr_t, uintptr_t,
    uint64_t, uint64_t);
  347.  
  348. #endif
  349.  
  350. /** @}
  351.  */
  352.