/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __sparc64_ASM_H__
#define __sparc64_ASM_H__

#include <typedefs.h>
#include <arch/types.h>
#include <arch/register.h>
#include <config.h>

/** Read Processor State register.
 *
 * @return Value of PSTATE register.
 */
static inline __u64 pstate_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%pstate, %0\n" : "=r" (v));

    return v;
}

/** Write Processor State register.
 *
 * @param v New value of PSTATE register.
 */
static inline void pstate_write(__u64 v)
{
    __asm__ volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0));
}

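/* Note on the wr/wrpr idiom used throughout this file: both instructions
 * write the xor of their two source operands to the destination register.
 * With the second operand fixed at 0, each value is written unchanged.
 */
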
/** Read TICK_compare register.
 *
 * @return Value of TICK_compare register.
 */
static inline __u64 tick_compare_read(void)
{
    __u64 v;

    __asm__ volatile ("rd %%tick_cmpr, %0\n" : "=r" (v));

    return v;
}

/** Write TICK_compare register.
 *
 * @param v New value of TICK_compare register.
 */
static inline void tick_compare_write(__u64 v)
{
    __asm__ volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0));
}

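/* Usage sketch (illustrative, not part of the original header): a clock
 * driver would typically arm the next timer interrupt delta ticks into
 * the future by pairing tick_read() with tick_compare_write():
 *
 *     tick_compare_write(tick_read() + delta);
 */
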
/** Read TICK register.
 *
 * @return Value of TICK register.
 */
static inline __u64 tick_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%tick, %0\n" : "=r" (v));

    return v;
}

/** Write TICK register.
 *
 * @param v New value of TICK register.
 */
static inline void tick_write(__u64 v)
{
    __asm__ volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0));
}

/** Read SOFTINT register.
 *
 * @return Value of SOFTINT register.
 */
static inline __u64 softint_read(void)
{
    __u64 v;

    __asm__ volatile ("rd %%softint, %0\n" : "=r" (v));

    return v;
}

/** Write SOFTINT register.
 *
 * @param v New value of SOFTINT register.
 */
static inline void softint_write(__u64 v)
{
    __asm__ volatile ("wr %0, %1, %%softint\n" : : "r" (v), "i" (0));
}

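/* Usage sketch (illustrative): in SPARC V9, SOFTINT bit n (1 <= n <= 15)
 * requests an interrupt at level n, so a level-1 soft interrupt could be
 * posted with a read-modify-write; note the sequence is not atomic:
 *
 *     softint_write(softint_read() | (1 << 1));
 */
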
/** Enable interrupts.
 *
 * Enable interrupts and return previous value of IPL.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_enable(void)
{
    pstate_reg_t pstate;
    __u64 value;

    value = pstate_read();
    pstate.value = value;
    pstate.ie = true;
    pstate_write(pstate.value);

    return (ipl_t) value;
}

/** Disable interrupts.
 *
 * Disable interrupts and return previous value of IPL.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_disable(void)
{
    pstate_reg_t pstate;
    __u64 value;

    value = pstate_read();
    pstate.value = value;
    pstate.ie = false;
    pstate_write(pstate.value);

    return (ipl_t) value;
}

/** Restore interrupt priority level.
 *
 * Restore IPL.
 *
 * @param ipl Saved interrupt priority level.
 */
static inline void interrupts_restore(ipl_t ipl)
{
    pstate_reg_t pstate;

    pstate.value = pstate_read();
    pstate.ie = ((pstate_reg_t) ipl).ie;
    pstate_write(pstate.value);
}

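/* Usage sketch (illustrative): the disable/restore pair nests safely,
 * because interrupts_restore() reinstates whatever interrupt state
 * interrupts_disable() observed instead of unconditionally enabling:
 *
 *     ipl_t ipl = interrupts_disable();
 *     // ... access data shared with interrupt handlers ...
 *     interrupts_restore(ipl);
 */
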
/** Return interrupt priority level.
 *
 * Return IPL.
 *
 * @return Current interrupt priority level.
 */
static inline ipl_t interrupts_read(void)
{
    return (ipl_t) pstate_read();
}

/** Return base address of current stack.
 *
 * Return the base address of the current stack.
 * The stack is assumed to be STACK_SIZE bytes long.
 * The stack must start on a page boundary.
 */
static inline __address get_stack_base(void)
{
    __address v;

    __asm__ volatile ("and %%sp, %1, %0\n" : "=r" (v) : "r" (~(STACK_SIZE-1)));

    return v;
}

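/* Worked example (assuming STACK_SIZE == 0x2000): ~(STACK_SIZE - 1) is
 * ~0x1fff, so a stack pointer of 0x7fffe128 masks down to the base
 * address 0x7fffe000. This only works because the stack is aligned to
 * its size, hence the page-boundary requirement above.
 */
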
/** Read Version register.
 *
 * @return Value of VER register.
 */
static inline __u64 ver_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%ver, %0\n" : "=r" (v));

    return v;
}

/** Read Trap Base Address register.
 *
 * @return Current value in TBA.
 */
static inline __u64 tba_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%tba, %0\n" : "=r" (v));

    return v;
}

/** Write Trap Base Address register.
 *
 * @param v New value of TBA.
 */
static inline void tba_write(__u64 v)
{
    __asm__ volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0));
}

/** Load __u64 from alternate space.
 *
 * @param asi ASI determining the alternate space.
 * @param va Virtual address within the ASI.
 *
 * @return Value read from the virtual address in the specified address space.
 */
static inline __u64 asi_u64_read(asi_t asi, __address va)
{
    __u64 v;

    __asm__ volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" (asi));

    return v;
}

/** Store __u64 to alternate space.
 *
 * @param asi ASI determining the alternate space.
 * @param va Virtual address within the ASI.
 * @param v Value to be written.
 */
static inline void asi_u64_write(asi_t asi, __address va, __u64 v)
{
    __asm__ volatile ("stxa %0, [%1] %2\n" : : "r" (v), "r" (va), "i" (asi) : "memory");
}

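/* Usage sketch (illustrative; the ASI number and virtual address are
 * hypothetical, not constants defined here): since the asi operand is
 * an immediate ("i" constraint), it must be a compile-time constant:
 *
 *     __u64 v = asi_u64_read(0x58, 0x30);
 *     asi_u64_write(0x58, 0x30, v);
 */
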
void cpu_halt(void);
void cpu_sleep(void);
void asm_delay_loop(__u32 t);

#endif