Subversion Repositories HelenOS

Rev

Rev 1882 | Rev 1899 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (C) 2005 Jakub Jermar
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29. /** @addtogroup sparc64
  30.  * @{
  31.  */
  32. /** @file
  33.  */
  34.  
  35. #ifndef KERN_sparc64_ASM_H_
  36. #define KERN_sparc64_ASM_H_
  37.  
  38. #include <arch.h>
  39. #include <typedefs.h>
  40. #include <arch/types.h>
  41. #include <arch/register.h>
  42. #include <config.h>
  43. #include <time/clock.h>
  44. #include <arch/stack.h>
  45.  
  46. /** Read Processor State register.
  47.  *
  48.  * @return Value of PSTATE register.
  49.  */
  50. static inline uint64_t pstate_read(void)
  51. {
  52.     uint64_t v;
  53.    
  54.     __asm__ volatile ("rdpr %%pstate, %0\n" : "=r" (v));
  55.    
  56.     return v;
  57. }
  58.  
/** Write Processor State register.
 *
 * The second (immediate) operand of wrpr is 0; per SPARC V9 the
 * instruction XORs its two source operands, so this writes v unchanged.
 *
 * @param v New value of PSTATE register.
 */
static inline void pstate_write(uint64_t v)
{
    __asm__ volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0));
}
  67.  
  68. /** Read TICK_compare Register.
  69.  *
  70.  * @return Value of TICK_comapre register.
  71.  */
  72. static inline uint64_t tick_compare_read(void)
  73. {
  74.     uint64_t v;
  75.    
  76.     __asm__ volatile ("rd %%tick_cmpr, %0\n" : "=r" (v));
  77.    
  78.     return v;
  79. }
  80.  
/** Write TICK_compare Register.
 *
 * The "i" (0) operand is XORed with v by the wr instruction (SPARC V9),
 * leaving the written value unchanged.
 *
 * @param v New value of TICK_compare register.
 */
static inline void tick_compare_write(uint64_t v)
{
    __asm__ volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0));
}
  89.  
  90. /** Read TICK Register.
  91.  *
  92.  * @return Value of TICK register.
  93.  */
  94. static inline uint64_t tick_read(void)
  95. {
  96.     uint64_t v;
  97.    
  98.     __asm__ volatile ("rdpr %%tick, %0\n" : "=r" (v));
  99.    
  100.     return v;
  101. }
  102.  
/** Write TICK Register.
 *
 * The "i" (0) operand is XORed with v by the wrpr instruction (SPARC V9),
 * leaving the written value unchanged.
 *
 * @param v New value of TICK register.
 */
static inline void tick_write(uint64_t v)
{
    __asm__ volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0));
}
  111.  
  112. /** Read FPRS Register.
  113.  *
  114.  * @return Value of FPRS register.
  115.  */
  116. static inline uint64_t fprs_read(void)
  117. {
  118.     uint64_t v;
  119.    
  120.     __asm__ volatile ("rd %%fprs, %0\n" : "=r" (v));
  121.    
  122.     return v;
  123. }
  124.  
/** Write FPRS Register.
 *
 * The "i" (0) operand is XORed with v by the wr instruction (SPARC V9),
 * leaving the written value unchanged.
 *
 * @param v New value of FPRS register.
 */
static inline void fprs_write(uint64_t v)
{
    __asm__ volatile ("wr %0, %1, %%fprs\n" : : "r" (v), "i" (0));
}
  133.  
  134. /** Read SOFTINT Register.
  135.  *
  136.  * @return Value of SOFTINT register.
  137.  */
  138. static inline uint64_t softint_read(void)
  139. {
  140.     uint64_t v;
  141.  
  142.     __asm__ volatile ("rd %%softint, %0\n" : "=r" (v));
  143.  
  144.     return v;
  145. }
  146.  
/** Write SOFTINT Register.
 *
 * The "i" (0) operand is XORed with v by the wr instruction (SPARC V9),
 * leaving the written value unchanged.
 *
 * @param v New value of SOFTINT register.
 */
static inline void softint_write(uint64_t v)
{
    __asm__ volatile ("wr %0, %1, %%softint\n" : : "r" (v), "i" (0));
}
  155.  
/** Write CLEAR_SOFTINT Register.
 *
 * Bits set in CLEAR_SOFTINT register will be cleared in SOFTINT register.
 * The "i" (0) operand is XORed with v by the wr instruction (SPARC V9),
 * leaving the written value unchanged.
 *
 * @param v New value of CLEAR_SOFTINT register.
 */
static inline void clear_softint_write(uint64_t v)
{
    __asm__ volatile ("wr %0, %1, %%clear_softint\n" : : "r" (v), "i" (0));
}
  166.  
/** Write SET_SOFTINT Register.
 *
 * Bits set in SET_SOFTINT register will be set in SOFTINT register.
 * The "i" (0) operand is XORed with v by the wr instruction (SPARC V9),
 * leaving the written value unchanged.
 *
 * @param v New value of SET_SOFTINT register.
 */
static inline void set_softint_write(uint64_t v)
{
    __asm__ volatile ("wr %0, %1, %%set_softint\n" : : "r" (v), "i" (0));
}
  177.  
  178. /** Enable interrupts.
  179.  *
  180.  * Enable interrupts and return previous
  181.  * value of IPL.
  182.  *
  183.  * @return Old interrupt priority level.
  184.  */
  185. static inline ipl_t interrupts_enable(void) {
  186.     pstate_reg_t pstate;
  187.     uint64_t value;
  188.    
  189.     value = pstate_read();
  190.     pstate.value = value;
  191.     pstate.ie = true;
  192.     pstate_write(pstate.value);
  193.    
  194.     return (ipl_t) value;
  195. }
  196.  
  197. /** Disable interrupts.
  198.  *
  199.  * Disable interrupts and return previous
  200.  * value of IPL.
  201.  *
  202.  * @return Old interrupt priority level.
  203.  */
  204. static inline ipl_t interrupts_disable(void) {
  205.     pstate_reg_t pstate;
  206.     uint64_t value;
  207.    
  208.     value = pstate_read();
  209.     pstate.value = value;
  210.     pstate.ie = false;
  211.     pstate_write(pstate.value);
  212.    
  213.     return (ipl_t) value;
  214. }
  215.  
  216. /** Restore interrupt priority level.
  217.  *
  218.  * Restore IPL.
  219.  *
  220.  * @param ipl Saved interrupt priority level.
  221.  */
  222. static inline void interrupts_restore(ipl_t ipl) {
  223.     pstate_reg_t pstate;
  224.    
  225.     pstate.value = pstate_read();
  226.     pstate.ie = ((pstate_reg_t) ipl).ie;
  227.     pstate_write(pstate.value);
  228. }
  229.  
/** Return interrupt priority level.
 *
 * The whole PSTATE value is used as the IPL; only the IE bit is
 * actually significant when the IPL is later restored (see
 * interrupts_restore()).
 *
 * @return Current interrupt priority level.
 */
static inline ipl_t interrupts_read(void) {
    return (ipl_t) pstate_read();
}
  239.  
  240. /** Return base address of current stack.
  241.  *
  242.  * Return the base address of the current stack.
  243.  * The stack is assumed to be STACK_SIZE bytes long.
  244.  * The stack must start on page boundary.
  245.  */
  246. static inline uintptr_t get_stack_base(void)
  247. {
  248.     uintptr_t unbiased_sp;
  249.    
  250.     __asm__ volatile ("add %%sp, %1, %0\n" : "=r" (unbiased_sp) : "i" (STACK_BIAS));
  251.    
  252.     return ALIGN_DOWN(unbiased_sp, STACK_SIZE);
  253. }
  254.  
  255. /** Read Version Register.
  256.  *
  257.  * @return Value of VER register.
  258.  */
  259. static inline uint64_t ver_read(void)
  260. {
  261.     uint64_t v;
  262.    
  263.     __asm__ volatile ("rdpr %%ver, %0\n" : "=r" (v));
  264.    
  265.     return v;
  266. }
  267.  
  268. /** Read Trap Base Address register.
  269.  *
  270.  * @return Current value in TBA.
  271.  */
  272. static inline uint64_t tba_read(void)
  273. {
  274.     uint64_t v;
  275.    
  276.     __asm__ volatile ("rdpr %%tba, %0\n" : "=r" (v));
  277.    
  278.     return v;
  279. }
  280.  
  281. /** Read Trap Program Counter register.
  282.  *
  283.  * @return Current value in TPC.
  284.  */
  285. static inline uint64_t tpc_read(void)
  286. {
  287.     uint64_t v;
  288.    
  289.     __asm__ volatile ("rdpr %%tpc, %0\n" : "=r" (v));
  290.    
  291.     return v;
  292. }
  293.  
  294. /** Read Trap Level register.
  295.  *
  296.  * @return Current value in TL.
  297.  */
  298. static inline uint64_t tl_read(void)
  299. {
  300.     uint64_t v;
  301.    
  302.     __asm__ volatile ("rdpr %%tl, %0\n" : "=r" (v));
  303.    
  304.     return v;
  305. }
  306.  
/** Write Trap Base Address register.
 *
 * The "i" (0) operand is XORed with v by the wrpr instruction (SPARC V9),
 * leaving the written value unchanged.
 *
 * @param v New value of TBA.
 */
static inline void tba_write(uint64_t v)
{
    __asm__ volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0));
}
  315.  
  316. /** Load uint64_t from alternate space.
  317.  *
  318.  * @param asi ASI determining the alternate space.
  319.  * @param va Virtual address within the ASI.
  320.  *
  321.  * @return Value read from the virtual address in the specified address space.
  322.  */
  323. static inline uint64_t asi_u64_read(asi_t asi, uintptr_t va)
  324. {
  325.     uint64_t v;
  326.    
  327.     __asm__ volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" (asi));
  328.    
  329.     return v;
  330. }
  331.  
/** Store uint64_t to alternate space.
 *
 * The "memory" clobber keeps the compiler from caching memory values
 * across this store, since it writes through an alternate address space.
 *
 * @param asi ASI determining the alternate space.
 * @param va Virtual address within the ASI.
 * @param v Value to be written.
 */
static inline void asi_u64_write(asi_t asi, uintptr_t va, uint64_t v)
{
    __asm__ volatile ("stxa %0, [%1] %2\n" : :  "r" (v), "r" (va), "i" (asi) : "memory");
}
  342.  
/** Flush all valid register windows to memory (flushw instruction). */
static inline void flushw(void)
{
    __asm__ volatile ("flushw\n");
}
  348.  
/** Switch to nucleus by setting TL to 1.
 *
 * This is basic asm (no operand list), so the single '%' register
 * prefixes are passed through literally.
 */
static inline void nucleus_enter(void)
{
    __asm__ volatile ("wrpr %g0, 1, %tl\n");
}
  354.  
/** Switch from nucleus by setting TL to 0.
 *
 * This is basic asm (no operand list), so the single '%' register
 * prefixes are passed through literally; %g0 XOR %g0 yields 0.
 */
static inline void nucleus_leave(void)
{
    __asm__ volatile ("wrpr %g0, %g0, %tl\n");
}
  360.  
/*
 * Routines implemented outside of this header.
 * NOTE(review): semantics below are inferred from the names; the
 * implementations (presumably in architecture assembly sources) are
 * not visible here — confirm against them.
 */
extern void cpu_halt(void);
extern void cpu_sleep(void);
extern void asm_delay_loop(const uint32_t usec);

/* Accessors for %g6/%g7 of the alternate (AG) and interrupt (IG)
 * global register sets — TODO confirm against the implementations. */
extern uint64_t read_from_ag_g7(void);
extern void write_to_ag_g6(uint64_t val);
extern void write_to_ag_g7(uint64_t val);
extern void write_to_ig_g6(uint64_t val);

/* Transfer control to userspace code at pc with stack sp and argument uarg. */
extern void switch_to_userspace(uint64_t pc, uint64_t sp, uint64_t uarg);
  371.  
  372. #endif
  373.  
  374. /** @}
  375.  */
  376.