Subversion Repositories HelenOS

Rev

Rev 1829 | Go to most recent revision | Blame | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (C) 2001-2004 Jakub Jermar
  3.  * Copyright (C) 2005 Sergey Bondari
  4.  * All rights reserved.
  5.  *
  6.  * Redistribution and use in source and binary forms, with or without
  7.  * modification, are permitted provided that the following conditions
  8.  * are met:
  9.  *
  10.  * - Redistributions of source code must retain the above copyright
  11.  *   notice, this list of conditions and the following disclaimer.
  12.  * - Redistributions in binary form must reproduce the above copyright
  13.  *   notice, this list of conditions and the following disclaimer in the
  14.  *   documentation and/or other materials provided with the distribution.
  15.  * - The name of the author may not be used to endorse or promote products
  16.  *   derived from this software without specific prior written permission.
  17.  *
  18.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  19.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  20.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  21.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  22.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  23.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  24.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  25.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  26.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  27.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  28.  */
  29.  
  30. /** @addtogroup xen32
  31.  * @{
  32.  */
  33. /** @file
  34.  */
  35.  
  36. #ifndef __xen32_ASM_H__
  37. #define __xen32_ASM_H__
  38.  
  39. #include <arch/pm.h>
  40. #include <arch/types.h>
  41. #include <arch/barrier.h>
  42. #include <config.h>
  43.  
/* Size in bytes of one interrupt handler stub; presumably defined in
   assembly alongside interrupt_handlers — TODO confirm. */
extern uint32_t interrupt_handler_size;

/* Start of the interrupt handler stub table. Declared as a function so it
   can name a code symbol; not meant to be called directly. */
extern void interrupt_handlers(void);

/* Enable the local APIC via MSR; implementation not visible here. */
extern void enable_l_apic_in_msr(void);


/* Calibrated busy-wait delay loop and its no-op counterpart used during
   calibration (both implemented in assembly — TODO confirm). */
extern void asm_delay_loop(uint32_t t);
extern void asm_fake_loop(uint32_t t);
  53.  
  54.  
/** Halt CPU
 *
 * Halt the current CPU until interrupt event.
 *
 * NOTE(review): both macros are deliberate no-ops in this Xen port —
 * presumably the hypervisor schedules the domain instead of a real
 * `hlt`; confirm this is intentional before "fixing" them.
 */
#define cpu_halt() ((void) 0)
#define cpu_sleep() ((void) 0)
  61.  
/** Generate an inline reader for a control/debug register.
 *
 * GEN_READ_REG(reg) expands to `static inline unative_t read_reg(void)`
 * that moves %reg into a general-purpose register and returns it.
 * (Comments must stay outside the macro: a `//` comment on a continued
 * line would swallow the rest of the spliced logical line.)
 */
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
    { \
    unative_t res; \
    __asm__ volatile ("movl %%" #reg ", %0" : "=r" (res) ); \
    return res; \
    }
  68.  
/** Generate an inline writer for a control/debug register.
 *
 * GEN_WRITE_REG(reg) expands to `static inline void write_reg(unative_t)`
 * that moves the argument into %reg.
 */
#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
    { \
    __asm__ volatile ("movl %0, %%" #reg : : "r" (regn)); \
    }
  73.  
/* Readers for control registers CR0 (machine status/control) and CR2
   (faulting linear address after a page fault). */
GEN_READ_REG(cr0);
GEN_READ_REG(cr2);

/* Readers for the hardware debug registers. */
GEN_READ_REG(dr0);
GEN_READ_REG(dr1);
GEN_READ_REG(dr2);
GEN_READ_REG(dr3);
GEN_READ_REG(dr6);
GEN_READ_REG(dr7);

/* Matching writers for the debug registers (CR0/CR2 stay read-only here). */
GEN_WRITE_REG(dr0);
GEN_WRITE_REG(dr1);
GEN_WRITE_REG(dr2);
GEN_WRITE_REG(dr3);
GEN_WRITE_REG(dr6);
GEN_WRITE_REG(dr7);
  90.  
  91. /** Byte to port
  92.  *
  93.  * Output byte to port
  94.  *
  95.  * @param port Port to write to
  96.  * @param val Value to write
  97.  */
  98. static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); }
  99.  
  100. /** Word to port
  101.  *
  102.  * Output word to port
  103.  *
  104.  * @param port Port to write to
  105.  * @param val Value to write
  106.  */
  107. static inline void outw(uint16_t port, uint16_t val) { __asm__ volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) ); }
  108.  
  109. /** Double word to port
  110.  *
  111.  * Output double word to port
  112.  *
  113.  * @param port Port to write to
  114.  * @param val Value to write
  115.  */
  116. static inline void outl(uint16_t port, uint32_t val) { __asm__ volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) ); }
  117.  
  118. /** Byte from port
  119.  *
  120.  * Get byte from port
  121.  *
  122.  * @param port Port to read from
  123.  * @return Value read
  124.  */
  125. static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; }
  126.  
  127. /** Word from port
  128.  *
  129.  * Get word from port
  130.  *
  131.  * @param port Port to read from
  132.  * @return Value read
  133.  */
  134. static inline uint16_t inw(uint16_t port) { uint16_t val; __asm__ volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) ); return val; }
  135.  
  136. /** Double word from port
  137.  *
  138.  * Get double word from port
  139.  *
  140.  * @param port Port to read from
  141.  * @return Value read
  142.  */
  143. static inline uint32_t inl(uint16_t port) { uint32_t val; __asm__ volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); return val; }
  144.  
/** Enable interrupts.
 *
 * Enable interrupt delivery and return the previous state.
 * In this Xen port the "interrupt flag" is the per-VCPU event-channel
 * upcall mask in the shared info page, not EFLAGS.IF.
 *
 * @return Old interrupt priority level (previous upcall mask value).
 */
static inline ipl_t interrupts_enable(void)
{
    // FIXME SMP
    
    /* Save the current mask so the caller can restore it later. */
    ipl_t v = shared_info.vcpu_info[0].evtchn_upcall_mask;
    write_barrier();
    /* Unmask upcalls; barriers keep these hypervisor-visible stores
       in program order. */
    shared_info.vcpu_info[0].evtchn_upcall_mask = 0;
    write_barrier();
    /* An event may have been raised while we were masked — trigger
       its delivery by hand. */
    if (shared_info.vcpu_info[0].evtchn_upcall_pending)
        force_evtchn_callback();
    
    return v;
}
  165.  
/** Disable interrupts.
 *
 * Disable interrupt delivery and return the previous state.
 * As in interrupts_enable(), the state is the per-VCPU event-channel
 * upcall mask in the Xen shared info page, not EFLAGS.IF.
 *
 * @return Old interrupt priority level (previous upcall mask value).
 */
static inline ipl_t interrupts_disable(void)
{
    // FIXME SMP
    
    /* Save the current mask, then set it; the barrier makes the store
       visible to the hypervisor before we proceed. */
    ipl_t v = shared_info.vcpu_info[0].evtchn_upcall_mask;
    shared_info.vcpu_info[0].evtchn_upcall_mask = 1;
    write_barrier();
    
    return v;
}
  183.  
  184. /** Restore interrupt priority level.
  185.  *
  186.  * Restore EFLAGS.
  187.  *
  188.  * @param ipl Saved interrupt priority level.
  189.  */
  190. static inline void interrupts_restore(ipl_t ipl)
  191. {
  192.     if (ipl == 0)
  193.         interrupts_enable();
  194.     else
  195.         interrupts_disable();
  196. }
  197.  
/** Return interrupt priority level.
 *
 * @return Current per-VCPU event-channel upcall mask (the Xen analogue
 *         of EFLAGS.IF): 0 when interrupts are enabled.
 */
static inline ipl_t interrupts_read(void)
{
    // FIXME SMP
    
    return shared_info.vcpu_info[0].evtchn_upcall_mask;
}
  208.  
  209. /** Return base address of current stack
  210.  *
  211.  * Return the base address of the current stack.
  212.  * The stack is assumed to be STACK_SIZE bytes long.
  213.  * The stack must start on page boundary.
  214.  */
  215. static inline uintptr_t get_stack_base(void)
  216. {
  217.     uintptr_t v;
  218.    
  219.     __asm__ volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1)));
  220.    
  221.     return v;
  222. }
  223.  
  224. static inline uint64_t rdtsc(void)
  225. {
  226.     uint64_t v;
  227.    
  228.     __asm__ volatile("rdtsc\n" : "=A" (v));
  229.    
  230.     return v;
  231. }
  232.  
  233. /** Return current IP address */
  234. static inline uintptr_t * get_ip()
  235. {
  236.     uintptr_t *ip;
  237.  
  238.     __asm__ volatile (
  239.         "mov %%eip, %0"
  240.         : "=r" (ip)
  241.         );
  242.     return ip;
  243. }
  244.  
/** Invalidate TLB Entry.
 *
 * @param addr Address on a page whose TLB entry is to be invalidated.
 */
static inline void invlpg(uintptr_t addr)
{
    /* The "m" constraint forces a memory operand; only the effective
       address matters, the pointed-to value is never read. */
    __asm__ volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr));
}
  253.  
/** Load GDTR register from memory.
 *
 * @param gdtr_reg Address of memory from where to load GDTR.
 *
 * NOTE(review): lgdt is a privileged instruction; under Xen this
 * presumably traps to the hypervisor — confirm how the port handles it.
 */
static inline void gdtr_load(ptr_16_32_t *gdtr_reg)
{
    __asm__ volatile ("lgdtl %0\n" : : "m" (*gdtr_reg));
}
  262.  
/** Store GDTR register to memory.
 *
 * @param gdtr_reg Address of memory to where to store GDTR.
 */
static inline void gdtr_store(ptr_16_32_t *gdtr_reg)
{
    __asm__ volatile ("sgdtl %0\n" : : "m" (*gdtr_reg));
}
  271.  
/** Load TR from descriptor table.
 *
 * @param sel Selector specifying descriptor of TSS segment.
 */
static inline void tr_load(uint16_t sel)
{
    __asm__ volatile ("ltr %0" : : "r" (sel));
}
  280.  
  281. #endif
  282.  
  283. /** @}
  284.  */
  285.