/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __amd64_ASM_H__
#define __amd64_ASM_H__

#include <arch/types.h>
#include <config.h>


void asm_delay_loop(__u32 t);
void asm_fake_loop(__u32 t);

/** Return base address of current stack
 *
 * The stack is assumed to be STACK_SIZE bytes long
 * and aligned on a STACK_SIZE boundary, so the base
 * is obtained by masking the stack pointer.
 */
static inline __address get_stack_base(void)
{
    __address v;

    __asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((__u64)STACK_SIZE-1)));

    return v;
}
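
/*
 * Usage sketch, assuming (hypothetically) that a per-thread record is stored
 * at the very bottom of each STACK_SIZE-aligned kernel stack: any function,
 * no matter how deep in the call chain, can then reach it by masking RSP,
 * which is exactly what get_stack_base() does. Both the type and the helper
 * below are illustrative only.
 */
typedef struct {
    __u32 preemption_disabled;    /* hypothetical field */
} stack_record_example_t;

static inline stack_record_example_t *stack_record_example(void)
{
    return (stack_record_example_t *) get_stack_base();
}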

static inline void cpu_sleep(void) { __asm__("hlt"); }
static inline void cpu_halt(void) { __asm__("hlt"); }


/** Byte from port
 *
 * Read one byte from the given I/O port.
 *
 * @param port Port to read from.
 * @return Value read.
 */
static inline __u8 inb(__u16 port)
{
    __u8 out;

    asm volatile (
        "mov %1, %%dx;"
        "inb %%dx,%%al;"
        "mov %%al, %0;"
        :"=m"(out)
        :"m"(port)
        :"%dx","%al"
        );
    return out;
}

/** Byte to port
 *
 * Write one byte to the given I/O port.
 *
 * @param port Port to write to.
 * @param b Value to write.
 */
static inline void outb(__u16 port, __u8 b)
{
    asm (
        "mov %0,%%dx;"
        "mov %1,%%al;"
        "outb %%al,%%dx;"
        :
        :"m"(port), "m"(b)
        :"%dx","%al"
        );
}
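
/*
 * Usage sketch for the port I/O helpers above, assuming the traditional PC
 * CMOS/RTC I/O addresses (0x70 selects a CMOS register, 0x71 carries its
 * data). The cmos_read_example() helper itself is hypothetical.
 */
static inline __u8 cmos_read_example(__u8 reg)
{
    outb(0x70, reg);      /* select the CMOS register */
    return inb(0x71);     /* read the selected register's value */
}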

/** Set priority level low
 *
 * Enable interrupts and return previous
 * value of EFLAGS.
 */
static inline pri_t cpu_priority_low(void) {
    pri_t v;
    __asm__ volatile (
        "pushfq\n"
        "popq %0\n"
        "sti\n"
        : "=r" (v)
    );
    return v;
}

/** Set priority level high
 *
 * Disable interrupts and return previous
 * value of EFLAGS.
 */
static inline pri_t cpu_priority_high(void) {
    pri_t v;
    __asm__ volatile (
        "pushfq\n"
        "popq %0\n"
        "cli\n"
        : "=r" (v)
        );
    return v;
}

/** Restore priority level
 *
 * Restore EFLAGS.
 */
static inline void cpu_priority_restore(pri_t pri) {
    __asm__ volatile (
        "pushq %0\n"
        "popfq\n"
        : : "r" (pri)
        );
}

/** Return raw priority level
 *
 * Return EFLAGS.
 */
static inline pri_t cpu_priority_read(void) {
    pri_t v;
    __asm__ volatile (
        "pushfq\n"
        "popq %0\n"
        : "=r" (v)
    );
    return v;
}
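
/*
 * Usage sketch: the priority-level primitives above are meant to bracket
 * short critical sections. The function below is a hypothetical example of
 * that save/disable/restore pattern, not part of the kernel interface.
 */
static inline void critical_section_example(void)
{
    pri_t pri;

    pri = cpu_priority_high();    /* disable interrupts, remember old EFLAGS */
    /* ... work that must not be interrupted ... */
    cpu_priority_restore(pri);    /* put the saved EFLAGS (and IF) back */
}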

/** Read CR2
 *
 * Return value in CR2.
 *
 * @return Value read.
 */
static inline __u64 read_cr2(void) { __u64 v; __asm__ volatile ("movq %%cr2,%0" : "=r" (v)); return v; }

/** Write CR3
 *
 * Write value to CR3.
 *
 * @param v Value to be written.
 */
static inline void write_cr3(__u64 v) { __asm__ volatile ("movq %0,%%cr3\n" : : "r" (v)); }

/** Read CR3
 *
 * Return value in CR3.
 *
 * @return Value read.
 */
static inline __u64 read_cr3(void) { __u64 v; __asm__ volatile ("movq %%cr3,%0" : "=r" (v)); return v; }
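
/*
 * Usage sketch: on amd64, reloading CR3 with its current value flushes all
 * non-global TLB entries, a common way to drop stale translations after the
 * page tables have changed. The helper below is hypothetical.
 */
static inline void tlb_flush_example(void)
{
    write_cr3(read_cr3());
}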


extern size_t interrupt_handler_size;
extern void interrupt_handlers(void);

#endif