Rev 177 | Rev 192 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 177 | Rev 180 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2001-2004 Jakub Jermar |
2 | * Copyright (C) 2001-2004 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | #ifndef __ia32_ASM_H__ |
29 | #ifndef __ia32_ASM_H__ |
30 | #define __ia32_ASM_H__ |
30 | #define __ia32_ASM_H__ |
31 | 31 | ||
32 | #include <arch/types.h> |
32 | #include <arch/types.h> |
33 | #include <typedefs.h> |
33 | #include <typedefs.h> |
34 | #include <config.h> |
34 | #include <config.h> |
35 | #include <synch/spinlock.h> |
35 | #include <synch/spinlock.h> |
36 | #include <arch/boot/memmap.h> |
36 | #include <arch/boot/memmap.h> |
37 | #include <config.h> |
37 | #include <config.h> |
38 | 38 | ||
39 | extern __u32 interrupt_handler_size; |
39 | extern __u32 interrupt_handler_size; |
40 | 40 | ||
41 | extern void paging_on(void); |
41 | extern void paging_on(void); |
42 | 42 | ||
43 | extern void interrupt_handlers(void); |
43 | extern void interrupt_handlers(void); |
44 | 44 | ||
45 | extern __u8 inb(int port); |
45 | extern __u8 inb(int port); |
46 | extern __u16 inw(int port); |
46 | extern __u16 inw(int port); |
47 | extern __u32 inl(int port); |
47 | extern __u32 inl(int port); |
48 | 48 | ||
49 | extern void outb(int port, __u8 b); |
49 | extern void outb(int port, __u8 b); |
50 | extern void outw(int port, __u16 w); |
50 | extern void outw(int port, __u16 w); |
51 | extern void outl(int port, __u32 l); |
51 | extern void outl(int port, __u32 l); |
52 | 52 | ||
53 | extern void enable_l_apic_in_msr(void); |
53 | extern void enable_l_apic_in_msr(void); |
54 | 54 | ||
/** Halt CPU
 *
 * Halt the current CPU until interrupt event.
 */
static inline void cpu_halt(void)
{
	/* asm with no outputs is implicitly volatile; stated explicitly. */
	__asm__ volatile ("hlt");
}

/** Sleep CPU
 *
 * Wait for the next interrupt event; on ia32 this is the same hlt
 * instruction as cpu_halt().
 */
static inline void cpu_sleep(void)
{
	__asm__ volatile ("hlt");
}
61 | 61 | ||
62 | /** Read CR2 |
62 | /** Read CR2 |
63 | * |
63 | * |
64 | * Return value in CR2 |
64 | * Return value in CR2 |
65 | * |
65 | * |
66 | * @return Value read. |
66 | * @return Value read. |
67 | */ |
67 | */ |
68 | static inline __u32 read_cr2(void) { __u32 v; __asm__ volatile ("movl %%cr2,%0" : "=r" (v)); return v; } |
68 | static inline __u32 read_cr2(void) { __u32 v; __asm__ volatile ("movl %%cr2,%0" : "=r" (v)); return v; } |
69 | 69 | ||
70 | /** Write CR3 |
70 | /** Write CR3 |
71 | * |
71 | * |
72 | * Write value to CR3. |
72 | * Write value to CR3. |
73 | * |
73 | * |
74 | * @param v Value to be written. |
74 | * @param v Value to be written. |
75 | */ |
75 | */ |
76 | static inline void write_cr3(__u32 v) { __asm__ volatile ("movl %0,%%cr3\n" : : "r" (v)); } |
76 | static inline void write_cr3(__u32 v) { __asm__ volatile ("movl %0,%%cr3\n" : : "r" (v)); } |
77 | 77 | ||
78 | /** Read CR3 |
78 | /** Read CR3 |
79 | * |
79 | * |
80 | * Return value in CR3 |
80 | * Return value in CR3 |
81 | * |
81 | * |
82 | * @return Value read. |
82 | * @return Value read. |
83 | */ |
83 | */ |
84 | static inline __u32 read_cr3(void) { __u32 v; __asm__ volatile ("movl %%cr3,%0" : "=r" (v)); return v; } |
84 | static inline __u32 read_cr3(void) { __u32 v; __asm__ volatile ("movl %%cr3,%0" : "=r" (v)); return v; } |
85 | 85 | ||
86 | /** Write DR0 |
86 | /** Write DR0 |
87 | * |
87 | * |
88 | * Write value to DR0. |
88 | * Write value to DR0. |
89 | * |
89 | * |
90 | * @param v Value to be written. |
90 | * @param v Value to be written. |
91 | */ |
91 | */ |
92 | static inline void write_dr0(__u32 v) { __asm__ volatile ("movl %0,%%dr0\n" : : "r" (v)); } |
92 | static inline void write_dr0(__u32 v) { __asm__ volatile ("movl %0,%%dr0\n" : : "r" (v)); } |
93 | 93 | ||
94 | /** Read DR0 |
94 | /** Read DR0 |
95 | * |
95 | * |
96 | * Return value in DR0 |
96 | * Return value in DR0 |
97 | * |
97 | * |
98 | * @return Value read. |
98 | * @return Value read. |
99 | */ |
99 | */ |
100 | static inline __u32 read_dr0(void) { __u32 v; __asm__ volatile ("movl %%dr0,%0" : "=r" (v)); return v; } |
100 | static inline __u32 read_dr0(void) { __u32 v; __asm__ volatile ("movl %%dr0,%0" : "=r" (v)); return v; } |
101 | 101 | ||
102 | /** Set priority level low |
102 | /** Set priority level low |
103 | * |
103 | * |
104 | * Enable interrupts and return previous |
104 | * Enable interrupts and return previous |
105 | * value of EFLAGS. |
105 | * value of EFLAGS. |
106 | */ |
106 | */ |
107 | static inline pri_t cpu_priority_low(void) { |
107 | static inline pri_t cpu_priority_low(void) { |
108 | pri_t v; |
108 | pri_t v; |
109 | __asm__ volatile ( |
109 | __asm__ volatile ( |
110 | "pushf\n" |
110 | "pushf\n" |
111 | "popl %0\n" |
111 | "popl %0\n" |
112 | "sti\n" |
112 | "sti\n" |
113 | : "=r" (v) |
113 | : "=r" (v) |
114 | ); |
114 | ); |
115 | return v; |
115 | return v; |
116 | } |
116 | } |
117 | 117 | ||
118 | /** Set priority level high |
118 | /** Set priority level high |
119 | * |
119 | * |
120 | * Disable interrupts and return previous |
120 | * Disable interrupts and return previous |
121 | * value of EFLAGS. |
121 | * value of EFLAGS. |
122 | */ |
122 | */ |
123 | static inline pri_t cpu_priority_high(void) { |
123 | static inline pri_t cpu_priority_high(void) { |
124 | pri_t v; |
124 | pri_t v; |
125 | __asm__ volatile ( |
125 | __asm__ volatile ( |
126 | "pushf\n" |
126 | "pushf\n" |
127 | "popl %0\n" |
127 | "popl %0\n" |
128 | "cli\n" |
128 | "cli\n" |
129 | : "=r" (v) |
129 | : "=r" (v) |
130 | ); |
130 | ); |
131 | return v; |
131 | return v; |
132 | } |
132 | } |
133 | 133 | ||
134 | /** Restore priority level |
134 | /** Restore priority level |
135 | * |
135 | * |
136 | * Restore EFLAGS. |
136 | * Restore EFLAGS. |
137 | */ |
137 | */ |
138 | static inline void cpu_priority_restore(pri_t pri) { |
138 | static inline void cpu_priority_restore(pri_t pri) { |
139 | __asm__ volatile ( |
139 | __asm__ volatile ( |
140 | "pushl %0\n" |
140 | "pushl %0\n" |
141 | "popf\n" |
141 | "popf\n" |
142 | : : "r" (pri) |
142 | : : "r" (pri) |
143 | ); |
143 | ); |
144 | } |
144 | } |
145 | 145 | ||
146 | /** Return raw priority level |
146 | /** Return raw priority level |
147 | * |
147 | * |
148 | * Return EFLAFS. |
148 | * Return EFLAFS. |
149 | */ |
149 | */ |
150 | static inline pri_t cpu_priority_read(void) { |
150 | static inline pri_t cpu_priority_read(void) { |
151 | pri_t v; |
151 | pri_t v; |
152 | __asm__ volatile ( |
152 | __asm__ volatile ( |
153 | "pushf\n" |
153 | "pushf\n" |
154 | "popl %0\n" |
154 | "popl %0\n" |
155 | : "=r" (v) |
155 | : "=r" (v) |
156 | ); |
156 | ); |
157 | return v; |
157 | return v; |
158 | } |
158 | } |
159 | 159 | ||
160 | /** Return base address of current stack |
160 | /** Return base address of current stack |
161 | * |
161 | * |
162 | * Return the base address of the current stack. |
162 | * Return the base address of the current stack. |
163 | * The stack is assumed to be STACK_SIZE bytes long. |
163 | * The stack is assumed to be STACK_SIZE bytes long. |
- | 164 | * The stack must start on page boundary. |
|
164 | */ |
165 | */ |
165 | static inline __address get_stack_base(void) |
166 | static inline __address get_stack_base(void) |
166 | { |
167 | { |
167 | __address v; |
168 | __address v; |
168 | 169 | ||
169 | __asm__ volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1))); |
170 | __asm__ volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1))); |
170 | 171 | ||
171 | return v; |
172 | return v; |
172 | } |
173 | } |
173 | 174 | ||
174 | #endif |
175 | #endif |
175 | 176 |