Rev 2131 | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2131 | Rev 2307 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2005 Jakub Jermar |
2 | * Copyright (c) 2005 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup amd64 |
29 | /** @addtogroup amd64 |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | /** @file |
32 | /** @file |
33 | */ |
33 | */ |
34 | 34 | ||
35 | #ifndef KERN_amd64_ASM_H_ |
35 | #ifndef KERN_amd64_ASM_H_ |
36 | #define KERN_amd64_ASM_H_ |
36 | #define KERN_amd64_ASM_H_ |
37 | 37 | ||
38 | #include <config.h> |
38 | #include <config.h> |
39 | 39 | ||
40 | extern void asm_delay_loop(uint32_t t); |
40 | extern void asm_delay_loop(uint32_t t); |
41 | extern void asm_fake_loop(uint32_t t); |
41 | extern void asm_fake_loop(uint32_t t); |
42 | 42 | ||
43 | /** Return base address of current stack. |
43 | /** Return base address of current stack. |
44 | * |
44 | * |
45 | * Return the base address of the current stack. |
45 | * Return the base address of the current stack. |
46 | * The stack is assumed to be STACK_SIZE bytes long. |
46 | * The stack is assumed to be STACK_SIZE bytes long. |
47 | * The stack must start on page boundary. |
47 | * The stack must start on page boundary. |
48 | */ |
48 | */ |
49 | static inline uintptr_t get_stack_base(void) |
49 | static inline uintptr_t get_stack_base(void) |
50 | { |
50 | { |
51 | uintptr_t v; |
51 | uintptr_t v; |
52 | 52 | ||
53 | asm volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1))); |
53 | asm volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1))); |
54 | 54 | ||
55 | return v; |
55 | return v; |
56 | } |
56 | } |
57 | 57 | ||
/** Idle the CPU until the next interrupt arrives (hlt). */
static inline void cpu_sleep(void)
{
	__asm__ volatile ("hlt\n");
}
- | 62 | ||
/** Halt the CPU.
 *
 * NOTE(review): this executes a single hlt, so the function
 * returns once an interrupt wakes the CPU — confirm callers
 * loop if a permanent halt is intended.
 */
static inline void cpu_halt(void)
{
	__asm__ volatile ("hlt\n");
}
60 | 67 | ||
61 | 68 | ||
62 | /** Byte from port |
69 | /** Byte from port |
63 | * |
70 | * |
64 | * Get byte from port |
71 | * Get byte from port |
65 | * |
72 | * |
66 | * @param port Port to read from |
73 | * @param port Port to read from |
67 | * @return Value read |
74 | * @return Value read |
68 | */ |
75 | */ |
69 | static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; } |
76 | static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; } |
70 | 77 | ||
71 | /** Byte to port |
78 | /** Byte to port |
72 | * |
79 | * |
73 | * Output byte to port |
80 | * Output byte to port |
74 | * |
81 | * |
75 | * @param port Port to write to |
82 | * @param port Port to write to |
76 | * @param val Value to write |
83 | * @param val Value to write |
77 | */ |
84 | */ |
78 | static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); } |
85 | static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); } |
79 | 86 | ||
/** Swap the hidden part of the GS register with the visible one. */
static inline void swapgs(void)
{
	__asm__ volatile ("swapgs");
}
82 | 89 | ||
83 | /** Enable interrupts. |
90 | /** Enable interrupts. |
84 | * |
91 | * |
85 | * Enable interrupts and return previous |
92 | * Enable interrupts and return previous |
86 | * value of EFLAGS. |
93 | * value of EFLAGS. |
87 | * |
94 | * |
88 | * @return Old interrupt priority level. |
95 | * @return Old interrupt priority level. |
89 | */ |
96 | */ |
90 | static inline ipl_t interrupts_enable(void) { |
97 | static inline ipl_t interrupts_enable(void) { |
91 | ipl_t v; |
98 | ipl_t v; |
92 | __asm__ volatile ( |
99 | __asm__ volatile ( |
93 | "pushfq\n" |
100 | "pushfq\n" |
94 | "popq %0\n" |
101 | "popq %0\n" |
95 | "sti\n" |
102 | "sti\n" |
96 | : "=r" (v) |
103 | : "=r" (v) |
97 | ); |
104 | ); |
98 | return v; |
105 | return v; |
99 | } |
106 | } |
100 | 107 | ||
101 | /** Disable interrupts. |
108 | /** Disable interrupts. |
102 | * |
109 | * |
103 | * Disable interrupts and return previous |
110 | * Disable interrupts and return previous |
104 | * value of EFLAGS. |
111 | * value of EFLAGS. |
105 | * |
112 | * |
106 | * @return Old interrupt priority level. |
113 | * @return Old interrupt priority level. |
107 | */ |
114 | */ |
108 | static inline ipl_t interrupts_disable(void) { |
115 | static inline ipl_t interrupts_disable(void) { |
109 | ipl_t v; |
116 | ipl_t v; |
110 | __asm__ volatile ( |
117 | __asm__ volatile ( |
111 | "pushfq\n" |
118 | "pushfq\n" |
112 | "popq %0\n" |
119 | "popq %0\n" |
113 | "cli\n" |
120 | "cli\n" |
114 | : "=r" (v) |
121 | : "=r" (v) |
115 | ); |
122 | ); |
116 | return v; |
123 | return v; |
117 | } |
124 | } |
118 | 125 | ||
119 | /** Restore interrupt priority level. |
126 | /** Restore interrupt priority level. |
120 | * |
127 | * |
121 | * Restore EFLAGS. |
128 | * Restore EFLAGS. |
122 | * |
129 | * |
123 | * @param ipl Saved interrupt priority level. |
130 | * @param ipl Saved interrupt priority level. |
124 | */ |
131 | */ |
125 | static inline void interrupts_restore(ipl_t ipl) { |
132 | static inline void interrupts_restore(ipl_t ipl) { |
126 | __asm__ volatile ( |
133 | __asm__ volatile ( |
127 | "pushq %0\n" |
134 | "pushq %0\n" |
128 | "popfq\n" |
135 | "popfq\n" |
129 | : : "r" (ipl) |
136 | : : "r" (ipl) |
130 | ); |
137 | ); |
131 | } |
138 | } |
132 | 139 | ||
133 | /** Return interrupt priority level. |
140 | /** Return interrupt priority level. |
134 | * |
141 | * |
135 | * Return EFLAFS. |
142 | * Return EFLAFS. |
136 | * |
143 | * |
137 | * @return Current interrupt priority level. |
144 | * @return Current interrupt priority level. |
138 | */ |
145 | */ |
139 | static inline ipl_t interrupts_read(void) { |
146 | static inline ipl_t interrupts_read(void) { |
140 | ipl_t v; |
147 | ipl_t v; |
141 | __asm__ volatile ( |
148 | __asm__ volatile ( |
142 | "pushfq\n" |
149 | "pushfq\n" |
143 | "popq %0\n" |
150 | "popq %0\n" |
144 | : "=r" (v) |
151 | : "=r" (v) |
145 | ); |
152 | ); |
146 | return v; |
153 | return v; |
147 | } |
154 | } |
148 | 155 | ||
149 | /** Write to MSR */ |
156 | /** Write to MSR */ |
150 | static inline void write_msr(uint32_t msr, uint64_t value) |
157 | static inline void write_msr(uint32_t msr, uint64_t value) |
151 | { |
158 | { |
152 | __asm__ volatile ( |
159 | __asm__ volatile ( |
153 | "wrmsr;" : : "c" (msr), |
160 | "wrmsr;" : : "c" (msr), |
154 | "a" ((uint32_t)(value)), |
161 | "a" ((uint32_t)(value)), |
155 | "d" ((uint32_t)(value >> 32)) |
162 | "d" ((uint32_t)(value >> 32)) |
156 | ); |
163 | ); |
157 | } |
164 | } |
158 | 165 | ||
159 | static inline unative_t read_msr(uint32_t msr) |
166 | static inline unative_t read_msr(uint32_t msr) |
160 | { |
167 | { |
161 | uint32_t ax, dx; |
168 | uint32_t ax, dx; |
162 | 169 | ||
163 | __asm__ volatile ( |
170 | __asm__ volatile ( |
164 | "rdmsr;" : "=a"(ax), "=d"(dx) : "c" (msr) |
171 | "rdmsr;" : "=a"(ax), "=d"(dx) : "c" (msr) |
165 | ); |
172 | ); |
166 | return ((uint64_t)dx << 32) | ax; |
173 | return ((uint64_t)dx << 32) | ax; |
167 | } |
174 | } |
168 | 175 | ||
169 | 176 | ||
/** Enable local APIC
 *
 * Enable the local APIC via MSR 0x1b: set bit 11 (enable)
 * and OR in the base address 0xfee00000.
 */
static inline void enable_l_apic_in_msr(void)
{
	__asm__ volatile (
		"movl $0x1b, %%ecx\n"
		"rdmsr\n"
		"orl $(1<<11),%%eax\n"
		"orl $(0xfee00000),%%eax\n"
		"wrmsr\n"
		:
		:
		: "%eax", "%ecx", "%edx"
	);
}
187 | 194 | ||
188 | static inline uintptr_t * get_ip() |
195 | static inline uintptr_t * get_ip() |
189 | { |
196 | { |
190 | uintptr_t *ip; |
197 | uintptr_t *ip; |
191 | 198 | ||
192 | __asm__ volatile ( |
199 | __asm__ volatile ( |
193 | "mov %%rip, %0" |
200 | "mov %%rip, %0" |
194 | : "=r" (ip) |
201 | : "=r" (ip) |
195 | ); |
202 | ); |
196 | return ip; |
203 | return ip; |
197 | } |
204 | } |
198 | 205 | ||
/** Invalidate TLB Entry.
 *
 * Executes invlpg on the given address.
 *
 * @param addr Address on a page whose TLB entry is to be invalidated.
 */
static inline void invlpg(uintptr_t addr)
{
	/* The "m" constraint makes *addr a memory operand, so invlpg
	 * receives the address itself rather than a register copy. */
	__asm__ volatile ("invlpg %0\n" :: "m" (*((unative_t *)addr)));
}
207 | 214 | ||
/** Load GDTR register from memory.
 *
 * Executes lgdtq on the given descriptor-table pointer.
 *
 * @param gdtr_reg Address of memory from where to load GDTR.
 */
static inline void gdtr_load(struct ptr_16_64 *gdtr_reg)
{
	__asm__ volatile ("lgdtq %0\n" : : "m" (*gdtr_reg));
}
216 | 223 | ||
/** Store GDTR register to memory.
 *
 * Executes sgdtq, writing the current GDTR into the given buffer.
 *
 * @param gdtr_reg Address of memory to where to store GDTR.
 */
static inline void gdtr_store(struct ptr_16_64 *gdtr_reg)
{
	__asm__ volatile ("sgdtq %0\n" : : "m" (*gdtr_reg));
}
225 | 232 | ||
/** Load IDTR register from memory.
 *
 * Executes lidtq on the given descriptor-table pointer.
 *
 * @param idtr_reg Address of memory from where to load IDTR.
 */
static inline void idtr_load(struct ptr_16_64 *idtr_reg)
{
	__asm__ volatile ("lidtq %0\n" : : "m" (*idtr_reg));
}
234 | 241 | ||
/** Load TR from descriptor table.
 *
 * Executes ltr with the given selector.
 *
 * @param sel Selector specifying descriptor of TSS segment.
 */
static inline void tr_load(uint16_t sel)
{
	__asm__ volatile ("ltr %0" : : "r" (sel));
}
243 | 250 | ||
/** Generate read_<reg>(): move the named control/debug register
 *  into a unative_t and return it. */
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
	{ \
	unative_t res; \
	__asm__ volatile ("movq %%" #reg ", %0" : "=r" (res) ); \
	return res; \
	}
250 | 257 | ||
/** Generate write_<reg>(): move a unative_t value into the named
 *  control/debug register. */
#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
	{ \
	__asm__ volatile ("movq %0, %%" #reg : : "r" (regn)); \
	}
255 | 262 | ||
/* Control register accessors (cr3 is the only writable one here). */
GEN_READ_REG(cr0);
GEN_READ_REG(cr2);
GEN_READ_REG(cr3);
GEN_WRITE_REG(cr3);

/* Debug register read accessors. */
GEN_READ_REG(dr0);
GEN_READ_REG(dr1);
GEN_READ_REG(dr2);
GEN_READ_REG(dr3);
GEN_READ_REG(dr6);
GEN_READ_REG(dr7);

/* Debug register write accessors. */
GEN_WRITE_REG(dr0);
GEN_WRITE_REG(dr1);
GEN_WRITE_REG(dr2);
GEN_WRITE_REG(dr3);
GEN_WRITE_REG(dr6);
GEN_WRITE_REG(dr7);
274 | 281 | ||
275 | extern size_t interrupt_handler_size; |
282 | extern size_t interrupt_handler_size; |
276 | extern void interrupt_handlers(void); |
283 | extern void interrupt_handlers(void); |
277 | 284 | ||
278 | #endif |
285 | #endif |
279 | 286 | ||
280 | /** @} |
287 | /** @} |
281 | */ |
288 | */ |
282 | 289 |