/*
 * Copyright (c) 2005 Jakub Jermar
 * Copyright (c) 2008 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */
35 | 35 | ||
36 | #ifndef KERN_sparc64_sun4v_TLB_H_ |
36 | #ifndef KERN_sparc64_sun4v_TLB_H_ |
37 | #define KERN_sparc64_sun4v_TLB_H_ |
37 | #define KERN_sparc64_sun4v_TLB_H_ |
38 | 38 | ||
39 | #define MMU_FSA_ALIGNMENT 64 |
39 | #define MMU_FSA_ALIGNMENT 64 |
40 | #define MMU_FSA_SIZE 128 |
40 | #define MMU_FSA_SIZE 128 |
41 | 41 | ||
42 | #ifndef __ASM__ |
42 | #ifndef __ASM__ |
43 | 43 | ||
44 | #include <arch/mm/tte.h> |
44 | #include <arch/mm/tte.h> |
45 | #include <arch/mm/mmu.h> |
45 | #include <arch/mm/mmu.h> |
46 | #include <arch/mm/page.h> |
46 | #include <arch/mm/page.h> |
47 | #include <arch/asm.h> |
47 | #include <arch/asm.h> |
48 | #include <arch/barrier.h> |
48 | #include <arch/barrier.h> |
49 | #include <arch/types.h> |
49 | #include <arch/types.h> |
50 | #include <arch/register.h> |
50 | #include <arch/register.h> |
51 | #include <arch/cpu.h> |
51 | #include <arch/cpu.h> |
52 | #include <arch/sun4v/hypercall.h> |
52 | #include <arch/sun4v/hypercall.h> |
53 | 53 | ||
/**
 * Structure filled by hypervisor (or directly CPU, if implemented so) when
 * a MMU fault occurs. The structure describes the exact condition which
 * has caused the fault.
 *
 * The instruction and data sub-areas each occupy 64 bytes
 * (3 x 8-byte fields + 0x28 bytes of padding), matching
 * MMU_FSA_ALIGNMENT; the whole structure is MMU_FSA_SIZE (128) bytes.
 */
typedef struct mmu_fault_status_area {
	uint64_t ift;		/**< Instruction fault type (IFT) */
	uint64_t ifa;		/**< Instruction fault address (IFA) */
	uint64_t ifc;		/**< Instruction fault context (IFC) */
	uint8_t reserved1[0x28];	/**< Pad instruction sub-area to 64 bytes. */

	uint64_t dft;		/**< Data fault type (DFT) */
	uint64_t dfa;		/**< Data fault address (DFA) */
	uint64_t dfc;		/**< Data fault context (DFC) */
	uint8_t reserved2[0x28];	/**< Pad data sub-area to 64 bytes. */
} __attribute__ ((packed)) mmu_fault_status_area_t;
70 | 70 | ||
71 | #define DTLB_MAX_LOCKED_ENTRIES 8 |
71 | #define DTLB_MAX_LOCKED_ENTRIES 8 |
72 | 72 | ||
73 | /** Bit width of the TLB-locked portion of kernel address space. */ |
73 | /** Bit width of the TLB-locked portion of kernel address space. */ |
74 | #define KERNEL_PAGE_WIDTH 22 /* 4M */ |
74 | #define KERNEL_PAGE_WIDTH 22 /* 4M */ |
75 | 75 | ||
76 | /* |
76 | /* |
77 | * Reading and writing context registers. |
77 | * Reading and writing context registers. |
78 | * |
78 | * |
79 | * Note that UltraSPARC Architecture-compatible processors do not require |
79 | * Note that UltraSPARC Architecture-compatible processors do not require |
80 | * a MEMBAR #Sync, FLUSH, DONE, or RETRY instruction after a store to an |
80 | * a MEMBAR #Sync, FLUSH, DONE, or RETRY instruction after a store to an |
81 | * MMU register for proper operation. |
81 | * MMU register for proper operation. |
82 | * |
82 | * |
83 | */ |
83 | */ |
84 | 84 | ||
/** Read MMU Primary Context Register.
 *
 * Performs a privileged 64-bit load from the MMU's primary context
 * register via its ASI/VA pair.
 *
 * @return Current value of Primary Context Register.
 */
static inline uint64_t mmu_primary_context_read(void)
{
	return asi_u64_read(ASI_PRIMARY_CONTEXT_REG, VA_PRIMARY_CONTEXT_REG);
}
93 | 93 | ||
/** Write MMU Primary Context Register.
 *
 * Performs a privileged 64-bit store to the MMU's primary context
 * register via its ASI/VA pair. Per the note above, no MEMBAR/FLUSH
 * is required afterwards on UltraSPARC Architecture processors.
 *
 * @param v New value of Primary Context Register.
 */
static inline void mmu_primary_context_write(uint64_t v)
{
	asi_u64_write(ASI_PRIMARY_CONTEXT_REG, VA_PRIMARY_CONTEXT_REG, v);
}
102 | 102 | ||
/** Read MMU Secondary Context Register.
 *
 * Performs a privileged 64-bit load from the MMU's secondary context
 * register via its ASI/VA pair.
 *
 * @return Current value of Secondary Context Register.
 */
static inline uint64_t mmu_secondary_context_read(void)
{
	return asi_u64_read(ASI_SECONDARY_CONTEXT_REG, VA_SECONDARY_CONTEXT_REG);
}
111 | 111 | ||
/** Write MMU Secondary Context Register.
 *
 * Performs a privileged 64-bit store to the MMU's secondary context
 * register via its ASI/VA pair.
 *
 * @param v New value of Secondary Context Register.
 */
static inline void mmu_secondary_context_write(uint64_t v)
{
	asi_u64_write(ASI_SECONDARY_CONTEXT_REG, VA_SECONDARY_CONTEXT_REG, v);
}
120 | 120 | ||
/** Perform IMMU TLB Demap Operation.
 *
 * Intentionally a no-op here: on sun4v, TLB demapping appears to be done
 * through the hypervisor's MMU_DEMAP_* services (see mmu_demap_ctx() and
 * mmu_demap_page() below) rather than the sun4u demap ASI.
 * NOTE(review): confirm that all callers of itlb_demap() on sun4v are
 * covered by the hypercall-based demap paths.
 *
 * @param type Selects between context and page demap (and entire MMU
 * 	demap on US3).
 * @param context_encoding Specifies which Context register has Context ID for
 * 	demap.
 * @param page Address which is on the page to be demapped.
 */
static inline void itlb_demap(int type, int context_encoding, uintptr_t page)
{
	/* Empty on purpose — see the comment above. */
}
132 | 132 | ||
/** Perform DMMU TLB Demap Operation.
 *
 * Currently a no-op: the hypercall-based body below is disabled because,
 * as the original author notes, it is not correct. The likely problem is
 * that @a context_encoding is a sun4u-style Context *register* selector
 * (primary/secondary/nucleus), not the numeric context ID that the sun4v
 * MMU_DEMAP_PAGE/MMU_DEMAP_CTX hypercalls expect — TODO confirm against
 * the sun4v hypervisor API and the callers.
 *
 * @param type One of TLB_DEMAP_PAGE and TLB_DEMAP_CONTEXT. Selects
 * 	between context and page demap.
 * @param context_encoding Specifies which Context register has Context ID for
 * 	demap.
 * @param page Address which is on the page to be demapped.
 */
static inline void dtlb_demap(int type, int context_encoding, uintptr_t page)
{
#if 0
	- this implementation is not correct!!!
	if (type == TLB_DEMAP_PAGE) {
		__hypercall_fast5(
			MMU_DEMAP_PAGE, 0, 0,
			page, context_encoding, MMU_FLAG_DTLB);
	} else if (type == TLB_DEMAP_CONTEXT) {
		__hypercall_fast4(
			MMU_DEMAP_CTX, 0, 0,
			context_encoding, MMU_FLAG_DTLB);
	}
#endif
}
156 | 156 | ||
/**
 * Demaps all mappings in a context.
 *
 * Issues the MMU_DEMAP_CTX hypervisor service. The two zero arguments
 * preceding @a context are presumably reserved per the sun4v hypervisor
 * API (they are passed as 0 in mmu_demap_page() as well) — verify
 * against the hypervisor specification.
 *
 * @param context number of the context
 * @param mmu_flag MMU_FLAG_DTLB, MMU_FLAG_ITLB or a combination of both
 */
static inline void mmu_demap_ctx(int context, int mmu_flag) {
	__hypercall_fast4(MMU_DEMAP_CTX, 0, 0, context, mmu_flag);
}
166 | 166 | ||
/**
 * Demaps the mapping of a single page in a context.
 *
 * Issues the MMU_DEMAP_PAGE hypervisor service; the two zero arguments
 * are presumably reserved per the sun4v hypervisor API — verify against
 * the hypervisor specification.
 *
 * @param vaddr virtual address within the page to be demapped
 * @param context number of the context
 * @param mmu_flag MMU_FLAG_DTLB, MMU_FLAG_ITLB or a combination of both
 */
static inline void mmu_demap_page(uintptr_t vaddr, int context, int mmu_flag) {
	__hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, vaddr, context, mmu_flag);
}
170 | 170 | ||
171 | static inline void mmu_map_perm_addr(uintptr_t vaddr, uintptr_t ra, |
171 | static inline void mmu_map_perm_addr(uintptr_t vaddr, uintptr_t ra, |
172 | bool cacheable, bool privileged, bool executable, |
172 | bool cacheable, bool privileged, bool executable, |
173 | bool writable, unsigned size, unsigned mmu_flags) { |
173 | bool writable, unsigned size, unsigned mmu_flags) { |
174 | 174 | ||
175 | tte_data_t data; |
175 | tte_data_t data; |
176 | data.value = 0; |
176 | data.value = 0; |
177 | 177 | ||
178 | data.v = true; |
178 | data.v = true; |
179 | data.ra = ra; |
179 | data.ra = ra; |
180 | data.cp = data.cv = cacheable; |
180 | data.cp = data.cv = cacheable; |
181 | data.p = privileged; |
181 | data.p = privileged; |
182 | data.x = executable; |
182 | data.x = executable; |
183 | data.w = writable; |
183 | data.w = writable; |
184 | data.size = size; |
184 | data.size = size; |
185 | 185 | ||
186 | __hypercall_fast4(MMU_MAP_PERM_ADDR, vaddr, 0, data.value, mmu_flags); |
186 | __hypercall_fast4(MMU_MAP_PERM_ADDR, vaddr, 0, data.value, mmu_flags); |
187 | } |
187 | } |
188 | 188 | ||
189 | extern void fast_instruction_access_mmu_miss(unative_t, istate_t *); |
189 | extern void fast_instruction_access_mmu_miss(unative_t, istate_t *); |
- | 190 | extern void fast_data_access_mmu_miss(unative_t, istate_t *); |
|
- | 191 | extern void fast_data_access_protection(unative_t, istate_t *); |
|
190 | 192 | ||
191 | extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool); |
193 | extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool); |
192 | 194 | ||
193 | extern void describe_mmu_fault(void); |
195 | extern void describe_mmu_fault(void); |
194 | 196 | ||
195 | #endif /* !def __ASM__ */ |
197 | #endif /* !def __ASM__ */ |
196 | 198 | ||
197 | #endif |
199 | #endif |
198 | 200 | ||
199 | /** @} |
201 | /** @} |
200 | */ |
202 | */ |
201 | 203 |