Rev 3743 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 3743 | Rev 3770 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2005 Jakub Jermar |
2 | * Copyright (c) 2005 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup sparc64mm |
29 | /** @addtogroup sparc64mm |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | /** @file |
32 | /** @file |
33 | */ |
33 | */ |
34 | 34 | ||
35 | #include <arch/mm/page.h> |
35 | #include <arch/mm/page.h> |
- | 36 | #include <arch/mm/pagesize.h> |
|
36 | #include <arch/mm/tlb.h> |
37 | #include <arch/mm/sun4u/tlb.h> |
37 | #include <arch/mm/sun4u/tlb.h> |
38 | #include <arch/mm/sun4u/tlb.h> |
38 | #include <genarch/mm/page_ht.h> |
39 | #include <genarch/mm/page_ht.h> |
39 | #include <mm/frame.h> |
40 | #include <mm/frame.h> |
40 | #include <arch/mm/frame.h> |
41 | #include <arch/mm/frame.h> |
41 | #include <bitops.h> |
42 | #include <bitops.h> |
42 | #include <debug.h> |
43 | #include <debug.h> |
43 | #include <align.h> |
44 | #include <align.h> |
44 | #include <config.h> |
45 | #include <config.h> |
45 | 46 | ||
#ifdef CONFIG_SMP
/** Entries locked in DTLB of BSP.
 *
 * Application processors need to have the same locked entries in their DTLBs as
 * the bootstrap processor. hw_map() records each locked mapping here so that
 * page_arch_init() can replay it on every AP.
 */
static struct {
	uintptr_t virt_page;	/**< Virtual address of the locked page. */
	uintptr_t phys_page;	/**< Physical address the page maps to. */
	int pagesize_code;	/**< MMU page-size code (e.g. PAGESIZE_8K). */
} bsp_locked_dtlb_entry[DTLB_MAX_LOCKED_ENTRIES];

/** Number of entries in bsp_locked_dtlb_entry array. */
static count_t bsp_locked_dtlb_entries = 0;
#endif /* CONFIG_SMP */
61 | 62 | ||
62 | /** Perform sparc64 specific initialization of paging. */ |
63 | /** Perform sparc64 specific initialization of paging. */ |
63 | void page_arch_init(void) |
64 | void page_arch_init(void) |
64 | { |
65 | { |
65 | if (config.cpu_active == 1) { |
66 | if (config.cpu_active == 1) { |
66 | page_mapping_operations = &ht_mapping_operations; |
67 | page_mapping_operations = &ht_mapping_operations; |
67 | } else { |
68 | } else { |
68 | 69 | ||
69 | #ifdef CONFIG_SMP |
70 | #ifdef CONFIG_SMP |
70 | unsigned int i; |
71 | unsigned int i; |
71 | 72 | ||
72 | /* |
73 | /* |
73 | * Copy locked DTLB entries from the BSP. |
74 | * Copy locked DTLB entries from the BSP. |
74 | */ |
75 | */ |
75 | for (i = 0; i < bsp_locked_dtlb_entries; i++) { |
76 | for (i = 0; i < bsp_locked_dtlb_entries; i++) { |
76 | dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page, |
77 | dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page, |
77 | bsp_locked_dtlb_entry[i].phys_page, |
78 | bsp_locked_dtlb_entry[i].phys_page, |
78 | bsp_locked_dtlb_entry[i].pagesize_code, true, |
79 | bsp_locked_dtlb_entry[i].pagesize_code, true, |
79 | false); |
80 | false); |
80 | } |
81 | } |
81 | #endif |
82 | #endif |
82 | 83 | ||
83 | } |
84 | } |
84 | } |
85 | } |
85 | 86 | ||
/** Map memory-mapped device into virtual memory.
 *
 * So far, only DTLB is used to map devices into memory. Chances are that there
 * will be only a limited amount of devices that the kernel itself needs to
 * lock in DTLB.
 *
 * @param physaddr Physical address of the page where the device is located.
 *	Must be at least page-aligned.
 * @param size Size of the device's registers. Must not exceed 8M (see the
 *	ASSERT and the last sizemap entry below) and must include extra space
 *	caused by the alignment.
 *
 * @return Virtual address of the page where the device is mapped.
 */
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
	unsigned int order;
	unsigned int i;

	/* Devices may only be mapped before application processors start. */
	ASSERT(config.cpu_active == 1);

	/*
	 * For each allocation order (power-of-two multiple of MMU_FRAME_SIZE):
	 * the TLB page-size code to use, the distance between consecutive
	 * mapped pages, and the number of locked entries covering the area.
	 */
	struct {
		int pagesize_code;
		size_t increment;
		count_t count;
	} sizemap[] = {
		{ PAGESIZE_8K, 0, 1 },			/* 8K */
		{ PAGESIZE_8K, MMU_PAGE_SIZE, 2 },	/* 16K */
		{ PAGESIZE_8K, MMU_PAGE_SIZE, 4 },	/* 32K */
		{ PAGESIZE_64K, 0, 1},			/* 64K */
		{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 },	/* 128K */
		{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 },	/* 256K */
		{ PAGESIZE_512K, 0, 1 },		/* 512K */
		{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 },	/* 1M */
		{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 },	/* 2M */
		{ PAGESIZE_4M, 0, 1 },			/* 4M */
		{ PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 }	/* 8M */
	};

	ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
	ASSERT(size <= 8 * 1024 * 1024);

	/* order = ceil(log2(size / MMU_FRAME_SIZE)); 0 when size fits one frame. */
	if (size <= MMU_FRAME_SIZE)
		order = 0;
	else
		order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;

	/*
	 * Use virtual addresses that are beyond the limit of physical memory.
	 * Thus, the physical address space will not be wasted by holes created
	 * by frame_alloc().
	 */
	ASSERT(PA2KA(last_frame));
	/*
	 * NOTE(review): order is derived from MMU_FRAME_WIDTH above, but the
	 * alignment below uses FRAME_WIDTH — confirm the two widths agree on
	 * sparc64 (they match only when FRAME_SIZE == MMU_FRAME_SIZE).
	 */
	uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
	    1 << (order + FRAME_WIDTH));
	/* Reserve the mirrored physical range so frame_alloc() skips it. */
	last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
	    1 << (order + FRAME_WIDTH));

	for (i = 0; i < sizemap[order].count; i++) {
		/*
		 * First, insert the mapping into DTLB.
		 */
		dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
		    physaddr + i * sizemap[order].increment,
		    sizemap[order].pagesize_code, true, false);

#ifdef CONFIG_SMP
		/*
		 * Second, save the information about the mapping for APs.
		 */
		bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
		    virtaddr + i * sizemap[order].increment;
		bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
		    physaddr + i * sizemap[order].increment;
		bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
		    sizemap[order].pagesize_code;
		bsp_locked_dtlb_entries++;
#endif
	}

	return virtaddr;
}
167 | 168 | ||
168 | /** @} |
169 | /** @} |
169 | */ |
170 | */ |
170 | 171 | ||
171 | 172 |