Rev 2015 | Rev 2071 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2015 | Rev 2048 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2005 Jakub Jermar |
2 | * Copyright (C) 2005 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup sparc64mm |
29 | /** @addtogroup sparc64mm |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | /** @file |
32 | /** @file |
33 | */ |
33 | */ |
34 | 34 | ||
35 | #include <arch/mm/page.h> |
35 | #include <arch/mm/page.h> |
36 | #include <arch/mm/tlb.h> |
36 | #include <arch/mm/tlb.h> |
37 | #include <genarch/mm/page_ht.h> |
37 | #include <genarch/mm/page_ht.h> |
38 | #include <mm/frame.h> |
38 | #include <mm/frame.h> |
39 | #include <arch/mm/frame.h> |
39 | #include <arch/mm/frame.h> |
40 | #include <bitops.h> |
40 | #include <bitops.h> |
41 | #include <debug.h> |
41 | #include <debug.h> |
42 | #include <align.h> |
42 | #include <align.h> |
43 | #include <config.h> |
43 | #include <config.h> |
44 | 44 | ||
#ifdef CONFIG_SMP
/** Entries locked in DTLB of BSP.
 *
 * Application processors need to have the same locked entries in their DTLBs as
 * the bootstrap processor. The BSP records every locked mapping it creates
 * (see hw_map()) so that APs can replay them in page_arch_init().
 */
static struct {
	uintptr_t virt_page;	/**< Virtual address of the locked page. */
	uintptr_t phys_page;	/**< Physical address backing the page. */
	int pagesize_code;	/**< Hardware page-size code passed to dtlb_insert_mapping(). */
} bsp_locked_dtlb_entry[DTLB_ENTRY_COUNT];

/** Number of entries in bsp_locked_dtlb_entry array. */
static count_t bsp_locked_dtlb_entries = 0;
#endif /* CONFIG_SMP */
60 | 60 | ||
61 | /** Perform sparc64 specific initialization of paging. */ |
61 | /** Perform sparc64 specific initialization of paging. */ |
62 | void page_arch_init(void) |
62 | void page_arch_init(void) |
63 | { |
63 | { |
64 | if (config.cpu_active == 1) { |
64 | if (config.cpu_active == 1) { |
65 | page_mapping_operations = &ht_mapping_operations; |
65 | page_mapping_operations = &ht_mapping_operations; |
66 | } else { |
66 | } else { |
67 | 67 | ||
68 | #ifdef CONFIG_SMP |
68 | #ifdef CONFIG_SMP |
69 | int i; |
69 | int i; |
70 | 70 | ||
71 | /* |
71 | /* |
72 | * Copy locked DTLB entries from the BSP. |
72 | * Copy locked DTLB entries from the BSP. |
73 | */ |
73 | */ |
74 | for (i = 0; i < bsp_locked_dtlb_entries; i++) { |
74 | for (i = 0; i < bsp_locked_dtlb_entries; i++) { |
75 | dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page, |
75 | dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page, |
76 | bsp_locked_dtlb_entry[i].phys_page, |
76 | bsp_locked_dtlb_entry[i].phys_page, |
77 | bsp_locked_dtlb_entry[i].pagesize_code, true, |
77 | bsp_locked_dtlb_entry[i].pagesize_code, true, |
78 | false); |
78 | false); |
79 | } |
79 | } |
80 | #endif |
80 | #endif |
81 | 81 | ||
82 | } |
82 | } |
83 | } |
83 | } |
84 | 84 | ||
/** Map memory-mapped device into virtual memory.
 *
 * So far, only DTLB is used to map devices into memory. Chances are that there
 * will be only a limited amount of devices that the kernel itself needs to
 * lock in DTLB.
 *
 * @param physaddr Physical address of the page where the device is located.
 *	Must be at least page-aligned.
 * @param size Size of the device's registers. Must not exceed 8M (the code
 *	below asserts size <= 8M and sizemap covers sizes up to 8M) and must
 *	include extra space caused by the alignment.
 *
 * @return Virtual address of the page where the device is mapped.
 */
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
	unsigned int order;
	int i;

	/* Only the bootstrap processor may create new locked mappings. */
	ASSERT(config.cpu_active == 1);

	/*
	 * For each supported total size (8K .. 8M, one row per power of two),
	 * describe how to compose the mapping out of locked DTLB entries:
	 * 'count' entries of page size 'pagesize_code', each 'increment'
	 * bytes apart. Indexed by 'order' (log2 of the size in frames).
	 */
	struct {
		int pagesize_code;
		size_t increment;
		count_t count;
	} sizemap[] = {
		{ PAGESIZE_8K, 0, 1 },			/* 8K */
		{ PAGESIZE_8K, PAGE_SIZE, 2 },		/* 16K */
		{ PAGESIZE_8K, PAGE_SIZE, 4 },		/* 32K */
		{ PAGESIZE_64K, 0, 1},			/* 64K */
		{ PAGESIZE_64K, 8 * PAGE_SIZE, 2 },	/* 128K */
		{ PAGESIZE_64K, 8 * PAGE_SIZE, 4 },	/* 256K */
		{ PAGESIZE_512K, 0, 1 },		/* 512K */
		{ PAGESIZE_512K, 64 * PAGE_SIZE, 2 },	/* 1M */
		{ PAGESIZE_512K, 64 * PAGE_SIZE, 4 },	/* 2M */
		{ PAGESIZE_4M, 0, 1 },			/* 4M */
		{ PAGESIZE_4M, 512 * PAGE_SIZE, 2 }	/* 8M */
	};

	ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
	/* 8M is the largest size sizemap[] can describe. */
	ASSERT(size <= 8 * 1024 * 1024);

	/*
	 * order = ceil(log2(size in frames)); fnzb64 finds the index of the
	 * first non-zero bit, so (fnzb64(size - 1) + 1) rounds up to the next
	 * power of two before subtracting FRAME_WIDTH.
	 */
	if (size <= FRAME_SIZE)
		order = 0;
	else
		order = (fnzb64(size - 1) + 1) - FRAME_WIDTH;

	/*
	 * Use virtual addresses that are beyond the limit of physical memory.
	 * Thus, the physical address space will not be wasted by holes created
	 * by frame_alloc().
	 */
	ASSERT(PA2KA(last_frame));
	uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame), 1 << (order + FRAME_WIDTH));
	last_frame = ALIGN_UP(KA2PA(virtaddr) + size, 1 << (order + FRAME_WIDTH));

	for (i = 0; i < sizemap[order].count; i++) {
		/*
		 * First, insert the mapping into DTLB.
		 */
		dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
		    physaddr + i * sizemap[order].increment,
		    sizemap[order].pagesize_code, true, false);

#ifdef CONFIG_SMP
		/*
		 * Second, save the information about the mapping for APs.
		 * page_arch_init() replays these entries on each AP.
		 * NOTE(review): no bounds check against DTLB_ENTRY_COUNT here;
		 * presumably the kernel never locks that many entries — verify.
		 */
		bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
		    virtaddr + i * sizemap[order].increment;
		bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
		    physaddr + i * sizemap[order].increment;
		bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
		    sizemap[order].pagesize_code;
		bsp_locked_dtlb_entries++;
#endif
	}

	return virtaddr;
}
167 | 164 | ||
168 | /** @} |
165 | /** @} |
169 | */ |
166 | */ |
170 | 167 |