Rev 1903 | Rev 1918 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1903 | Rev 1917 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2005 Jakub Jermar |
2 | * Copyright (C) 2005 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup sparc64mm |
29 | /** @addtogroup sparc64mm |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | /** @file |
32 | /** @file |
33 | */ |
33 | */ |
34 | 34 | ||
35 | #include <arch/mm/page.h> |
35 | #include <arch/mm/page.h> |
36 | #include <arch/mm/tlb.h> |
36 | #include <arch/mm/tlb.h> |
37 | #include <genarch/mm/page_ht.h> |
37 | #include <genarch/mm/page_ht.h> |
38 | #include <mm/frame.h> |
38 | #include <mm/frame.h> |
39 | #include <arch/mm/frame.h> |
39 | #include <arch/mm/frame.h> |
40 | #include <bitops.h> |
40 | #include <bitops.h> |
41 | #include <debug.h> |
41 | #include <debug.h> |
42 | #include <align.h> |
42 | #include <align.h> |
43 | #include <config.h> |
43 | #include <config.h> |
44 | 44 | ||
#ifdef CONFIG_SMP
/** Entries locked in DTLB of BSP.
 *
 * Application processors need to have the same locked entries
 * in their DTLBs as the bootstrap processor.
 */
static struct {
	uintptr_t virt_page;	/**< Virtual address of the locked page. */
	uintptr_t phys_page;	/**< Physical address backing virt_page. */
	int pagesize_code;	/**< Hardware page-size encoding (PAGESIZE_*). */
} bsp_locked_dtlb_entry[DTLB_ENTRY_COUNT];

/** Number of valid entries in the bsp_locked_dtlb_entry array. */
static count_t bsp_locked_dtlb_entries = 0;
#endif /* CONFIG_SMP */
60 | 60 | ||
61 | /** Perform sparc64 specific initialization of paging. */ |
61 | /** Perform sparc64 specific initialization of paging. */ |
62 | void page_arch_init(void) |
62 | void page_arch_init(void) |
63 | { |
63 | { |
64 | if (config.cpu_active == 1) { |
64 | if (config.cpu_active == 1) { |
65 | page_mapping_operations = &ht_mapping_operations; |
65 | page_mapping_operations = &ht_mapping_operations; |
66 | } else { |
66 | } else { |
67 | 67 | ||
68 | #ifdef CONFIG_SMP |
68 | #ifdef CONFIG_SMP |
69 | int i; |
69 | int i; |
70 | 70 | ||
71 | /* |
71 | /* |
72 | * Copy locked DTLB entries from the BSP. |
72 | * Copy locked DTLB entries from the BSP. |
73 | */ |
73 | */ |
74 | for (i = 0; i < bsp_locked_dtlb_entries; i++) { |
74 | for (i = 0; i < bsp_locked_dtlb_entries; i++) { |
75 | dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page, |
75 | dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page, |
76 | bsp_locked_dtlb_entry[i].phys_page, bsp_locked_dtlb_entry[i].pagesize_code, |
76 | bsp_locked_dtlb_entry[i].phys_page, bsp_locked_dtlb_entry[i].pagesize_code, |
77 | true, false); |
77 | true, false); |
78 | } |
78 | } |
79 | #endif |
79 | #endif |
80 | 80 | ||
81 | } |
81 | } |
82 | } |
82 | } |
83 | 83 | ||
84 | /** Map memory-mapped device into virtual memory. |
84 | /** Map memory-mapped device into virtual memory. |
85 | * |
85 | * |
86 | * So far, only DTLB is used to map devices into memory. |
86 | * So far, only DTLB is used to map devices into memory. |
87 | * Chances are that there will be only a limited amount of |
87 | * Chances are that there will be only a limited amount of |
88 | * devices that the kernel itself needs to lock in DTLB. |
88 | * devices that the kernel itself needs to lock in DTLB. |
89 | * |
89 | * |
90 | * @param physaddr Physical address of the page where the |
90 | * @param physaddr Physical address of the page where the |
91 | * device is located. Must be at least |
91 | * device is located. Must be at least |
92 | * page-aligned. |
92 | * page-aligned. |
93 | * @param size Size of the device's registers. Must not |
93 | * @param size Size of the device's registers. Must not |
94 | * exceed 4M and must include extra space |
94 | * exceed 4M and must include extra space |
95 | * caused by the alignment. |
95 | * caused by the alignment. |
96 | * |
96 | * |
97 | * @return Virtual address of the page where the device is |
97 | * @return Virtual address of the page where the device is |
98 | * mapped. |
98 | * mapped. |
99 | */ |
99 | */ |
100 | uintptr_t hw_map(uintptr_t physaddr, size_t size) |
100 | uintptr_t hw_map(uintptr_t physaddr, size_t size) |
101 | { |
101 | { |
102 | unsigned int order; |
102 | unsigned int order; |
103 | int i; |
103 | int i; |
104 | 104 | ||
105 | ASSERT(config.cpu_active == 1); |
105 | ASSERT(config.cpu_active == 1); |
106 | 106 | ||
107 | struct { |
107 | struct { |
108 | int pagesize_code; |
108 | int pagesize_code; |
109 | size_t increment; |
109 | size_t increment; |
110 | count_t count; |
110 | count_t count; |
111 | } sizemap[] = { |
111 | } sizemap[] = { |
112 | { PAGESIZE_8K, 0, 1 }, /* 8K */ |
112 | { PAGESIZE_8K, 0, 1 }, /* 8K */ |
113 | { PAGESIZE_8K, PAGE_SIZE, 2 }, /* 16K */ |
113 | { PAGESIZE_8K, PAGE_SIZE, 2 }, /* 16K */ |
114 | { PAGESIZE_8K, PAGE_SIZE, 4 }, /* 32K */ |
114 | { PAGESIZE_8K, PAGE_SIZE, 4 }, /* 32K */ |
115 | { PAGESIZE_64K, 0, 1}, /* 64K */ |
115 | { PAGESIZE_64K, 0, 1}, /* 64K */ |
116 | { PAGESIZE_64K, 8*PAGE_SIZE, 2 }, /* 128K */ |
116 | { PAGESIZE_64K, 8*PAGE_SIZE, 2 }, /* 128K */ |
117 | { PAGESIZE_64K, 8*PAGE_SIZE, 4 }, /* 256K */ |
117 | { PAGESIZE_64K, 8*PAGE_SIZE, 4 }, /* 256K */ |
118 | { PAGESIZE_512K, 0, 1 }, /* 512K */ |
118 | { PAGESIZE_512K, 0, 1 }, /* 512K */ |
119 | { PAGESIZE_512K, 64*PAGE_SIZE, 2 }, /* 1M */ |
119 | { PAGESIZE_512K, 64*PAGE_SIZE, 2 }, /* 1M */ |
120 | { PAGESIZE_512K, 64*PAGE_SIZE, 4 }, /* 2M */ |
120 | { PAGESIZE_512K, 64*PAGE_SIZE, 4 }, /* 2M */ |
121 | { PAGESIZE_4M, 0, 1 } /* 4M */ |
121 | { PAGESIZE_4M, 0, 1 } /* 4M */ |
- | 122 | { PAGESIZE_4M, 512*PAGE_SIZE, 2 } /* 8M */ |
|
122 | }; |
123 | }; |
123 | 124 | ||
124 | ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr); |
125 | ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr); |
125 | ASSERT(size <= 4*1024*1024); |
126 | ASSERT(size <= 8*1024*1024); |
126 | 127 | ||
127 | if (size <= FRAME_SIZE) |
128 | if (size <= FRAME_SIZE) |
128 | order = 0; |
129 | order = 0; |
129 | else |
130 | else |
130 | order = (fnzb64(size - 1) + 1) - FRAME_WIDTH; |
131 | order = (fnzb64(size - 1) + 1) - FRAME_WIDTH; |
131 | 132 | ||
132 | /* |
133 | /* |
133 | * Use virtual addresses that are beyond the limit of physical memory. |
134 | * Use virtual addresses that are beyond the limit of physical memory. |
134 | * Thus, the physical address space will not be wasted by holes created |
135 | * Thus, the physical address space will not be wasted by holes created |
135 | * by frame_alloc(). |
136 | * by frame_alloc(). |
136 | */ |
137 | */ |
137 | ASSERT(last_frame); |
138 | ASSERT(last_frame); |
138 | uintptr_t virtaddr = ALIGN_UP(last_frame, 1<<(order + FRAME_WIDTH)); |
139 | uintptr_t virtaddr = ALIGN_UP(last_frame, 1<<(order + FRAME_WIDTH)); |
139 | last_frame = ALIGN_UP(virtaddr + size, 1<<(order + FRAME_WIDTH)); |
140 | last_frame = ALIGN_UP(virtaddr + size, 1<<(order + FRAME_WIDTH)); |
140 | 141 | ||
141 | for (i = 0; i < sizemap[order].count; i++) { |
142 | for (i = 0; i < sizemap[order].count; i++) { |
142 | /* |
143 | /* |
143 | * First, insert the mapping into DTLB. |
144 | * First, insert the mapping into DTLB. |
144 | */ |
145 | */ |
145 | dtlb_insert_mapping(virtaddr + i*sizemap[order].increment, |
146 | dtlb_insert_mapping(virtaddr + i*sizemap[order].increment, |
146 | physaddr + i*sizemap[order].increment, |
147 | physaddr + i*sizemap[order].increment, |
147 | sizemap[order].pagesize_code, true, false); |
148 | sizemap[order].pagesize_code, true, false); |
148 | 149 | ||
149 | #ifdef CONFIG_SMP |
150 | #ifdef CONFIG_SMP |
150 | /* |
151 | /* |
151 | * Second, save the information about the mapping for APs. |
152 | * Second, save the information about the mapping for APs. |
152 | */ |
153 | */ |
153 | bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page = virtaddr + i*sizemap[order].increment; |
154 | bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page = virtaddr + i*sizemap[order].increment; |
154 | bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page = physaddr + i*sizemap[order].increment; |
155 | bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page = physaddr + i*sizemap[order].increment; |
155 | bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code = sizemap[order].pagesize_code; |
156 | bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code = sizemap[order].pagesize_code; |
156 | bsp_locked_dtlb_entries++; |
157 | bsp_locked_dtlb_entries++; |
157 | #endif |
158 | #endif |
158 | } |
159 | } |
159 | 160 | ||
160 | return virtaddr; |
161 | return virtaddr; |
161 | } |
162 | } |
162 | 163 | ||
163 | /** @} |
164 | /** @} |
164 | */ |
165 | */ |
165 | 166 |