Diff between Rev 3675 and Rev 4377

--- Rev 3675
+++ Rev 4377
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2005 Jakub Jermar
+ * Copyright (c) 2009 Jakub Jermar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -24,11 +24,11 @@
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 /** @addtogroup sparc64mm
  * @{
  */
 /** @file
  */
 
@@ -40,45 +40,12 @@
 #include <bitops.h>
 #include <debug.h>
 #include <align.h>
 #include <config.h>
 
-#ifdef CONFIG_SMP
-/** Entries locked in DTLB of BSP.
- *
- * Application processors need to have the same locked entries in their DTLBs as
- * the bootstrap processor.
- */
-static struct {
-    uintptr_t virt_page;
-    uintptr_t phys_page;
-    int pagesize_code;
-} bsp_locked_dtlb_entry[DTLB_MAX_LOCKED_ENTRIES];
-
-/** Number of entries in bsp_locked_dtlb_entry array. */
-static count_t bsp_locked_dtlb_entries = 0;
-#endif /* CONFIG_SMP */
-
 /** Perform sparc64 specific initialization of paging. */
 void page_arch_init(void)
 {
-    if (config.cpu_active == 1) {
+    if (config.cpu_active == 1)
         page_mapping_operations = &ht_mapping_operations;
-    } else {
-
-#ifdef CONFIG_SMP
-        unsigned int i;
-
-        /*
-         * Copy locked DTLB entries from the BSP.
-         */
-        for (i = 0; i < bsp_locked_dtlb_entries; i++) {
-            dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
-                bsp_locked_dtlb_entry[i].phys_page,
-                bsp_locked_dtlb_entry[i].pagesize_code, true,
-                false);
-        }
-#endif
-
-    }
 }
 
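Note on the deletion above: the removed machinery kept application processors
consistent with the bootstrap processor. The BSP runs page_arch_init() first
(config.cpu_active == 1) and recorded every DTLB entry it locked via hw_map();
each AP ran page_arch_init() later and replayed the recorded entries into its
own DTLB. A compressed, self-contained sketch of that record-and-replay idea
(the helper names, the stub for dtlb_insert_mapping() and the capacity value
are illustrative, not HelenOS definitions; the 1/0 flags mirror the
locked/cacheable arguments in the removed code):

    #include <stdint.h>

    #define DTLB_MAX_LOCKED_ENTRIES 8    /* illustrative capacity */

    /* Stub standing in for the real TLB-insert primitive. */
    static void dtlb_insert_mapping(uintptr_t page, uintptr_t frame,
        int pagesize, int locked, int cacheable)
    {
        (void) page; (void) frame; (void) pagesize;
        (void) locked; (void) cacheable;
    }

    static struct {
        uintptr_t virt_page;
        uintptr_t phys_page;
        int pagesize_code;
    } bsp_locked_dtlb_entry[DTLB_MAX_LOCKED_ENTRIES];

    static unsigned int bsp_locked_dtlb_entries;

    /* BSP side: remember each locked entry as hw_map() creates it. */
    static void record_entry(uintptr_t virt, uintptr_t phys, int psc)
    {
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page = virt;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page = phys;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code = psc;
        bsp_locked_dtlb_entries++;
    }

    /* AP side: replay the BSP's entries into the local DTLB. */
    static void replay_entries(void)
    {
        unsigned int i;
        for (i = 0; i < bsp_locked_dtlb_entries; i++)
            dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
                bsp_locked_dtlb_entry[i].phys_page,
                bsp_locked_dtlb_entry[i].pagesize_code, 1, 0);
    }

With hw_map() no longer locking DTLB entries (see the next hunk), there is
nothing left to record or replay, so the whole block could go.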
@@ -85,81 +52,15 @@
 /** Map memory-mapped device into virtual memory.
  *
- * So far, only DTLB is used to map devices into memory. Chances are that there
- * will be only a limited amount of devices that the kernel itself needs to
- * lock in DTLB.
+ * We are currently using identity mapping for mapping device registers.
  *
- * @param physaddr Physical address of the page where the device is located.
- *	Must be at least page-aligned.
- * @param size Size of the device's registers. Must not exceed 4M and must
- *	include extra space caused by the alignment.
+ * @param physaddr Physical address of the page where the device is
+ *	located.
+ * @param size Size of the device's registers.
  *
  * @return Virtual address of the page where the device is mapped.
+ *
  */
 uintptr_t hw_map(uintptr_t physaddr, size_t size)
 {
-    unsigned int order;
-    unsigned int i;
-
-    ASSERT(config.cpu_active == 1);
-
-    struct {
-        int pagesize_code;
-        size_t increment;
-        count_t count;
-    } sizemap[] = {
-        { PAGESIZE_8K, 0, 1 },                      /* 8K */
-        { PAGESIZE_8K, MMU_PAGE_SIZE, 2 },          /* 16K */
-        { PAGESIZE_8K, MMU_PAGE_SIZE, 4 },          /* 32K */
-        { PAGESIZE_64K, 0, 1},                      /* 64K */
-        { PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 },     /* 128K */
-        { PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 },     /* 256K */
-        { PAGESIZE_512K, 0, 1 },                    /* 512K */
-        { PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 },   /* 1M */
-        { PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 },   /* 2M */
-        { PAGESIZE_4M, 0, 1 },                      /* 4M */
-        { PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 }     /* 8M */
-    };
-
-    ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
-    ASSERT(size <= 8 * 1024 * 1024);
-
-    if (size <= MMU_FRAME_SIZE)
-        order = 0;
-    else
-        order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;
-
-    /*
-     * Use virtual addresses that are beyond the limit of physical memory.
-     * Thus, the physical address space will not be wasted by holes created
-     * by frame_alloc().
-     */
-    ASSERT(PA2KA(last_frame));
-    uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
-        1 << (order + FRAME_WIDTH));
-    last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
-        1 << (order + FRAME_WIDTH));
-
-    for (i = 0; i < sizemap[order].count; i++) {
-        /*
-         * First, insert the mapping into DTLB.
-         */
-        dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
-            physaddr + i * sizemap[order].increment,
-            sizemap[order].pagesize_code, true, false);
-
-#ifdef CONFIG_SMP
-        /*
-         * Second, save the information about the mapping for APs.
-         */
-        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
-            virtaddr + i * sizemap[order].increment;
-        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
-            physaddr + i * sizemap[order].increment;
-        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
-            sizemap[order].pagesize_code;
-        bsp_locked_dtlb_entries++;
-#endif
-    }
-
-    return virtaddr;
+    return PA2KA(physaddr);
 }
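For reference, the sizing logic that the identity-mapped hw_map() makes
unnecessary worked as follows: round size up to the next power of two, then
let sizemap[] pick 1, 2 or 4 locked entries of the largest page size that
fits. A self-contained worked example (fnzb64() is reimplemented here under
the assumption, consistent with its use above, that it returns the index of
the most significant set bit; the constants are illustrative 8K-frame
values):

    #include <stdint.h>
    #include <stdio.h>

    #define MMU_FRAME_WIDTH 13                      /* 8K MMU frames */
    #define MMU_FRAME_SIZE  (1 << MMU_FRAME_WIDTH)

    /* Assumed behavior: index of the most significant set bit. */
    static unsigned int fnzb64(uint64_t arg)
    {
        unsigned int n = 0;
        while (arg >>= 1)
            n++;
        return n;
    }

    int main(void)
    {
        uint64_t size = 96 * 1024;      /* a 96K device register block */
        unsigned int order;

        if (size <= MMU_FRAME_SIZE)
            order = 0;
        else
            order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;

        /*
         * 96K rounds up to 128K, i.e. order 4: sizemap[4] above says
         * "two locked 64K pages, 64K (8 * MMU_PAGE_SIZE) apart".
         */
        printf("order = %u\n", order);  /* prints: order = 4 */
        return 0;
    }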
@@ -166,4 +67,3 @@
 
 /** @}
  */
-
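Design note on the new hw_map(): with identity mapping, the kernel virtual
address of a device page is a pure function of its physical address, so no
per-device TLB setup, no virtual-address allocation beyond last_frame, and no
BSP/AP synchronization are needed; presumably the TLB miss handler can now
establish such kernel mappings on demand. A minimal sketch of the idea,
assuming a fixed-offset PA2KA()/KA2PA() pair (the offset value is
illustrative, not the actual sparc64 definition):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative identity window: physical 0 maps at this base. */
    #define KM_BASE    UINT64_C(0x0)
    #define PA2KA(pa)  ((uintptr_t) (pa) + KM_BASE)
    #define KA2PA(ka)  ((uintptr_t) (ka) - KM_BASE)

    uintptr_t hw_map_sketch(uintptr_t physaddr, size_t size)
    {
        (void) size;    /* no longer needed to size locked TLB entries */
        return PA2KA(physaddr);
    }

One consequence visible in the hunk above: the old caller restrictions in the
doc comment (page alignment, bounded size) disappear, because the translation
no longer depends on them.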