/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/page.h>
#include <arch/mm/pagesize.h>
#include <arch/mm/tlb.h>
#include <genarch/mm/page_ht.h>
#include <mm/frame.h>
#include <arch/mm/frame.h>
#include <bitops.h>
#include <debug.h>
#include <align.h>
#include <config.h>

#ifdef CONFIG_SMP
/** Entries locked in DTLB of BSP.
 *
 * Application processors need to have the same locked entries in their DTLBs
 * as the bootstrap processor.
 */
static struct {
    uintptr_t virt_page;
    uintptr_t phys_page;
    int pagesize_code;
} bsp_locked_dtlb_entry[DTLB_MAX_LOCKED_ENTRIES];

/** Number of entries in bsp_locked_dtlb_entry array. */
static count_t bsp_locked_dtlb_entries = 0;
#endif /* CONFIG_SMP */

/** Perform sparc64 specific initialization of paging. */
void page_arch_init(void)
{
    if (config.cpu_active == 1) {
        page_mapping_operations = &ht_mapping_operations;
    } else {

#ifdef CONFIG_SMP
        unsigned int i;

        /*
         * Copy locked DTLB entries from the BSP.
         */
        for (i = 0; i < bsp_locked_dtlb_entries; i++) {
            dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
                bsp_locked_dtlb_entry[i].phys_page,
                bsp_locked_dtlb_entry[i].pagesize_code, true,
                false);
        }
#endif

    }
}

/** Map memory-mapped device into virtual memory.
 *
 * So far, only DTLB is used to map devices into memory. Chances are that there
 * will be only a limited number of devices that the kernel itself needs to
 * lock in DTLB.
 *
 * @param physaddr Physical address of the page where the device is located.
 *  Must be at least page-aligned.
 * @param size Size of the device's registers. Must not exceed 8M (the largest
 *  mapping supported below) and must include extra space caused by the
 *  alignment.
 *
 * @return Virtual address of the page where the device is mapped.
 */
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
    unsigned int order;
    unsigned int i;

    ASSERT(config.cpu_active == 1);

    struct {
        int pagesize_code;
        size_t increment;
        count_t count;
    } sizemap[] = {
        { PAGESIZE_8K, 0, 1 },                    /* 8K */
        { PAGESIZE_8K, MMU_PAGE_SIZE, 2 },        /* 16K */
        { PAGESIZE_8K, MMU_PAGE_SIZE, 4 },        /* 32K */
        { PAGESIZE_64K, 0, 1 },                   /* 64K */
        { PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 },   /* 128K */
        { PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 },   /* 256K */
        { PAGESIZE_512K, 0, 1 },                  /* 512K */
        { PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 }, /* 1M */
        { PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 }, /* 2M */
        { PAGESIZE_4M, 0, 1 },                    /* 4M */
        { PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 }   /* 8M */
    };

    ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
    ASSERT(size <= 8 * 1024 * 1024);

    if (size <= MMU_FRAME_SIZE)
        order = 0;
    else
        order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;
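
    /*
     * Worked example (illustrative, assuming 8K MMU frames, i.e.
     * MMU_FRAME_WIDTH == 13): for size == 300 * 1024, fnzb64(size - 1) + 1
     * rounds the size up to the next power of two, 2^19 (512K), so
     * order == 19 - 13 == 6. That selects the 512K row of sizemap above:
     * the device is covered by a single locked 512K DTLB entry.
     */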

    /*
     * Use virtual addresses that are beyond the limit of physical memory.
     * Thus, the physical address space will not be wasted by holes created
     * by frame_alloc().
     */
    ASSERT(PA2KA(last_frame));
    uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
        1 << (order + FRAME_WIDTH));
    last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
        1 << (order + FRAME_WIDTH));
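
    /*
     * Note that both the virtual window and the new last_frame are aligned
     * to the full size of the mapping, 1 << (order + FRAME_WIDTH). This
     * keeps each locked entry naturally aligned in the virtual address
     * space (a large TLB entry can only map a region aligned to its page
     * size) and keeps successive hw_map() windows from overlapping.
     */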

    for (i = 0; i < sizemap[order].count; i++) {
        /*
         * First, insert the mapping into DTLB.
         */
        dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
            physaddr + i * sizemap[order].increment,
            sizemap[order].pagesize_code, true, false);

#ifdef CONFIG_SMP
        /*
         * Second, save the information about the mapping for APs.
         */
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
            virtaddr + i * sizemap[order].increment;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
            physaddr + i * sizemap[order].increment;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
            sizemap[order].pagesize_code;
        bsp_locked_dtlb_entries++;
#endif
    }

    return virtaddr;
}
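
/*
 * Illustrative usage sketch, not part of the original file. A driver-style
 * caller maps a hypothetical device; the physical address, size, and the
 * function name below are made up for illustration. Real callers obtain the
 * physical address and register block size from the bus or firmware.
 */
#if 0
static volatile uint8_t *example_map_device(void)
{
    /* Hypothetical device: 16K of registers at a page-aligned address. */
    uintptr_t regs_phys = 0x1fe00000000;    /* must be MMU_PAGE_SIZE-aligned */
    size_t regs_size = 16 * 1024;           /* maps as two locked 8K entries */

    /*
     * hw_map() carves a virtual window above physical memory, locks the
     * translation in DTLB, and returns the window's virtual address.
     */
    return (volatile uint8_t *) hw_map(regs_phys, regs_size);
}
#endif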

/** @}
 */