Subversion Repositories HelenOS

Rev

Rev 3817 | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 3817 Rev 3862
1
/*
1
/*
2
 * Copyright (c) 2006 Jakub Jermar
2
 * Copyright (c) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup sparc64mm
29
/** @addtogroup sparc64mm
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
#include <arch/mm/as.h>
35
#include <arch/mm/as.h>
36
#include <arch/mm/pagesize.h>
36
#include <arch/mm/pagesize.h>
37
#include <arch/mm/sun4u/tlb.h>
-
 
38
#include <arch/mm/sun4u/tlb.h>
37
#include <arch/mm/tlb.h>
39
#include <genarch/mm/page_ht.h>
38
#include <genarch/mm/page_ht.h>
40
#include <genarch/mm/asid_fifo.h>
39
#include <genarch/mm/asid_fifo.h>
41
#include <debug.h>
40
#include <debug.h>
42
#include <config.h>
41
#include <config.h>
43
 
42
 
44
#ifdef CONFIG_TSB
43
#ifdef CONFIG_TSB
45
#include <arch/mm/tsb.h>
44
#include <arch/mm/tsb.h>
46
#include <arch/memstr.h>
45
#include <arch/memstr.h>
47
#include <arch/asm.h>
46
#include <arch/asm.h>
48
#include <mm/frame.h>
47
#include <mm/frame.h>
49
#include <bitops.h>
48
#include <bitops.h>
50
#include <macros.h>
49
#include <macros.h>
51
#endif /* CONFIG_TSB */
50
#endif /* CONFIG_TSB */
52
 
51
 
53
/** Architecture dependent address space init. */
52
/** Architecture dependent address space init. */
54
void as_arch_init(void)
53
void as_arch_init(void)
55
{
54
{
56
    if (config.cpu_active == 1) {
55
    if (config.cpu_active == 1) {
57
        as_operations = &as_ht_operations;
56
        as_operations = &as_ht_operations;
58
        asid_fifo_init();
57
        asid_fifo_init();
59
    }
58
    }
60
}
59
}
61
 
60
 
62
/** Architecture dependent part of address space construction.
 *
 * With CONFIG_TSB, allocate one physically contiguous chunk of frames
 * holding both the instruction TSB and, immediately after it, the data
 * TSB, and zero the whole area. The chunk is owned by @a as and released
 * by as_destructor_arch().
 *
 * @param as    Address space being constructed.
 * @param flags Frame allocation flags passed through to frame_alloc().
 *
 * @return 0 on success, -1 if the TSB frames could not be allocated.
 */
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
	/*
	 * The order must be calculated with respect to the emulated
	 * 16K page size.
	 */
	int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
	    sizeof(tsb_entry_t)) >> FRAME_WIDTH);

	uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);

	if (!tsb)
		return -1;

	/* ITSB first, DTSB directly behind it in the same allocation. */
	as->arch.itsb = (tsb_entry_t *) tsb;
	as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
	    sizeof(tsb_entry_t));

	/* Zero both TSBs in one go; they are contiguous. */
	memsetb(as->arch.itsb,
	    (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
	return 0;
}
86
 
85
 
87
/** Architecture dependent part of address space destruction.
 *
 * With CONFIG_TSB, free the contiguous ITSB+DTSB allocation made by
 * as_constructor_arch() (as->arch.itsb is the start of the whole chunk).
 *
 * @param as Address space being destroyed.
 *
 * @return Number of frames returned to the frame allocator
 *         (0 when TSBs are not configured).
 */
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
	/*
	 * The count must be calculated with respect to the emulated 16K page
	 * size.
	 */
	count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
	    sizeof(tsb_entry_t)) >> FRAME_WIDTH;
	frame_free(KA2PA((uintptr_t) as->arch.itsb));
	return cnt;
#else
	return 0;
#endif
}
102
 
101
 
103
/** Architecture dependent part of as_create().
 *
 * With CONFIG_TSB, invalidate all entries of the newly created address
 * space's TSBs ((count_t) -1 requests invalidation of the maximum number
 * of pages, i.e. everything).
 *
 * @param as    Newly created address space.
 * @param flags Address space creation flags (unused here).
 *
 * @return Always 0.
 */
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
	tsb_invalidate(as, 0, (count_t) -1);
#endif
	return 0;
}
110
 
109
 
111
/** Perform sparc64-specific tasks when an address space becomes active on the
110
/** Perform sparc64-specific tasks when an address space becomes active on the
112
 * processor.
111
 * processor.
113
 *
112
 *
114
 * Install ASID and map TSBs.
113
 * Install ASID and map TSBs.
115
 *
114
 *
116
 * @param as Address space.
115
 * @param as Address space.
117
 */
116
 */
118
void as_install_arch(as_t *as)
117
void as_install_arch(as_t *as)
119
{
118
{
120
    tlb_context_reg_t ctx;
119
    tlb_context_reg_t ctx;
121
   
120
   
122
    /*
121
    /*
123
     * Note that we don't and may not lock the address space. That's ok
122
     * Note that we don't and may not lock the address space. That's ok
124
     * since we only read members that are currently read-only.
123
     * since we only read members that are currently read-only.
125
     *
124
     *
126
     * Moreover, the as->asid is protected by asidlock, which is being held.
125
     * Moreover, the as->asid is protected by asidlock, which is being held.
127
     */
126
     */
128
   
127
   
129
    /*
128
    /*
130
     * Write ASID to secondary context register. The primary context
129
     * Write ASID to secondary context register. The primary context
131
     * register has to be set from TL>0 so it will be filled from the
130
     * register has to be set from TL>0 so it will be filled from the
132
     * secondary context register from the TL=1 code just before switch to
131
     * secondary context register from the TL=1 code just before switch to
133
     * userspace.
132
     * userspace.
134
     */
133
     */
135
    ctx.v = 0;
134
    ctx.v = 0;
136
    ctx.context = as->asid;
135
    ctx.context = as->asid;
137
    mmu_secondary_context_write(ctx.v);
136
    mmu_secondary_context_write(ctx.v);
138
 
137
 
139
#ifdef CONFIG_TSB   
138
#ifdef CONFIG_TSB   
140
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
139
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
141
 
140
 
142
    ASSERT(as->arch.itsb && as->arch.dtsb);
141
    ASSERT(as->arch.itsb && as->arch.dtsb);
143
 
142
 
144
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
143
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
145
       
144
       
146
    if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
145
    if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
147
        /*
146
        /*
148
         * TSBs were allocated from memory not covered
147
         * TSBs were allocated from memory not covered
149
         * by the locked 4M kernel DTLB entry. We need
148
         * by the locked 4M kernel DTLB entry. We need
150
         * to map both TSBs explicitly.
149
         * to map both TSBs explicitly.
151
         */
150
         */
152
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
151
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
153
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
152
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
154
    }
153
    }
155
       
154
       
156
    /*
155
    /*
157
     * Setup TSB Base registers.
156
     * Setup TSB Base registers.
158
     */
157
     */
159
    tsb_base_reg_t tsb_base;
158
    tsb_base_reg_t tsb_base;
160
       
159
       
161
    tsb_base.value = 0;
160
    tsb_base.value = 0;
162
    tsb_base.size = TSB_SIZE;
161
    tsb_base.size = TSB_SIZE;
163
    tsb_base.split = 0;
162
    tsb_base.split = 0;
164
 
163
 
165
    tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
164
    tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
166
    itsb_base_write(tsb_base.value);
165
    itsb_base_write(tsb_base.value);
167
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
166
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
168
    dtsb_base_write(tsb_base.value);
167
    dtsb_base_write(tsb_base.value);
169
   
168
   
170
#if defined (US3)
169
#if defined (US3)
171
    /*
170
    /*
172
     * Clear the extension registers.
171
     * Clear the extension registers.
173
     * In HelenOS, primary and secondary context registers contain
172
     * In HelenOS, primary and secondary context registers contain
174
     * equal values and kernel misses (context 0, ie. the nucleus context)
173
     * equal values and kernel misses (context 0, ie. the nucleus context)
175
     * are excluded from the TSB miss handler, so it makes no sense
174
     * are excluded from the TSB miss handler, so it makes no sense
176
     * to have separate TSBs for primary, secondary and nucleus contexts.
175
     * to have separate TSBs for primary, secondary and nucleus contexts.
177
     * Clearing the extension registers will ensure that the value of the
176
     * Clearing the extension registers will ensure that the value of the
178
     * TSB Base register will be used as an address of TSB, making the code
177
     * TSB Base register will be used as an address of TSB, making the code
179
     * compatible with the US port.
178
     * compatible with the US port.
180
     */
179
     */
181
    itsb_primary_extension_write(0);
180
    itsb_primary_extension_write(0);
182
    itsb_nucleus_extension_write(0);
181
    itsb_nucleus_extension_write(0);
183
    dtsb_primary_extension_write(0);
182
    dtsb_primary_extension_write(0);
184
    dtsb_secondary_extension_write(0);
183
    dtsb_secondary_extension_write(0);
185
    dtsb_nucleus_extension_write(0);
184
    dtsb_nucleus_extension_write(0);
186
#endif
185
#endif
187
#endif
186
#endif
188
}
187
}
189
 
188
 
190
/** Perform sparc64-specific tasks when an address space is removed from the
189
/** Perform sparc64-specific tasks when an address space is removed from the
191
 * processor.
190
 * processor.
192
 *
191
 *
193
 * Demap TSBs.
192
 * Demap TSBs.
194
 *
193
 *
195
 * @param as Address space.
194
 * @param as Address space.
196
 */
195
 */
197
void as_deinstall_arch(as_t *as)
196
void as_deinstall_arch(as_t *as)
198
{
197
{
199
 
198
 
200
    /*
199
    /*
201
     * Note that we don't and may not lock the address space. That's ok
200
     * Note that we don't and may not lock the address space. That's ok
202
     * since we only read members that are currently read-only.
201
     * since we only read members that are currently read-only.
203
     *
202
     *
204
     * Moreover, the as->asid is protected by asidlock, which is being held.
203
     * Moreover, the as->asid is protected by asidlock, which is being held.
205
     */
204
     */
206
 
205
 
207
#ifdef CONFIG_TSB
206
#ifdef CONFIG_TSB
208
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
207
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
209
 
208
 
210
    ASSERT(as->arch.itsb && as->arch.dtsb);
209
    ASSERT(as->arch.itsb && as->arch.dtsb);
211
 
210
 
212
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
211
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
213
       
212
       
214
    if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
213
    if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
215
        /*
214
        /*
216
         * TSBs were allocated from memory not covered
215
         * TSBs were allocated from memory not covered
217
         * by the locked 4M kernel DTLB entry. We need
216
         * by the locked 4M kernel DTLB entry. We need
218
         * to demap the entry installed by as_install_arch().
217
         * to demap the entry installed by as_install_arch().
219
         */
218
         */
220
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
219
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
221
    }
220
    }
222
#endif
221
#endif
223
}
222
}
224
 
223
 
225
/** @}
224
/** @}
226
 */
225
 */
227
 
226