Subversion Repositories HelenOS

Rev 2292 → Rev 2307

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/as.h>
#include <arch/mm/tlb.h>
#include <genarch/mm/page_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <debug.h>
#include <config.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#include <arch/memstr.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <bitops.h>
#include <macros.h>
#endif /* CONFIG_TSB */

/** Architecture dependent address space init. */
void as_arch_init(void)
{
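    /*
     * Only the bootstrap processor (the sole active CPU at this point)
     * installs the global address space operations and initializes the
     * ASID FIFO; the other processors share these structures.
     */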
    if (config.cpu_active == 1) {
        as_operations = &as_ht_operations;
        asid_fifo_init();
    }
}

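/** Allocate and zero the instruction and data TSBs for an address space.
 *
 * With CONFIG_TSB disabled this is a no-op that reports success.
 *
 * @param as    Address space to initialize.
 * @param flags Frame allocation flags passed on to frame_alloc().
 *
 * @return 0 on success, -1 if the TSB frames could not be allocated.
 */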
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    /*
     * The order must be calculated with respect to the emulated
     * 16K page size.
     */
    int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t)) >> FRAME_WIDTH);

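    /*
     * Worked example under assumed values (the authoritative constants
     * live in the arch headers): with 512 entries per TSB and 16-byte
     * entries, (512 + 512) * 16 = 16K; with FRAME_WIDTH = 14 (16K frames),
     * 16K >> 14 = 1 and fnzb32(1) = 0, so both TSBs fit back to back in a
     * single order-0 frame.
     */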
    uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);

    if (!tsb)
        return -1;

    as->arch.itsb = (tsb_entry_t *) tsb;
    as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
        sizeof(tsb_entry_t));

    memsetb((uintptr_t) as->arch.itsb,
        (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
    return 0;
}

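/** Release the TSBs of an address space that is being destroyed.
 *
 * @param as Address space.
 *
 * @return Number of freed frames in terms of the emulated 16K page size
 *         (0 when CONFIG_TSB is disabled).
 */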
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
    /*
     * The count must be calculated with respect to the emulated 16K page
     * size.
     */
    count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t)) >> FRAME_WIDTH;
    frame_free(KA2PA((uintptr_t) as->arch.itsb));
    return cnt;
#else
    return 0;
#endif
}

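/** Perform sparc64-specific tasks when an address space is created.
 *
 * The freshly allocated TSBs are invalidated in their entirety so that the
 * new address space does not start with stale translations.
 *
 * @param as    Address space.
 * @param flags Flags (unused here).
 *
 * @return Always 0.
 */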
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    tsb_invalidate(as, 0, (count_t) -1);
#endif
    return 0;
}

/** Perform sparc64-specific tasks when an address space becomes active on the
 * processor.
 *
 * Install ASID and map TSBs.
 *
 * @param as Address space.
 */
void as_install_arch(as_t *as)
{
    tlb_context_reg_t ctx;

    /*
     * Note that we don't and may not lock the address space. That's OK,
     * since we only read members that are currently read-only.
     *
     * Moreover, as->asid is protected by asidlock, which is being held.
     */

    /*
     * Write the ASID to the secondary context register. The primary context
     * register has to be set from TL>0, so it will be filled from the
     * secondary context register by the TL=1 code just before the switch to
     * userspace.
     */
    ctx.v = 0;
    ctx.context = as->asid;
    mmu_secondary_context_write(ctx.v);

#ifdef CONFIG_TSB
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    ASSERT(as->arch.itsb && as->arch.dtsb);

    uintptr_t tsb = (uintptr_t) as->arch.itsb;

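    /*
     * The range checked below is 8 * MMU_PAGE_SIZE which, assuming the
     * usual 8K sparc64 MMU page, is the same 64K span that the single
     * PAGESIZE_64K mapping inserted below covers for both TSBs.
     */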
    if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
        /*
         * TSBs were allocated from memory not covered
         * by the locked 4M kernel DTLB entry. We need
         * to map both TSBs explicitly.
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
    }

    /*
     * Setup TSB Base registers.
     */
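    /*
     * A note on the fields, based on the UltraSPARC MMU specification (the
     * arch headers hold the authoritative layout): size encodes the number
     * of entries as 512 * 2^size, and split = 0 selects a common TSB for
     * both 8K and 64K page pointers.
     */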
    tsb_base_reg_t tsb_base;

    tsb_base.value = 0;
    tsb_base.size = TSB_SIZE;
    tsb_base.split = 0;

    tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
    itsb_base_write(tsb_base.value);
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
    dtsb_base_write(tsb_base.value);
#endif
}

/** Perform sparc64-specific tasks when an address space is removed from the
 * processor.
 *
 * Demap TSBs.
 *
 * @param as Address space.
 */
void as_deinstall_arch(as_t *as)
{
    /*
     * Note that we don't and may not lock the address space. That's OK,
     * since we only read members that are currently read-only.
     *
     * Moreover, as->asid is protected by asidlock, which is being held.
     */

#ifdef CONFIG_TSB
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    ASSERT(as->arch.itsb && as->arch.dtsb);

    uintptr_t tsb = (uintptr_t) as->arch.itsb;

    if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
        /*
         * TSBs were allocated from memory not covered
         * by the locked 4M kernel DTLB entry. We need
         * to demap the entry installed by as_install_arch().
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
    }
#endif
}

/** @}
 */