Subversion Repositories HelenOS

Rev

Rev 2144 | Rev 2272 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2144 Rev 2170
1
/*
1
/*
2
 * Copyright (c) 2006 Jakub Jermar
2
 * Copyright (c) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup sparc64mm
29
/** @addtogroup sparc64mm
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
#include <arch/mm/as.h>
35
#include <arch/mm/as.h>
36
#include <arch/mm/tlb.h>
36
#include <arch/mm/tlb.h>
37
#include <genarch/mm/page_ht.h>
37
#include <genarch/mm/page_ht.h>
38
#include <genarch/mm/asid_fifo.h>
38
#include <genarch/mm/asid_fifo.h>
39
#include <debug.h>
39
#include <debug.h>
40
#include <config.h>
40
#include <config.h>
41
 
41
 
42
#ifdef CONFIG_TSB
42
#ifdef CONFIG_TSB
43
#include <arch/mm/tsb.h>
43
#include <arch/mm/tsb.h>
44
#include <arch/memstr.h>
44
#include <arch/memstr.h>
45
#include <synch/mutex.h>
-
 
46
#include <arch/asm.h>
45
#include <arch/asm.h>
47
#include <mm/frame.h>
46
#include <mm/frame.h>
48
#include <bitops.h>
47
#include <bitops.h>
49
#include <macros.h>
48
#include <macros.h>
50
#endif /* CONFIG_TSB */
49
#endif /* CONFIG_TSB */
51
 
50
 
52
/** Architecture dependent address space init. */
51
/** Architecture dependent address space init. */
53
void as_arch_init(void)
52
void as_arch_init(void)
54
{
53
{
55
    if (config.cpu_active == 1) {
54
    if (config.cpu_active == 1) {
56
        as_operations = &as_ht_operations;
55
        as_operations = &as_ht_operations;
57
        asid_fifo_init();
56
        asid_fifo_init();
58
    }
57
    }
59
}
58
}
60
 
59
 
61
/** Allocate and zero the per-address-space TSBs.
 *
 * Allocates one physically contiguous chunk holding both the instruction
 * and the data TSB, stores the two sub-regions into @a as and clears them.
 *
 * @param as    Address space being constructed.
 * @param flags Frame allocation flags.
 *
 * @return 0 on success, -1 if the TSB frames could not be allocated.
 */
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
	/*
	 * Combined size of both TSBs; the frame order must be calculated
	 * with respect to the emulated 16K page size.
	 */
	size_t tsb_bytes = (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
	    sizeof(tsb_entry_t);
	int order = fnzb32(tsb_bytes >> FRAME_WIDTH);
	uintptr_t area = (uintptr_t) frame_alloc(order, flags | FRAME_KA);

	if (!area)
		return -1;

	/* The ITSB occupies the front of the chunk, the DTSB follows it. */
	as->arch.itsb = (tsb_entry_t *) area;
	as->arch.dtsb = (tsb_entry_t *) (area +
	    ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));

	/* Start with all entries invalid (zeroed). */
	memsetb((uintptr_t) as->arch.itsb, tsb_bytes, 0);
#endif
	return 0;
}
83
 
82
 
84
/** Release the per-address-space TSBs.
 *
 * @param as Address space being destroyed.
 *
 * @return Number of frames returned to the frame allocator
 *         (0 when TSBs are not configured).
 */
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
	/*
	 * Number of frames backing both TSBs, computed with respect to the
	 * emulated 16K page size.
	 */
	count_t frames = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
	    sizeof(tsb_entry_t)) >> FRAME_WIDTH;

	/* The ITSB pointer marks the start of the shared allocation. */
	frame_free(KA2PA((uintptr_t) as->arch.itsb));
	return frames;
#else
	return 0;
#endif
}
99
 
98
 
100
/** Architecture-specific part of address space creation.
 *
 * Invalidates the freshly allocated TSBs so the new address space starts
 * with no stale translations ((count_t) -1 is used as the entry count to
 * cover the whole TSB range starting at page 0 — per tsb_invalidate()).
 *
 * NOTE(review): unlike earlier revisions, no interrupt disabling or
 * as->lock acquisition happens here; presumably the caller serializes
 * access (cf. the asidlock remark in as_install_arch()) — verify against
 * the generic as_create() path.
 *
 * @param as    Address space being created.
 * @param flags Creation flags (unused here).
 *
 * @return Always 0.
 */
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
	tsb_invalidate(as, 0, (count_t) -1);
#endif
	return 0;
}
113
 
106
 
114
/** Perform sparc64-specific tasks when an address space becomes active on the
 * processor.
 *
 * Install ASID and map TSBs.
 *
 * @param as Address space.
 */
void as_install_arch(as_t *as)
{
	tlb_context_reg_t ctx;
	
	/*
	 * Note that we don't and may not lock the address space. That's ok
	 * since we only read members that are currently read-only.
	 *
	 * Moreover, the as->asid is protected by asidlock, which is being held.
	 */
	
	/*
	 * Write ASID to secondary context register. The primary context
	 * register has to be set from TL>0 so it will be filled from the
	 * secondary context register from the TL=1 code just before switch to
	 * userspace.
	 */
	ctx.v = 0;
	ctx.context = as->asid;
	mmu_secondary_context_write(ctx.v);

#ifdef CONFIG_TSB	
	/* Start of the area covered by the locked kernel DTLB entry. */
	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

	/* Both TSBs must have been set up by as_constructor_arch(). */
	ASSERT(as->arch.itsb && as->arch.dtsb);

	/* The ITSB marks the start of the contiguous ITSB+DTSB chunk. */
	uintptr_t tsb = (uintptr_t) as->arch.itsb;
		
	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
		/*
		 * TSBs were allocated from memory not covered
		 * by the locked 4M kernel DTLB entry. We need
		 * to map both TSBs explicitly.
		 */
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
		dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
	}
		
	/*
	 * Setup TSB Base registers.
	 */
	tsb_base_reg_t tsb_base;
		
	tsb_base.value = 0;
	tsb_base.size = TSB_SIZE;
	tsb_base.split = 0;	/* 0: common TSB for 8K and 64K pages */

	/* Point the MMU at the instruction and the data TSB respectively. */
	tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
	itsb_base_write(tsb_base.value);
	tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
	dtsb_base_write(tsb_base.value);
#endif
}
175
 
167
 
176
/** Perform sparc64-specific tasks when an address space is removed from the
 * processor.
 *
 * Demap TSBs.
 *
 * @param as Address space.
 */
void as_deinstall_arch(as_t *as)
{

	/*
	 * Note that we don't and may not lock the address space. That's ok
	 * since we only read members that are currently read-only.
	 *
	 * Moreover, the as->asid is protected by asidlock, which is being held.
	 */

#ifdef CONFIG_TSB
	/* Start of the area covered by the locked kernel DTLB entry. */
	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

	ASSERT(as->arch.itsb && as->arch.dtsb);

	/* Same placement test as in as_install_arch(). */
	uintptr_t tsb = (uintptr_t) as->arch.itsb;
		
	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
		/*
		 * TSBs were allocated from memory not covered
		 * by the locked 4M kernel DTLB entry. We need
		 * to demap the entry installed by as_install_arch().
		 */
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
	}
#endif
}
210
 
202
 
211
/** @}
203
/** @}
212
 */
204
 */
213
 
205