Subversion Repositories HelenOS

Rev

Rev 1987 | Rev 2010 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 1987 Rev 2009
1
/*
1
/*
2
 * Copyright (C) 2006 Jakub Jermar
2
 * Copyright (C) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup sparc64mm
29
/** @addtogroup sparc64mm
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
#include <arch/mm/as.h>
35
#include <arch/mm/as.h>
36
#include <arch/mm/tlb.h>
36
#include <arch/mm/tlb.h>
37
#include <genarch/mm/as_ht.h>
37
#include <genarch/mm/as_ht.h>
38
#include <genarch/mm/asid_fifo.h>
38
#include <genarch/mm/asid_fifo.h>
39
#include <debug.h>
39
#include <debug.h>
40
#include <config.h>
40
#include <config.h>
41
 
41
 
42
#ifdef CONFIG_TSB
42
#ifdef CONFIG_TSB
43
#include <arch/mm/tsb.h>
43
#include <arch/mm/tsb.h>
44
#include <arch/memstr.h>
44
#include <arch/memstr.h>
45
#include <synch/mutex.h>
45
#include <synch/mutex.h>
46
#include <arch/asm.h>
46
#include <arch/asm.h>
47
#include <mm/frame.h>
47
#include <mm/frame.h>
48
#include <bitops.h>
48
#include <bitops.h>
49
#include <macros.h>
49
#include <macros.h>
50
#endif
50
#endif /* CONFIG_TSB */
-
 
51
 
-
 
52
#ifdef CONFIG_VIRT_IDX_DCACHE
-
 
53
#include <arch/mm/cache.h>
-
 
54
#endif /* CONFIG_VIRT_IDX_DCACHE */
51
 
55
 
52
/** Architecture dependent address space init. */
56
/** Architecture dependent address space init. */
53
void as_arch_init(void)
57
void as_arch_init(void)
54
{
58
{
55
    if (config.cpu_active == 1) {
59
    if (config.cpu_active == 1) {
56
        as_operations = &as_ht_operations;
60
        as_operations = &as_ht_operations;
57
        asid_fifo_init();
61
        asid_fifo_init();
58
    }
62
    }
59
}
63
}
60
 
64
 
61
/** Allocate and zero the per-address-space TSBs (when CONFIG_TSB is on).
 *
 * The instruction and data TSBs are carved out of a single physically
 * contiguous frame allocation; the ITSB comes first, immediately
 * followed by the DTSB.
 *
 * @param as    Address space being constructed.
 * @param flags Frame allocation flags passed through to frame_alloc().
 *
 * @return 0 on success, -1 if the TSB frames could not be allocated.
 */
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    /* Order (log2 of frame count) covering both TSBs. */
    int tsb_order = fnzb32(((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH);
    uintptr_t tsb_va = (uintptr_t) frame_alloc(tsb_order, flags | FRAME_KA);

    if (!tsb_va)
        return -1;

    /* ITSB occupies the start of the allocation, DTSB follows it. */
    as->arch.itsb = (tsb_entry_t *) tsb_va;
    as->arch.dtsb = (tsb_entry_t *) (tsb_va + ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));

    /* Start with all TSB entries invalid (zeroed). */
    memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t), 0);
#endif
    return 0;
}
76
 
80
 
77
/** Release the TSB frames owned by an address space.
 *
 * @param as Address space being destroyed.
 *
 * @return Number of frames returned to the allocator
 *         (0 when CONFIG_TSB is disabled).
 */
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
    /* Both TSBs were allocated as one block starting at itsb. */
    count_t frames = ((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH;

    frame_free(KA2PA((uintptr_t) as->arch.itsb));
    return frames;
#else
    return 0;
#endif
}
87
 
91
 
88
/** Architecture-specific part of address space creation.
 *
 * With CONFIG_TSB, invalidate the freshly allocated TSBs so the new
 * address space starts with no stale translations.
 *
 * @param as    Newly created address space.
 * @param flags Creation flags (unused here).
 *
 * @return Always 0.
 */
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    ipl_t ipl = interrupts_disable();

    /*
     * Nobody else can reference this address space yet, so the lock
     * is not strictly needed — taken anyway for form's sake.
     */
    mutex_lock_active(&as->lock);

    /* Invalidate every TSB entry: (count_t) -1 means "all of them". */
    tsb_invalidate(as, 0, (count_t) -1);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
#endif
    return 0;
}
101
 
105
 
102
/** Perform sparc64-specific tasks when an address space becomes active on the processor.
106
/** Perform sparc64-specific tasks when an address space becomes active on the processor.
103
 *
107
 *
104
 * Install ASID and map TSBs.
108
 * Install ASID and map TSBs.
105
 *
109
 *
106
 * @param as Address space.
110
 * @param as Address space.
107
 */
111
 */
108
void as_install_arch(as_t *as)
112
void as_install_arch(as_t *as)
109
{
113
{
110
    tlb_context_reg_t ctx;
114
    tlb_context_reg_t ctx;
111
   
115
   
112
    /*
116
    /*
113
     * Note that we don't lock the address space.
117
     * Note that we don't lock the address space.
114
     * That's correct - we can afford it here
118
     * That's correct - we can afford it here
115
     * because we only read members that are
119
     * because we only read members that are
116
     * currently read-only.
120
     * currently read-only.
117
     */
121
     */
118
   
122
   
119
    /*
123
    /*
120
     * Write ASID to secondary context register.
124
     * Write ASID to secondary context register.
121
     * The primary context register has to be set
125
     * The primary context register has to be set
122
     * from TL>0 so it will be filled from the
126
     * from TL>0 so it will be filled from the
123
     * secondary context register from the TL=1
127
     * secondary context register from the TL=1
124
     * code just before switch to userspace.
128
     * code just before switch to userspace.
125
     */
129
     */
126
    ctx.v = 0;
130
    ctx.v = 0;
127
    ctx.context = as->asid;
131
    ctx.context = as->asid;
128
    mmu_secondary_context_write(ctx.v);
132
    mmu_secondary_context_write(ctx.v);
129
 
133
 
130
#ifdef CONFIG_TSB   
134
#ifdef CONFIG_TSB   
131
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
135
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
132
 
136
 
133
    ASSERT(as->arch.itsb && as->arch.dtsb);
137
    ASSERT(as->arch.itsb && as->arch.dtsb);
134
 
138
 
135
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
139
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
136
       
140
       
137
    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
141
    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
138
        /*
142
        /*
139
         * TSBs were allocated from memory not covered
143
         * TSBs were allocated from memory not covered
140
         * by the locked 4M kernel DTLB entry. We need
144
         * by the locked 4M kernel DTLB entry. We need
141
         * to map both TSBs explicitly.
145
         * to map both TSBs explicitly.
142
         */
146
         */
143
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
147
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
144
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
148
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
145
    }
149
    }
146
       
150
       
147
    /*
151
    /*
148
     * Setup TSB Base registers.
152
     * Setup TSB Base registers.
149
     */
153
     */
150
    tsb_base_reg_t tsb_base;
154
    tsb_base_reg_t tsb_base;
151
       
155
       
152
    tsb_base.value = 0;
156
    tsb_base.value = 0;
153
    tsb_base.size = TSB_SIZE;
157
    tsb_base.size = TSB_SIZE;
154
    tsb_base.split = 0;
158
    tsb_base.split = 0;
155
 
159
 
156
    tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
160
    tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
157
    itsb_base_write(tsb_base.value);
161
    itsb_base_write(tsb_base.value);
158
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
162
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
159
    dtsb_base_write(tsb_base.value);
163
    dtsb_base_write(tsb_base.value);
160
#endif
164
#endif
-
 
165
#ifdef CONFIG_VIRT_IDX_DCACHE
-
 
166
    if (as->dcache_flush_on_install) {
-
 
167
        /*
-
 
168
         * Some mappings in this address space are illegal address
-
 
169
         * aliases. Upon their creation, the flush_dcache_on_install
-
 
170
         * flag was set.
-
 
171
         *
-
 
172
         * We are now obliged to flush the D-cache in order to guarantee
-
 
173
         * that there will be at most one cache line for each address
-
 
174
         * alias.
-
 
175
         *
-
 
176
         * This flush performs a cleanup after another address space in
-
 
177
         * which the alias might have existed.
-
 
178
         */
-
 
179
        dcache_flush();
-
 
180
    }
-
 
181
#endif /* CONFIG_VIRT_IDX_DCACHE */
161
}
182
}
162
 
183
 
163
/** Perform sparc64-specific tasks when an address space is removed from the processor.
184
/** Perform sparc64-specific tasks when an address space is removed from the processor.
164
 *
185
 *
165
 * Demap TSBs.
186
 * Demap TSBs.
166
 *
187
 *
167
 * @param as Address space.
188
 * @param as Address space.
168
 */
189
 */
169
void as_deinstall_arch(as_t *as)
190
void as_deinstall_arch(as_t *as)
170
{
191
{
171
 
192
 
172
    /*
193
    /*
173
     * Note that we don't lock the address space.
194
     * Note that we don't lock the address space.
174
     * That's correct - we can afford it here
195
     * That's correct - we can afford it here
175
     * because we only read members that are
196
     * because we only read members that are
176
     * currently read-only.
197
     * currently read-only.
177
     */
198
     */
178
 
199
 
179
#ifdef CONFIG_TSB
200
#ifdef CONFIG_TSB
180
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
201
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
181
 
202
 
182
    ASSERT(as->arch.itsb && as->arch.dtsb);
203
    ASSERT(as->arch.itsb && as->arch.dtsb);
183
 
204
 
184
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
205
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
185
       
206
       
186
    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
207
    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
187
        /*
208
        /*
188
         * TSBs were allocated from memory not covered
209
         * TSBs were allocated from memory not covered
189
         * by the locked 4M kernel DTLB entry. We need
210
         * by the locked 4M kernel DTLB entry. We need
190
         * to demap the entry installed by as_install_arch().
211
         * to demap the entry installed by as_install_arch().
191
         */
212
         */
192
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
213
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
193
    }
214
    }
194
#endif
215
#endif
-
 
216
#ifdef CONFIG_VIRT_IDX_DCACHE
-
 
217
    if (as->dcache_flush_on_deinstall) {
-
 
218
        /*
-
 
219
         * Some mappings in this address space are illegal address
-
 
220
         * aliases. Upon their creation, the flush_dcache_on_deinstall
-
 
221
         * flag was set.
-
 
222
         *
-
 
223
         * We are now obliged to flush the D-cache in order to guarantee
-
 
224
         * that there will be at most one cache line for each address
-
 
225
         * alias.
-
 
226
         *
-
 
227
         * This flush performs a cleanup after this address space. It is
-
 
228
         * necessary because other address spaces that contain the same
-
 
229
         * alias are not necessarily aware of the need to carry out the
-
 
230
         * cache flush. The only address spaces that are aware of it are
-
 
231
         * those that created the illegal alias.
-
 
232
         */
-
 
233
        dcache_flush();
-
 
234
    }
-
 
235
#endif /* CONFIG_VIRT_IDX_DCACHE */
195
}
236
}
196
 
237
 
197
/** @}
238
/** @}
198
 */
239
 */
199
 
240