Subversion Repositories HelenOS

Rev

Rev 2131 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2131 Rev 2422
1
/*
1
/*
2
 * Copyright (c) 2006 Jakub Jermar
2
 * Copyright (c) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup sparc64mm
29
/** @addtogroup sparc64mm
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
#include <arch/mm/as.h>
35
#include <arch/mm/as.h>
36
#include <arch/mm/tlb.h>
36
#include <arch/mm/tlb.h>
37
#include <genarch/mm/page_ht.h>
37
#include <genarch/mm/page_ht.h>
38
#include <genarch/mm/asid_fifo.h>
38
#include <genarch/mm/asid_fifo.h>
39
#include <debug.h>
39
#include <debug.h>
40
#include <config.h>
40
#include <config.h>
41
 
41
 
42
#ifdef CONFIG_TSB
42
#ifdef CONFIG_TSB
43
#include <arch/mm/tsb.h>
43
#include <arch/mm/tsb.h>
44
#include <arch/memstr.h>
44
#include <arch/memstr.h>
45
#include <synch/mutex.h>
-
 
46
#include <arch/asm.h>
45
#include <arch/asm.h>
47
#include <mm/frame.h>
46
#include <mm/frame.h>
48
#include <bitops.h>
47
#include <bitops.h>
49
#include <macros.h>
48
#include <macros.h>
50
#endif /* CONFIG_TSB */
49
#endif /* CONFIG_TSB */
51
 
50
 
52
/** Architecture dependent address space init. */
51
/** Architecture dependent address space init. */
53
void as_arch_init(void)
52
void as_arch_init(void)
54
{
53
{
55
    if (config.cpu_active == 1) {
54
    if (config.cpu_active == 1) {
56
        as_operations = &as_ht_operations;
55
        as_operations = &as_ht_operations;
57
        asid_fifo_init();
56
        asid_fifo_init();
58
    }
57
    }
59
}
58
}
60
 
59
 
61
/** Architecture-specific part of address space construction.
 *
 * With CONFIG_TSB, allocates and zeroes the instruction and data TSBs
 * (Translation Storage Buffers) for the new address space; both TSBs
 * live in one physically contiguous frame allocation.
 *
 * @param as    Address space being constructed.
 * @param flags Frame allocation flags.
 *
 * @return 0 on success, -1 if the TSB frames could not be allocated.
 */
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    /*
     * The order must be calculated with respect to the emulated
     * 16K page size.
     */
    int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t)) >> FRAME_WIDTH);

    uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);

    if (!tsb)
        return -1;

    /* ITSB comes first; DTSB immediately follows it. */
    as->arch.itsb = (tsb_entry_t *) tsb;
    as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
        sizeof(tsb_entry_t));

    /* Start with all TSB entries invalid (zeroed). */
    memsetb((uintptr_t) as->arch.itsb,
        (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
    return 0;
}
79
 
84
 
80
/** Architecture-specific part of address space destruction.
 *
 * With CONFIG_TSB, frees the frames backing both TSBs of the address
 * space (allocated together in as_constructor_arch()).
 *
 * @param as Address space being destroyed.
 *
 * @return Number of freed frames (0 when TSBs are not configured).
 */
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
    /*
     * The count must be calculated with respect to the emulated 16K page
     * size.
     */
    count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t)) >> FRAME_WIDTH;
    frame_free(KA2PA((uintptr_t) as->arch.itsb));
    return cnt;
#else
    return 0;
#endif
}
91
 
100
 
92
/** Architecture-specific part of address space creation.
 *
 * With CONFIG_TSB, invalidates every entry of the freshly allocated
 * TSBs so the new address space starts with no stale translations.
 *
 * @param as    Newly created address space.
 * @param flags Address space creation flags (unused here).
 *
 * @return Always 0.
 */
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    tsb_invalidate(as, 0, (count_t) -1);
#endif
    return 0;
}
105
 
108
 
106
/** Perform sparc64-specific tasks when an address space becomes active on the
109
/** Perform sparc64-specific tasks when an address space becomes active on the
107
 * processor.
110
 * processor.
108
 *
111
 *
109
 * Install ASID and map TSBs.
112
 * Install ASID and map TSBs.
110
 *
113
 *
111
 * @param as Address space.
114
 * @param as Address space.
112
 */
115
 */
113
void as_install_arch(as_t *as)
116
void as_install_arch(as_t *as)
114
{
117
{
115
    tlb_context_reg_t ctx;
118
    tlb_context_reg_t ctx;
116
   
119
   
117
    /*
120
    /*
118
     * Note that we don't lock the address space.
121
     * Note that we don't and may not lock the address space. That's ok
119
     * That's correct - we can afford it here
-
 
120
     * because we only read members that are
122
     * since we only read members that are currently read-only.
-
 
123
     *
121
     * currently read-only.
124
     * Moreover, the as->asid is protected by asidlock, which is being held.
122
     */
125
     */
123
   
126
   
124
    /*
127
    /*
125
     * Write ASID to secondary context register.
128
     * Write ASID to secondary context register. The primary context
126
     * The primary context register has to be set
-
 
127
     * from TL>0 so it will be filled from the
129
     * register has to be set from TL>0 so it will be filled from the
128
     * secondary context register from the TL=1
130
     * secondary context register from the TL=1 code just before switch to
129
     * code just before switch to userspace.
131
     * userspace.
130
     */
132
     */
131
    ctx.v = 0;
133
    ctx.v = 0;
132
    ctx.context = as->asid;
134
    ctx.context = as->asid;
133
    mmu_secondary_context_write(ctx.v);
135
    mmu_secondary_context_write(ctx.v);
134
 
136
 
135
#ifdef CONFIG_TSB   
137
#ifdef CONFIG_TSB   
136
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
138
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
137
 
139
 
138
    ASSERT(as->arch.itsb && as->arch.dtsb);
140
    ASSERT(as->arch.itsb && as->arch.dtsb);
139
 
141
 
140
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
142
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
141
       
143
       
142
    if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
144
    if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
143
        /*
145
        /*
144
         * TSBs were allocated from memory not covered
146
         * TSBs were allocated from memory not covered
145
         * by the locked 4M kernel DTLB entry. We need
147
         * by the locked 4M kernel DTLB entry. We need
146
         * to map both TSBs explicitly.
148
         * to map both TSBs explicitly.
147
         */
149
         */
148
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
150
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
149
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
151
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
150
    }
152
    }
151
       
153
       
152
    /*
154
    /*
153
     * Setup TSB Base registers.
155
     * Setup TSB Base registers.
154
     */
156
     */
155
    tsb_base_reg_t tsb_base;
157
    tsb_base_reg_t tsb_base;
156
       
158
       
157
    tsb_base.value = 0;
159
    tsb_base.value = 0;
158
    tsb_base.size = TSB_SIZE;
160
    tsb_base.size = TSB_SIZE;
159
    tsb_base.split = 0;
161
    tsb_base.split = 0;
160
 
162
 
161
    tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
163
    tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
162
    itsb_base_write(tsb_base.value);
164
    itsb_base_write(tsb_base.value);
163
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
165
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
164
    dtsb_base_write(tsb_base.value);
166
    dtsb_base_write(tsb_base.value);
165
#endif
167
#endif
166
}
168
}
167
 
169
 
168
/** Perform sparc64-specific tasks when an address space is removed from the
170
/** Perform sparc64-specific tasks when an address space is removed from the
169
 * processor.
171
 * processor.
170
 *
172
 *
171
 * Demap TSBs.
173
 * Demap TSBs.
172
 *
174
 *
173
 * @param as Address space.
175
 * @param as Address space.
174
 */
176
 */
175
void as_deinstall_arch(as_t *as)
177
void as_deinstall_arch(as_t *as)
176
{
178
{
177
 
179
 
178
    /*
180
    /*
179
     * Note that we don't lock the address space.
181
     * Note that we don't and may not lock the address space. That's ok
180
     * That's correct - we can afford it here
-
 
181
     * because we only read members that are
182
     * since we only read members that are currently read-only.
-
 
183
     *
182
     * currently read-only.
184
     * Moreover, the as->asid is protected by asidlock, which is being held.
183
     */
185
     */
184
 
186
 
185
#ifdef CONFIG_TSB
187
#ifdef CONFIG_TSB
186
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
188
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
187
 
189
 
188
    ASSERT(as->arch.itsb && as->arch.dtsb);
190
    ASSERT(as->arch.itsb && as->arch.dtsb);
189
 
191
 
190
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
192
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
191
       
193
       
192
    if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
194
    if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
193
        /*
195
        /*
194
         * TSBs were allocated from memory not covered
196
         * TSBs were allocated from memory not covered
195
         * by the locked 4M kernel DTLB entry. We need
197
         * by the locked 4M kernel DTLB entry. We need
196
         * to demap the entry installed by as_install_arch().
198
         * to demap the entry installed by as_install_arch().
197
         */
199
         */
198
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
200
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
199
    }
201
    }
200
#endif
202
#endif
201
}
203
}
202
 
204
 
203
/** @}
 */
207