Subversion Repositories HelenOS

Rev

Rev 1892 | Rev 1987 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 1892 Rev 1903
1
/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28
 
28
 
29
/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */
34
 
34
 
35
#include <arch/mm/as.h>
#include <arch/mm/tlb.h>
#include <genarch/mm/as_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <debug.h>
#include <config.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#include <arch/memstr.h>
#include <synch/mutex.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <bitops.h>
#include <macros.h>
#endif
50
 
51
 
51
/** Architecture dependent address space init. */
52
/** Architecture dependent address space init. */
52
void as_arch_init(void)
53
void as_arch_init(void)
53
{
54
{
-
 
55
    if (config.cpu_active == 1) {
54
    as_operations = &as_ht_operations;
56
        as_operations = &as_ht_operations;
55
    asid_fifo_init();
57
        asid_fifo_init();
-
 
58
    }
56
}
59
}
57
 
60
 
58
/** Architecture dependent part of address space construction.
 *
 * When CONFIG_TSB is enabled, allocate physically contiguous frames
 * for both the instruction and data Translation Storage Buffers
 * (ITSB followed immediately by DTSB) and zero them out.
 *
 * @param as    Address space being constructed.
 * @param flags Frame allocation flags passed through to frame_alloc().
 *
 * @return 0 on success, -1 if the TSB frames could not be allocated.
 */
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    /* Number of frames (as a power-of-two order) covering both TSBs. */
    int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t)) >> FRAME_WIDTH);
    uintptr_t tsb = (uintptr_t) frame_alloc(order, flags);

    if (!tsb)
        return -1;

    /* The DTSB starts right after the ITSB in the same allocation. */
    as->arch.itsb = (tsb_entry_t *) tsb;
    as->arch.dtsb = (tsb_entry_t *) (tsb +
        ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));
    memsetb((uintptr_t) as->arch.itsb,
        (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
    return 0;
}
73
 
76
 
74
/** Architecture dependent part of address space destruction.
 *
 * When CONFIG_TSB is enabled, return the TSB frames (allocated in
 * as_constructor_arch(); itsb is the base of the joint ITSB+DTSB
 * allocation) back to the frame allocator.
 *
 * @param as Address space being destroyed.
 *
 * @return Number of frames freed.
 */
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
    count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t)) >> FRAME_WIDTH;
    frame_free((uintptr_t) as->arch.itsb);
    return cnt;
#else
    return 0;
#endif
}
84
 
87
 
85
/** Architecture dependent part of as_create().
 *
 * When CONFIG_TSB is enabled, invalidate the entire freshly allocated
 * TSB pair so the new address space starts with no stale translations.
 *
 * @param as    Newly created address space.
 * @param flags Unused here; kept for interface parity with the
 *              generic as_create() hook.
 *
 * @return Always 0.
 */
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock_active(&as->lock);   /* completely unnecessary, but polite */
    /* (count_t) -1 requests invalidation of all TSB entries. */
    tsb_invalidate(as, 0, (count_t) -1);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
#endif
    return 0;
}
98
 
101
 
99
/** Perform sparc64-specific tasks when an address space becomes active on the processor.
102
/** Perform sparc64-specific tasks when an address space becomes active on the processor.
100
 *
103
 *
101
 * Install ASID and map TSBs.
104
 * Install ASID and map TSBs.
102
 *
105
 *
103
 * @param as Address space.
106
 * @param as Address space.
104
 */
107
 */
105
void as_install_arch(as_t *as)
108
void as_install_arch(as_t *as)
106
{
109
{
107
    tlb_context_reg_t ctx;
110
    tlb_context_reg_t ctx;
108
   
111
   
109
    /*
112
    /*
110
     * Note that we don't lock the address space.
113
     * Note that we don't lock the address space.
111
     * That's correct - we can afford it here
114
     * That's correct - we can afford it here
112
     * because we only read members that are
115
     * because we only read members that are
113
     * currently read-only.
116
     * currently read-only.
114
     */
117
     */
115
   
118
   
116
    /*
119
    /*
117
     * Write ASID to secondary context register.
120
     * Write ASID to secondary context register.
118
     * The primary context register has to be set
121
     * The primary context register has to be set
119
     * from TL>0 so it will be filled from the
122
     * from TL>0 so it will be filled from the
120
     * secondary context register from the TL=1
123
     * secondary context register from the TL=1
121
     * code just before switch to userspace.
124
     * code just before switch to userspace.
122
     */
125
     */
123
    ctx.v = 0;
126
    ctx.v = 0;
124
    ctx.context = as->asid;
127
    ctx.context = as->asid;
125
    mmu_secondary_context_write(ctx.v);
128
    mmu_secondary_context_write(ctx.v);
126
 
129
 
127
#ifdef CONFIG_TSB   
130
#ifdef CONFIG_TSB   
128
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
131
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
129
 
132
 
130
    ASSERT(as->arch.itsb && as->arch.dtsb);
133
    ASSERT(as->arch.itsb && as->arch.dtsb);
131
 
134
 
132
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
135
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
133
       
136
       
134
    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
137
    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
135
        /*
138
        /*
136
         * TSBs were allocated from memory not covered
139
         * TSBs were allocated from memory not covered
137
         * by the locked 4M kernel DTLB entry. We need
140
         * by the locked 4M kernel DTLB entry. We need
138
         * to map both TSBs explicitly.
141
         * to map both TSBs explicitly.
139
         */
142
         */
140
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
143
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
141
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
144
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
142
    }
145
    }
143
       
146
       
144
    /*
147
    /*
145
     * Setup TSB Base registers.
148
     * Setup TSB Base registers.
146
     */
149
     */
147
    tsb_base_reg_t tsb_base;
150
    tsb_base_reg_t tsb_base;
148
       
151
       
149
    tsb_base.value = 0;
152
    tsb_base.value = 0;
150
    tsb_base.size = TSB_SIZE;
153
    tsb_base.size = TSB_SIZE;
151
    tsb_base.split = 0;
154
    tsb_base.split = 0;
152
 
155
 
153
    tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
156
    tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
154
    itsb_base_write(tsb_base.value);
157
    itsb_base_write(tsb_base.value);
155
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
158
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
156
    dtsb_base_write(tsb_base.value);
159
    dtsb_base_write(tsb_base.value);
157
#endif
160
#endif
158
}
161
}
159
 
162
 
160
/** Perform sparc64-specific tasks when an address space is removed from the processor.
163
/** Perform sparc64-specific tasks when an address space is removed from the processor.
161
 *
164
 *
162
 * Demap TSBs.
165
 * Demap TSBs.
163
 *
166
 *
164
 * @param as Address space.
167
 * @param as Address space.
165
 */
168
 */
166
void as_deinstall_arch(as_t *as)
169
void as_deinstall_arch(as_t *as)
167
{
170
{
168
 
171
 
169
    /*
172
    /*
170
     * Note that we don't lock the address space.
173
     * Note that we don't lock the address space.
171
     * That's correct - we can afford it here
174
     * That's correct - we can afford it here
172
     * because we only read members that are
175
     * because we only read members that are
173
     * currently read-only.
176
     * currently read-only.
174
     */
177
     */
175
 
178
 
176
#ifdef CONFIG_TSB
179
#ifdef CONFIG_TSB
177
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
180
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
178
 
181
 
179
    ASSERT(as->arch.itsb && as->arch.dtsb);
182
    ASSERT(as->arch.itsb && as->arch.dtsb);
180
 
183
 
181
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
184
    uintptr_t tsb = (uintptr_t) as->arch.itsb;
182
       
185
       
183
    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
186
    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
184
        /*
187
        /*
185
         * TSBs were allocated from memory not covered
188
         * TSBs were allocated from memory not covered
186
         * by the locked 4M kernel DTLB entry. We need
189
         * by the locked 4M kernel DTLB entry. We need
187
         * to demap the entry installed by as_install_arch().
190
         * to demap the entry installed by as_install_arch().
188
         */
191
         */
189
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
192
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
190
    }
193
    }
191
#endif
194
#endif
192
}
195
}
193
 
196
 
194
/** @}
 */
196
 
199