Subversion Repositories HelenOS

Rev 2048 → Rev 2071
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/as.h>
#include <arch/mm/tlb.h>
#include <genarch/mm/as_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <debug.h>
#include <config.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#include <arch/memstr.h>
#include <synch/mutex.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <bitops.h>
#include <macros.h>
#endif /* CONFIG_TSB */

/** Architecture dependent address space init. */
void as_arch_init(void)
{
    if (config.cpu_active == 1) {
        as_operations = &as_ht_operations;
        asid_fifo_init();
    }
}

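/** Allocate and initialize the I-TSB and D-TSB of a new address space.
 *
 * The TSBs are allocated only when CONFIG_TSB is enabled; otherwise this
 * function does nothing.
 *
 * @param as Address space.
 * @param flags Frame allocation flags.
 *
 * @return 0 on success, -1 if the TSB frames could not be allocated.
 */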
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t)) >> FRAME_WIDTH);
    uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);

    if (!tsb)
        return -1;

    as->arch.itsb = (tsb_entry_t *) tsb;
    as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
        sizeof(tsb_entry_t));
    memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
        * sizeof(tsb_entry_t), 0);
#endif
    return 0;
}

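/** Release the TSB frames of an address space that is being destroyed.
 *
 * @param as Address space.
 *
 * @return Number of frames freed, or 0 when CONFIG_TSB is disabled.
 */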
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
    count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t)) >> FRAME_WIDTH;
    frame_free(KA2PA((uintptr_t) as->arch.itsb));
    return cnt;
#else
    return 0;
#endif
}

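/** Finish architecture-specific construction of a new address space.
 *
 * Invalidates all entries of the freshly allocated TSBs.
 *
 * @param as Address space.
 * @param flags Flags (unused by this function).
 *
 * @return Always 0.
 */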
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock_active(&as->lock);   /* completely unnecessary, but polite */
    tsb_invalidate(as, 0, (count_t) -1);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
#endif
    return 0;
}

/** Perform sparc64-specific tasks when an address space becomes active on the
 * processor.
 *
 * Install ASID and map TSBs.
 *
 * @param as Address space.
 */
void as_install_arch(as_t *as)
{
    tlb_context_reg_t ctx;

    /*
     * Note that we don't lock the address space.
     * That's correct - we can afford it here
     * because we only read members that are
     * currently read-only.
     */

    /*
     * Write ASID to secondary context register.
     * The primary context register has to be set
     * from TL>0 so it will be filled from the
     * secondary context register from the TL=1
     * code just before switch to userspace.
     */
    ctx.v = 0;
    ctx.context = as->asid;
    mmu_secondary_context_write(ctx.v);

#ifdef CONFIG_TSB
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    ASSERT(as->arch.itsb && as->arch.dtsb);

    uintptr_t tsb = (uintptr_t) as->arch.itsb;

    if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
        /*
         * TSBs were allocated from memory not covered
         * by the locked 4M kernel DTLB entry. We need
         * to map both TSBs explicitly.
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
    }

    /*
     * Setup TSB Base registers.
     */
    tsb_base_reg_t tsb_base;

    tsb_base.value = 0;
    tsb_base.size = TSB_SIZE;
    tsb_base.split = 0;

    tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
    itsb_base_write(tsb_base.value);
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
    dtsb_base_write(tsb_base.value);
#endif
}

/** Perform sparc64-specific tasks when an address space is removed from the
 * processor.
 *
 * Demap TSBs.
 *
 * @param as Address space.
 */
void as_deinstall_arch(as_t *as)
{
    /*
     * Note that we don't lock the address space.
     * That's correct - we can afford it here
     * because we only read members that are
     * currently read-only.
     */

#ifdef CONFIG_TSB
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    ASSERT(as->arch.itsb && as->arch.dtsb);

    uintptr_t tsb = (uintptr_t) as->arch.itsb;

    if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
        /*
         * TSBs were allocated from memory not covered
         * by the locked 4M kernel DTLB entry. We need
         * to demap the entry installed by as_install_arch().
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
    }
#endif
}

/** @}
 */