Subversion Repositories HelenOS


--- Rev 1890
+++ Rev 1891
 /*
  * Copyright (C) 2006 Jakub Jermar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * - Redistributions of source code must retain the above copyright
  *   notice, this list of conditions and the following disclaimer.
  * - Redistributions in binary form must reproduce the above copyright
  *   notice, this list of conditions and the following disclaimer in the
  *   documentation and/or other materials provided with the distribution.
  * - The name of the author may not be used to endorse or promote products
  *   derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 /** @addtogroup sparc64mm
  * @{
  */
 /** @file
  */
 
 #include <arch/mm/as.h>
 #include <arch/mm/tlb.h>
 #include <genarch/mm/as_ht.h>
 #include <genarch/mm/asid_fifo.h>
 #include <debug.h>
 
 #ifdef CONFIG_TSB
 #include <arch/mm/tsb.h>
+#include <arch/memstr.h>
+#include <synch/mutex.h>
+#include <arch/asm.h>
+#include <mm/frame.h>
+#include <bitops.h>
+#include <macros.h>
 #endif
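
Note: the six headers added here appear to supply the symbols used by the new routines below and by the TSB handling code: presumably frame_alloc()/frame_free() from mm/frame.h, fnzb32() from bitops.h, memsetb() from arch/memstr.h, the mutex primitives from synch/mutex.h, interrupt control from arch/asm.h, and overlaps()/ALIGN_DOWN() from macros.h.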
 
 /** Architecture dependent address space init. */
 void as_arch_init(void)
 {
     as_operations = &as_ht_operations;
     asid_fifo_init();
 }
 
+int as_constructor_arch(as_t *as, int flags)
+{
+#ifdef CONFIG_TSB
+    int order = fnzb32(((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH);
+    uintptr_t tsb = (uintptr_t) frame_alloc(order, flags);
+
+    if (!tsb)
+        return -1;
+
+    as->arch.itsb = (tsb_entry_t *) tsb;
+    as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));
+#endif
+    return 0;
+}
+
+int as_destructor_arch(as_t *as)
+{
+#ifdef CONFIG_TSB
+    count_t cnt = ((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH;
+    frame_free((uintptr_t) as->arch.itsb);
+    return cnt;
+#else
+    return 0;
+#endif
+}
+
+int as_create_arch(as_t *as, int flags)
+{
+#ifdef CONFIG_TSB
+    ipl_t ipl;
+
+    memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t), 0);
+    ipl = interrupts_disable();
+    mutex_lock_active(&as->lock);   /* completely unnecessary, but polite */
+    tsb_invalidate(as, 0, (count_t) -1);
+    mutex_unlock(&as->lock);
+    interrupts_restore(ipl);
+#endif
+    return 0;
+}
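
The allocation order computed in as_constructor_arch() above is the base-2 logarithm of the number of frames occupied by both TSBs together; fnzb32() is assumed here to return the index of the most significant set bit, so for the power-of-two sizes involved it acts as log2. A minimal stand-alone sketch of that arithmetic, using assumed UltraSPARC-style constants (8 KiB frames, 16-byte TSB entries, 512 entries per TSB) rather than the real header values:

#include <stdint.h>
#include <stdio.h>

/* Assumed values; the real ones live in the sparc64 and TSB headers. */
#define FRAME_WIDTH      13     /* 8 KiB frames */
#define ITSB_ENTRY_COUNT 512
#define DTSB_ENTRY_COUNT 512
#define TSB_ENTRY_SIZE   16     /* stand-in for sizeof(tsb_entry_t) */

/* Local stand-in for fnzb32(): index of the most significant set bit. */
static int fnzb32(uint32_t arg)
{
    int n = -1;

    while (arg) {
        arg >>= 1;
        n++;
    }
    return n;
}

int main(void)
{
    uint32_t bytes = (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * TSB_ENTRY_SIZE;
    uint32_t frames = bytes >> FRAME_WIDTH;   /* 16384 B >> 13 == 2 frames */
    int order = fnzb32(frames);               /* log2(2) == 1 */

    printf("TSB block: %u bytes, %u frames, allocation order %d\n",
        (unsigned) bytes, (unsigned) frames, order);
    printf("dtsb starts %u bytes into the block\n",
        (unsigned) (ITSB_ENTRY_COUNT * TSB_ENTRY_SIZE));
    return 0;
}

With these assumed numbers the two TSBs fill 16 KiB, i.e. two frames, so frame_alloc() is asked for an order-1 (two-frame) physically contiguous block and as->arch.dtsb simply points ITSB_ENTRY_COUNT entries past as->arch.itsb.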
+
 /** Perform sparc64-specific tasks when an address space becomes active on the processor.
  *
  * Install ASID and map TSBs.
  *
  * @param as Address space.
  */
 void as_install_arch(as_t *as)
 {
     tlb_context_reg_t ctx;
 
     /*
      * Note that we don't lock the address space.
      * That's correct - we can afford it here
      * because we only read members that are
      * currently read-only.
      */
 
     /*
      * Write ASID to secondary context register.
      * The primary context register has to be set
      * from TL>0 so it will be filled from the
      * secondary context register from the TL=1
      * code just before switch to userspace.
      */
     ctx.v = 0;
     ctx.context = as->asid;
     mmu_secondary_context_write(ctx.v);
 
 #ifdef CONFIG_TSB
-    if (as != AS_KERNEL) {
-        uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
-
-        ASSERT(as->arch.itsb && as->arch.dtsb);
-
-        uintptr_t tsb = as->arch.itsb;
-
-        if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
-            /*
-             * TSBs were allocated from memory not covered
-             * by the locked 4M kernel DTLB entry. We need
-             * to map both TSBs explicitly.
-             */
-            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
-            dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
-        }
-
-        /*
-         * Setup TSB Base registers.
-         */
-        tsb_base_reg_t tsb_base;
-
-        tsb_base.value = 0;
-        tsb_base.size = TSB_SIZE;
-        tsb_base.split = 0;
-
-        tsb_base.base = as->arch.itsb >> PAGE_WIDTH;
-        itsb_base_write(tsb_base.value);
-        tsb_base.base = as->arch.dtsb >> PAGE_WIDTH;
-        dtsb_base_write(tsb_base.value);
-    }
+    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
+
+    ASSERT(as->arch.itsb && as->arch.dtsb);
+
+    uintptr_t tsb = (uintptr_t) as->arch.itsb;
+
+    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+        /*
+         * TSBs were allocated from memory not covered
+         * by the locked 4M kernel DTLB entry. We need
+         * to map both TSBs explicitly.
+         */
+        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
+        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
+    }
+
+    /*
+     * Setup TSB Base registers.
+     */
+    tsb_base_reg_t tsb_base;
+
+    tsb_base.value = 0;
+    tsb_base.size = TSB_SIZE;
+    tsb_base.split = 0;
+
+    tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
+    itsb_base_write(tsb_base.value);
+    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
+    dtsb_base_write(tsb_base.value);
 #endif
 }
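
The overlaps() test in as_install_arch() above decides whether the TSBs already fall inside the locked 4M kernel DTLB entry (config.base rounded down to a 4M boundary) or whether the extra 64K DTLB mapping has to be installed for them. Below is a small stand-alone sketch of that decision; the overlaps() semantics (half-open interval intersection) and the constants KERNEL_PAGE_WIDTH == 22 and PAGE_WIDTH == 13 are assumptions for illustration, not values taken from this revision:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed geometry, not taken from this revision. */
#define PAGE_WIDTH        13                  /* 8 KiB base pages */
#define PAGE_SIZE         (1 << PAGE_WIDTH)
#define KERNEL_PAGE_WIDTH 22                  /* locked 4M kernel DTLB entry */

#define ALIGN_DOWN(a, b)  ((a) & ~((uintptr_t) (b) - 1))

/* Assumed overlaps() semantics: do [s1, s1 + sz1) and [s2, s2 + sz2) intersect? */
static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
    return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

int main(void)
{
    uintptr_t kernel_base = 0x00400000;       /* hypothetical config.base */
    uintptr_t base = ALIGN_DOWN(kernel_base, (uintptr_t) 1 << KERNEL_PAGE_WIDTH);

    /* One TSB placed inside the locked 4M entry, one outside it. */
    uintptr_t tsb_inside = base + 0x100000;
    uintptr_t tsb_outside = base + ((uintptr_t) 1 << KERNEL_PAGE_WIDTH) + 0x2000;

    printf("inside:  extra 64K mapping needed? %s\n",
        overlaps(tsb_inside, 8 * PAGE_SIZE, base, (uintptr_t) 1 << KERNEL_PAGE_WIDTH) ? "no" : "yes");
    printf("outside: extra 64K mapping needed? %s\n",
        overlaps(tsb_outside, 8 * PAGE_SIZE, base, (uintptr_t) 1 << KERNEL_PAGE_WIDTH) ? "no" : "yes");
    return 0;
}

as_deinstall_arch() below applies the same test to decide whether the 64K entry installed here has to be demapped again.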
 
 /** Perform sparc64-specific tasks when an address space is removed from the processor.
  *
  * Demap TSBs.
  *
  * @param as Address space.
  */
 void as_deinstall_arch(as_t *as)
 {
 
     /*
      * Note that we don't lock the address space.
      * That's correct - we can afford it here
      * because we only read members that are
      * currently read-only.
      */
 
 #ifdef CONFIG_TSB
-    if (as != AS_KERNEL) {
-        uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
-
-        ASSERT(as->arch.itsb && as->arch.dtsb);
-
-        uintptr_t tsb = as->arch.itsb;
-
-        if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
-            /*
-             * TSBs were allocated from memory not covered
-             * by the locked 4M kernel DTLB entry. We need
-             * to demap the entry installed by as_install_arch().
-             */
-            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
-        }
-
-    }
+    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
+
+    ASSERT(as->arch.itsb && as->arch.dtsb);
+
+    uintptr_t tsb = (uintptr_t) as->arch.itsb;
+
+    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+        /*
+         * TSBs were allocated from memory not covered
+         * by the locked 4M kernel DTLB entry. We need
+         * to demap the entry installed by as_install_arch().
+         */
+        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
+    }
 #endif
 }
 
 /** @}
  */
 