Subversion Repositories HelenOS

Diff from Rev 4277 to Rev 4282; the single changed line is marked below with - (Rev 4277) and + (Rev 4282).
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64interrupt
 * @{
 */
/**
 * @file
 * @brief This file contains fast MMU trap handlers.
 */

#ifndef KERN_sparc64_MMU_TRAP_H_
#define KERN_sparc64_MMU_TRAP_H_

#include <arch/stack.h>
#include <arch/regdef.h>
#include <arch/mm/tlb.h>
#include <arch/mm/mmu.h>
#include <arch/mm/tte.h>
#include <arch/trap/regwin.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif
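
/*
 * Trap type codes of the fast MMU traps. Consecutive codes are four
 * 32-byte trap table entries apart (4 x 32 = 128 bytes), which matches
 * FAST_MMU_HANDLER_SIZE below.
 */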
 
#define TT_FAST_INSTRUCTION_ACCESS_MMU_MISS 0x64
#define TT_FAST_DATA_ACCESS_MMU_MISS        0x68
#define TT_FAST_DATA_ACCESS_PROTECTION      0x6c

#define FAST_MMU_HANDLER_SIZE           128

#ifdef __ASM__

.macro FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
    /*
     * First, try to refill TLB from TSB.
     */
#ifdef CONFIG_TSB
    ldxa [%g0] ASI_IMMU, %g1            ! read TSB Tag Target Register
    ldxa [%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g2    ! read TSB 8K Pointer
    ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4        ! 16-byte atomic load into %g4 and %g5
    cmp %g1, %g4                    ! is this the entry we are looking for?
    bne,pn %xcc, 0f
    nop
    stxa %g5, [%g0] ASI_ITLB_DATA_IN_REG        ! copy mapping from ITSB to ITLB
    retry
#endif

0:
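    /*
     * Switch from the MM globals.
     */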
    wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
    PREEMPTIBLE_HANDLER fast_instruction_access_mmu_miss
.endm

.macro FAST_DATA_ACCESS_MMU_MISS_HANDLER tl
    /*
     * First, try to refill TLB from TSB.
     */

#ifdef CONFIG_TSB
    ldxa [%g0] ASI_DMMU, %g1            ! read TSB Tag Target Register
    srlx %g1, TSB_TAG_TARGET_CONTEXT_SHIFT, %g2 ! is this a kernel miss?
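    ! a kernel miss (zero Context) bypasses the TSB lookup and is handled below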
    brz,pn %g2, 0f
    ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g3    ! read TSB 8K Pointer
    ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4        ! 16-byte atomic load into %g4 and %g5
    cmp %g1, %g4                    ! is this the entry we are looking for?
    bne,pn %xcc, 0f
    nop
    stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG        ! copy mapping from DTSB to DTLB
    retry
#endif

    /*
     * Second, test if it is the portion of the kernel address space
     * which is faulting. If that is the case, immediately create
     * identity mapping for that page in DTLB. VPN 0 is excluded from
     * this treatment.
     *
     * Note that branch-delay slots are used in order to save space.
     */
0:
    sethi %hi(fast_data_access_mmu_miss_data_hi), %g7
    wr %g0, ASI_DMMU, %asi
    ldxa [VA_DMMU_TAG_ACCESS] %asi, %g1     ! read the faulting Context and VPN
    set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
    andcc %g1, %g2, %g3             ! get Context
    bnz %xcc, 0f                    ! Context is non-zero
    andncc %g1, %g2, %g3                ! get page address into %g3
    bz  %xcc, 0f                    ! page address is zero
    ldx [%g7 + %lo(end_of_identity)], %g4
    cmp %g3, %g4
    bgeu %xcc, 0f

    ldx [%g7 + %lo(kernel_8k_tlb_data_template)], %g2
-   or %g3, %g2, %g2                    ! Rev 4277
+   add %g3, %g2, %g2                   ! Rev 4282
    stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG        ! identity map the kernel page
    retry

    /*
     * Third, catch and handle special cases when the trap is caused by
     * the userspace register window spill or fill handler. In case
     * one of these two traps caused this trap, we just lower the trap
     * level and service the DTLB miss. In the end, we restart
     * the offending SAVE or RESTORE.
     */
0:
.if (\tl > 0)
    wrpr %g0, 1, %tl
.endif

    /*
     * Switch from the MM globals.
     */
    wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate

    /*
     * Read the Tag Access register for the higher-level handler.
     * This is necessary to survive nested DTLB misses.
     */
    ldxa [VA_DMMU_TAG_ACCESS] %asi, %g2

    /*
     * g2 will be passed as an argument to fast_data_access_mmu_miss().
     */
    PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
.endm

.macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl
    /*
     * The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER.
     */

.if (\tl > 0)
    wrpr %g0, 1, %tl
.endif

    /*
     * Switch from the MM globals.
     */
    wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate

    /*
     * Read the Tag Access register for the higher-level handler.
     * This is necessary to survive nested DTLB misses.
     */
    mov VA_DMMU_TAG_ACCESS, %g2
    ldxa [%g2] ASI_DMMU, %g2

    /*
     * g2 will be passed as an argument to fast_data_access_protection().
     */
    PREEMPTIBLE_HANDLER fast_data_access_protection
.endm

#endif /* __ASM__ */

#endif

/** @}
 */
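
For context, these handler macros are meant to be expanded directly in the sparc64 trap table, each at the offset given by its TT_* code; the two data handlers take a tl argument naming the trap level at which they are instantiated. A minimal usage sketch follows, assuming a trap_table label, an ENTRY_SIZE constant of 32 bytes per trap table entry, and an illustrative handler label; none of these names are defined in this header.

    ! hypothetical trap table fragment; only the TT_* constant and the
    ! handler macro come from the header above
    .org trap_table + TT_FAST_DATA_ACCESS_MMU_MISS * ENTRY_SIZE
    .global fast_data_access_mmu_miss_handler_tl0
    fast_data_access_mmu_miss_handler_tl0:
        FAST_DATA_ACCESS_MMU_MISS_HANDLER 0

With tl = 0 the trap-level test in the macro is skipped; a tl > 0 instantiation would first reset %tl to 1 (wrpr %g0, 1, %tl) before invoking the preemptible handler, as seen in the macro bodies above.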