Subversion Repositories HelenOS

Rev

Rev 3862 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 3862 Rev 4129
1
/*
 * Copyright (c) 2006 Jakub Jermar
 * Copyright (c) 2009 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */
35
 
35
 
36
#include <arch/mm/tsb.h>
36
#include <arch/mm/tsb.h>
37
#include <arch/mm/pagesize.h>
37
#include <arch/mm/pagesize.h>
38
#include <arch/mm/tlb.h>
38
#include <arch/mm/tlb.h>
39
#include <arch/mm/page.h>
39
#include <arch/mm/page.h>
40
#include <arch/barrier.h>
40
#include <arch/barrier.h>
41
#include <mm/as.h>
41
#include <mm/as.h>
42
#include <arch/types.h>
42
#include <arch/types.h>
43
#include <macros.h>
43
#include <macros.h>
44
#include <debug.h>
44
#include <debug.h>
45
 
45
 
46
#define TSB_INDEX_MASK  ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
46
#define TSB_INDEX_MASK  ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
47
 
47
 
48
/** Invalidate portion of TSB.
48
/** Invalidate portion of TSB.
49
 *
49
 *
50
 * We assume that the address space is already locked. Note that respective
50
 * We assume that the address space is already locked. Note that respective
51
 * portions of both TSBs are invalidated at a time.
51
 * portions of both TSBs are invalidated at a time.
52
 *
52
 *
53
 * @param as    Address space.
53
 * @param as    Address space.
54
 * @param page  First page to invalidate in TSB.
54
 * @param page  First page to invalidate in TSB.
55
 * @param pages Number of pages to invalidate. Value of (count_t) -1 means the
55
 * @param pages Number of pages to invalidate. Value of (count_t) -1 means the
56
 *      whole TSB.
56
 *      whole TSB.
57
 */
57
 */
58
void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
58
void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
59
{
59
{
60
    index_t i0, i;
60
    index_t i0, i;
61
    count_t cnt;
61
    count_t cnt;
62
   
62
   
63
    ASSERT(as->arch.tsb_description.tsb_base);
63
    ASSERT(as->arch.tsb_description.tsb_base);
64
   
64
   
65
    i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
65
    i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
66
    ASSERT(i0 < TSB_ENTRY_COUNT);
66
    ASSERT(i0 < TSB_ENTRY_COUNT);
67
 
67
 
68
    if (pages == (count_t) - 1 || (pages) > TSB_ENTRY_COUNT)
68
    if (pages == (count_t) - 1 || (pages) > TSB_ENTRY_COUNT)
69
        cnt = TSB_ENTRY_COUNT;
69
        cnt = TSB_ENTRY_COUNT;
70
    else
70
    else
71
        cnt = pages;
71
        cnt = pages;
72
   
72
   
73
    for (i = 0; i < cnt; i++) {
73
    for (i = 0; i < cnt; i++) {
74
        ((tsb_entry_t *) as->arch.tsb_description.tsb_base)[
74
        ((tsb_entry_t *) as->arch.tsb_description.tsb_base)[
75
            (i0 + i) & (TSB_ENTRY_COUNT - 1)].tag.invalid = true;
75
            (i0 + i) & (TSB_ENTRY_COUNT - 1)].data.v = false;
76
    }
76
    }
77
}
77
}
78
 
78
 
79
/** Copy software PTE to ITSB.
79
/** Copy software PTE to ITSB.
80
 *
80
 *
81
 * @param t     Software PTE.
81
 * @param t     Software PTE.
82
 * @param index Zero if lower 8K-subpage, one if higher 8K subpage.
-
 
83
 */
82
 */
84
void itsb_pte_copy(pte_t *t, index_t index)
83
void itsb_pte_copy(pte_t *t)
85
{
84
{
86
    #if 0
-
 
87
    as_t *as;
85
    as_t *as;
88
    tsb_entry_t *tsb;
86
    tsb_entry_t *tsb;
89
    index_t entry;
87
    index_t entry;
90
 
88
 
91
    ASSERT(index <= 1);
-
 
92
   
-
 
93
    as = t->as;
89
    as = t->as;
94
    entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
90
    entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
95
    ASSERT(entry < ITSB_ENTRY_COUNT);
91
    ASSERT(entry < TSB_ENTRY_COUNT);
96
    tsb = &as->arch.itsb[entry];
92
    tsb = &((tsb_entry_t *) as->arch.tsb_description.tsb_base)[entry];
97
 
93
 
98
    /*
94
    /*
99
     * We use write barriers to make sure that the TSB load
95
     * We use write barriers to make sure that the TSB load
100
     * won't use inconsistent data or that the fault will
96
     * won't use inconsistent data or that the fault will
101
     * be repeated.
97
     * be repeated.
102
     */
98
     */
103
 
99
 
104
    tsb->tag.invalid = true;    /* invalidate the entry
-
 
105
                     * (tag target has this
100
    tsb->data.v = false;
106
                     * set to 0) */
-
 
107
 
101
 
108
    write_barrier();
102
    write_barrier();
109
 
103
 
110
    tsb->tag.context = as->asid;
104
    tsb->tag.context = as->asid;
111
    /* the shift is bigger than PAGE_WIDTH, do not bother with index  */
-
 
112
    tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
105
    tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
-
 
106
 
113
    tsb->data.value = 0;
107
    tsb->data.value = 0;
114
    tsb->data.size = PAGESIZE_8K;
108
    tsb->data.nfo = false;
115
    tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
109
    tsb->data.ra = t->frame >> MMU_FRAME_WIDTH;
-
 
110
    tsb->data.ie = false;
-
 
111
    tsb->data.e = false;
116
    tsb->data.cp = t->c;    /* cp as cache in phys.-idxed, c as cacheable */
112
    tsb->data.cp = t->c;    /* cp as cache in phys.-idxed, c as cacheable */
-
 
113
    tsb->data.cv = false;
117
    tsb->data.p = t->k; /* p as privileged, k as kernel */
114
    tsb->data.p = t->k; /* p as privileged, k as kernel */
-
 
115
    tsb->data.x = true;
-
 
116
    tsb->data.w = false;
118
    tsb->data.v = t->p; /* v as valid, p as present */
117
    tsb->data.size = PAGESIZE_8K;
119
   
118
   
120
    write_barrier();
119
    write_barrier();
121
   
120
   
122
    tsb->tag.invalid = false;   /* mark the entry as valid */
121
    tsb->data.v = t->p; /* v as valid, p as present */
123
    #endif
-
 
124
}
122
}
125
 
123
 
126
/** Copy software PTE to DTSB.
124
/** Copy software PTE to DTSB.
127
 *
125
 *
128
 * @param t Software PTE.
126
 * @param t Software PTE.
129
 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
-
 
130
 * @param ro    If true, the mapping is copied read-only.
127
 * @param ro    If true, the mapping is copied read-only.
131
 */
128
 */
132
void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
129
void dtsb_pte_copy(pte_t *t, bool ro)
133
{
130
{
134
    #if 0
-
 
135
    as_t *as;
131
    as_t *as;
136
    tsb_entry_t *tsb;
132
    tsb_entry_t *tsb;
137
    index_t entry;
133
    index_t entry;
138
   
-
 
139
    ASSERT(index <= 1);
-
 
140
 
134
 
141
    as = t->as;
135
    as = t->as;
142
    entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
136
    entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
143
    ASSERT(entry < DTSB_ENTRY_COUNT);
137
    ASSERT(entry < TSB_ENTRY_COUNT);
144
    tsb = &as->arch.dtsb[entry];
138
    tsb = &((tsb_entry_t *) as->arch.tsb_description.tsb_base)[entry];
145
 
139
 
146
    /*
140
    /*
147
     * We use write barriers to make sure that the TSB load
141
     * We use write barriers to make sure that the TSB load
148
     * won't use inconsistent data or that the fault will
142
     * won't use inconsistent data or that the fault will
149
     * be repeated.
143
     * be repeated.
150
     */
144
     */
151
 
145
 
152
    tsb->tag.invalid = true;    /* invalidate the entry
-
 
153
                     * (tag target has this
146
    tsb->data.v = false;
154
                     * set to 0) */
-
 
155
 
147
 
156
    write_barrier();
148
    write_barrier();
157
 
149
 
158
    tsb->tag.context = as->asid;
150
    tsb->tag.context = as->asid;
159
    /* the shift is bigger than PAGE_WIDTH, do not bother with index */
-
 
160
    tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
151
    tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
-
 
152
 
161
    tsb->data.value = 0;
153
    tsb->data.value = 0;
162
    tsb->data.size = PAGESIZE_8K;
154
    tsb->data.nfo = false;
163
    tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
155
    tsb->data.ra = t->frame >> MMU_FRAME_WIDTH;
164
    tsb->data.cp = t->c;
156
    tsb->data.ie = false;
-
 
157
    tsb->data.e = false;
-
 
158
    tsb->data.cp = t->c;    /* cp as cache in phys.-idxed, c as cacheable */
165
#ifdef CONFIG_VIRT_IDX_DCACHE
159
#ifdef CONFIG_VIRT_IDX_DCACHE
166
    tsb->data.cv = t->c;
160
    tsb->data.cv = t->c;
167
#endif /* CONFIG_VIRT_IDX_DCACHE */
161
#endif /* CONFIG_VIRT_IDX_DCACHE */
168
    tsb->data.p = t->k;     /* p as privileged */
162
    tsb->data.p = t->k; /* p as privileged, k as kernel */
-
 
163
    tsb->data.x = true;
169
    tsb->data.w = ro ? false : t->w;
164
    tsb->data.w = ro ? false : t->w;
170
    tsb->data.v = t->p;
165
    tsb->data.size = PAGESIZE_8K;
171
   
166
   
172
    write_barrier();
167
    write_barrier();
173
   
168
   
174
    tsb->tag.invalid = false;   /* mark the entry as valid */
169
    tsb->data.v = t->p; /* v as valid, p as present */
175
    #endif
-
 
176
}
170
}
177
 
171
 
178
/** @}
172
/** @}
179
 */
173
 */
180
 
174
 
181
 
175
 
182
 
176