/*
 * Copyright (C) 2005 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch.h>
#include <arch/types.h>
#include <arch/exception.h>
#include <align.h>
#include <config.h>
#include <print.h>
#include <symtab.h>

static phte_t *phte;

/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
 * The as->lock must be held on entry to this function
 * if lock is true.
 *
 * @param as       Address space.
 * @param lock     Lock/unlock the address space.
 * @param badvaddr Faulting virtual address.
 * @param access   Access mode that caused the fault.
 * @param istate   Pointer to interrupted state.
 * @param pfcr     Pointer to variable where as_page_fault() return code will be stored.
 * @return         PTE on success, NULL otherwise.
 *
 */
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
                     istate_t *istate, int *pfcr)
{
    /*
     * Check if the mapping exists in page tables.
     */
    pte_t *pte = page_mapping_find(as, badvaddr);
    if ((pte) && (pte->p)) {
        /*
         * Mapping found in page tables.
         * Immediately succeed.
         */
        return pte;
    } else {
        int rc;

        /*
         * Mapping not found in page tables.
         * Resort to higher-level page fault handler.
         */
        page_table_unlock(as, lock);
        switch (rc = as_page_fault(badvaddr, access, istate)) {
            case AS_PF_OK:
                /*
                 * The higher-level page fault handler succeeded,
                 * the mapping ought to be in place.
                 */
                page_table_lock(as, lock);
                pte = page_mapping_find(as, badvaddr);
                ASSERT((pte) && (pte->p));
                return pte;
            case AS_PF_DEFER:
                page_table_lock(as, lock);
                *pfcr = rc;
                return NULL;
            case AS_PF_FAULT:
                page_table_lock(as, lock);
                printf("Page fault.\n");
                *pfcr = rc;
                return NULL;
            default:
                panic("Unexpected rc (%d)\n", rc);
        }
    }
}
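

/** Fail to handle a PHT Refill Exception
 *
 * Resolve the interrupted program counter and link register to
 * symbol names (when available) and panic with the faulting address.
 *
 * @param badvaddr Faulting virtual address.
 * @param istate   Interrupted register context.
 *
 */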
static void pht_refill_fail(__address badvaddr, istate_t *istate)
{
    char *symbol = "";
    char *sym2 = "";

    char *s = get_symtab_entry(istate->pc);
    if (s)
        symbol = s;
    s = get_symtab_entry(istate->lr);
    if (s)
        sym2 = s;
    panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2);
}
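

/** Insert a mapping into the Page Hash Table
 *
 * Pick a free or colliding slot in the primary PTEG (hash of the VSID
 * and the page index) or, if the primary PTEG is full, in the secondary
 * PTEG (one's complement of the hash), and fill in the PTE.
 *
 * Illustrative example (with a hypothetical VSID of 0): for vaddr
 * 0x12345000 the page index is 0x2345, so the primary PTEG starts at
 * PHT index (0x2345 & 0x3ff) << 3 = 0x1a28 and the secondary PTEG at
 * (~0x2345 & 0x3ff) << 3 = 0x5d0.
 *
 * @param vaddr Virtual address of the mapping.
 * @param pfn   Physical frame number the address maps to.
 *
 */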
static void pht_insert(const __address vaddr, const pfn_t pfn)
{
    __u32 page = (vaddr >> 12) & 0xffff;
    __u32 api = (vaddr >> 22) & 0x3f;
    __u32 vsid;

    /* Read the VSID from the segment register selected by vaddr */
    asm volatile (
        "mfsrin %0, %1\n"
        : "=r" (vsid)
        : "r" (vaddr)
    );

    /* Primary hash (xor) */
    __u32 h = 0;
    __u32 hash = vsid ^ page;
    __u32 base = (hash & 0x3ff) << 3;
    __u32 i;
    bool found = false;

    /* Find unused or colliding
       PTE in PTEG */
    for (i = 0; i < 8; i++) {
        if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
            found = true;
            break;
        }
    }

    if (!found) {
        /* Secondary hash (not) */
        __u32 base2 = (~hash & 0x3ff) << 3;

        /* Find unused or colliding
           PTE in PTEG */
        for (i = 0; i < 8; i++) {
            if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) {
                found = true;
                base = base2;
                h = 1;
                break;
            }
        }

        if (!found) {
            // TODO: A/C precedence groups
            i = page % 8;
        }
    }

    phte[base + i].v = 1;
    phte[base + i].vsid = vsid;
    phte[base + i].h = h;
    phte[base + i].api = api;
    phte[base + i].rpn = pfn;
    phte[base + i].r = 0;
    phte[base + i].c = 0;
    phte[base + i].pp = 2; // FIXME
}


/** Process Instruction/Data Storage Interrupt
 *
 * @param data   True if Data Storage Interrupt.
 * @param istate Interrupted register context.
 *
 */
void pht_refill(bool data, istate_t *istate)
{
    __address badvaddr;
    pte_t *pte;
    int pfcr;
    as_t *as;
    bool lock;

    if (AS == NULL) {
        as = AS_KERNEL;
        lock = false;
    } else {
        as = AS;
        lock = true;
    }

    if (data) {
        /* The faulting data address is in the DAR register */
        asm volatile (
            "mfdar %0\n"
            : "=r" (badvaddr)
        );
    } else
        badvaddr = istate->pc;

    page_table_lock(as, lock);

    pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfcr);
    if (!pte) {
        switch (pfcr) {
            case AS_PF_FAULT:
                goto fail;
                break;
            case AS_PF_DEFER:
                /*
                 * The page fault came during copy_from_uspace()
                 * or copy_to_uspace().
                 */
                page_table_unlock(as, lock);
                return;
            default:
                panic("Unexpected pfcr (%d)\n", pfcr);
        }
    }

    pte->a = 1; /* Record access to PTE */
    pht_insert(badvaddr, pte->pfn);

    page_table_unlock(as, lock);
    return;

fail:
    page_table_unlock(as, lock);
    pht_refill_fail(badvaddr, istate);
}
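

/** Clear the Page Hash Table
 *
 * Zero the whole table, thus invalidating all of its entries.
 *
 */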
void pht_init(void)
{
    memsetb((__address) phte, 1 << PHT_BITS, 0);
}
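

/** Initialize architecture-specific paging
 *
 * On the bootstrap processor: install the generic page table
 * operations, map the physical memory above the BAT-mapped first
 * 128 MB into the kernel address space, allocate the Page Hash
 * Table and activate it via the SDR1 register.
 *
 */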
void page_arch_init(void)
{
    if (config.cpu_active == 1) {
        page_mapping_operations = &pt_mapping_operations;

        __address cur;
        int flags;

        /* Frames below 128 MB are mapped using BAT,
           map rest of the physical memory */
        for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
            flags = PAGE_CACHEABLE;
            if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
                flags |= PAGE_GLOBAL;
            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
        }

        /* Allocate page hash table */
        phte_t *physical_phte = (phte_t *) PFN2ADDR(frame_alloc(PHT_ORDER, FRAME_KA | FRAME_PANIC));
        phte = (phte_t *) PA2KA((__address) physical_phte);

        ASSERT((__address) physical_phte % (1 << PHT_BITS) == 0);
        pht_init();

        /* Point SDR1 at the physical base of the Page Hash Table */
        asm volatile (
            "mtsdr1 %0\n"
            :
            : "r" ((__address) physical_phte)
        );
    }
}
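

/** Map a physical memory area into the kernel address space
 *
 * The virtual range starts at PA2KA(last_frame) and the mapping is
 * created with caching disabled (the area is expected to be
 * memory-mapped hardware).
 *
 * @param physaddr Physical address of the area.
 * @param size     Size of the area.
 *
 * @return Virtual address of the mapped area.
 *
 */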
__address hw_map(__address physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%d bytes)", physaddr, size);

    __address virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);

    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

    return virtaddr;
}