Subversion Repositories HelenOS-historic

Comparison of Rev 1767 and Rev 1780. The legacy type names __address and __u32 are replaced throughout by uintptr_t and uint32_t; there are no other changes in this file. The listing below is the Rev 1780 version.
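For orientation, a minimal sketch of how the two naming schemes correspond. The typedefs below are assumed stand-ins for the kernel's own architecture-specific definitions in arch/types.h, not the repository's actual code.

/* Assumed stand-ins only; the authoritative definitions live in arch/types.h. */
typedef unsigned int  __u32;       /* Rev 1767 name for a 32-bit unsigned integer      */
typedef unsigned long __address;   /* Rev 1767 name for a pointer-sized integer        */

typedef __u32     uint32_t;        /* Rev 1780 spelling of the same 32-bit type        */
typedef __address uintptr_t;       /* Rev 1780 spelling of the same pointer-sized type */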
/*
 * Copyright (C) 2005 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ppc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch.h>
#include <arch/types.h>
#include <arch/exception.h>
#include <align.h>
#include <config.h>
#include <print.h>
#include <symtab.h>

static phte_t *phte;


/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
 * The as->lock must be held on entry to this function
 * if lock is true.
 *
 * @param as       Address space.
 * @param lock     Lock/unlock the address space.
 * @param badvaddr Faulting virtual address.
 * @param access   Access mode that caused the fault.
 * @param istate   Pointer to interrupted state.
 * @param pfrc     Pointer to variable where as_page_fault() return code will be stored.
 * @return         PTE on success, NULL otherwise.
 *
 */
static pte_t *find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access,
                     istate_t *istate, int *pfrc)
{
    /*
     * Check if the mapping exists in page tables.
     */
    pte_t *pte = page_mapping_find(as, badvaddr);
    if ((pte) && (pte->p)) {
        /*
         * Mapping found in page tables.
         * Immediately succeed.
         */
        return pte;
    } else {
        int rc;

        /*
         * Mapping not found in page tables.
         * Resort to higher-level page fault handler.
         */
        page_table_unlock(as, lock);
        switch (rc = as_page_fault(badvaddr, access, istate)) {
            case AS_PF_OK:
                /*
                 * The higher-level page fault handler succeeded;
                 * the mapping ought to be in place.
                 */
                page_table_lock(as, lock);
                pte = page_mapping_find(as, badvaddr);
                ASSERT((pte) && (pte->p));
                *pfrc = 0;
                return pte;
            case AS_PF_DEFER:
                page_table_lock(as, lock);
                *pfrc = rc;
                return NULL;
            case AS_PF_FAULT:
                page_table_lock(as, lock);
                printf("Page fault.\n");
                *pfrc = rc;
                return NULL;
            default:
                panic("unexpected rc (%d)\n", rc);
        }
    }
}


static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate)
{
    char *symbol = "";
    char *sym2 = "";

    char *s = get_symtab_entry(istate->pc);
    if (s)
        symbol = s;
    s = get_symtab_entry(istate->lr);
    if (s)
        sym2 = s;
    panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2);
}


static void pht_insert(const uintptr_t vaddr, const pfn_t pfn)
{
    uint32_t page = (vaddr >> 12) & 0xffff;
    uint32_t api = (vaddr >> 22) & 0x3f;
    uint32_t vsid;

    /* Read the segment register covering vaddr to obtain its VSID. */
    asm volatile (
        "mfsrin %0, %1\n"
        : "=r" (vsid)
        : "r" (vaddr)
    );

    /* Primary hash (xor) */
    uint32_t h = 0;
    uint32_t hash = vsid ^ page;
    uint32_t base = (hash & 0x3ff) << 3;
    uint32_t i;
    bool found = false;

    /* Find unused or colliding
       PTE in PTEG */
    for (i = 0; i < 8; i++) {
        if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
            found = true;
            break;
        }
    }

    if (!found) {
        /* Secondary hash (not) */
        uint32_t base2 = (~hash & 0x3ff) << 3;

        /* Find unused or colliding
           PTE in PTEG */
        for (i = 0; i < 8; i++) {
            if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) {
                found = true;
                base = base2;
                h = 1;
                break;
            }
        }

        if (!found) {
            // TODO: A/C precedence groups
            i = page % 8;
        }
    }

    phte[base + i].v = 1;
    phte[base + i].vsid = vsid;
    phte[base + i].h = h;
    phte[base + i].api = api;
    phte[base + i].rpn = pfn;
    phte[base + i].r = 0;
    phte[base + i].c = 0;
    phte[base + i].pp = 2; // FIXME
}
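
/*
 * Reader aid, not part of the original file: a sketch of what one hashed page
 * table entry (phte_t) presumably looks like, based on the classic 32-bit
 * PowerPC (OEA) HTAB entry format and on the field names used by pht_insert()
 * above. The authoritative definition lives in <arch/mm/page.h>; the exact
 * widths and the name phte_sketch_t below are assumptions.
 */
typedef struct {
    /* Word 0 */
    unsigned int v : 1;         /* valid */
    unsigned int vsid : 24;     /* virtual segment ID */
    unsigned int h : 1;         /* hash function selector (0 = primary, 1 = secondary) */
    unsigned int api : 6;       /* abbreviated page index */
    /* Word 1 */
    unsigned int rpn : 20;      /* real page number */
    unsigned int reserved0 : 3;
    unsigned int r : 1;         /* referenced bit */
    unsigned int c : 1;         /* changed bit */
    unsigned int wimg : 4;      /* storage attribute bits */
    unsigned int reserved1 : 1;
    unsigned int pp : 2;        /* page protection bits */
} phte_sketch_t;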


/** Process Instruction/Data Storage Interrupt
 *
 * @param data   True if Data Storage Interrupt.
 * @param istate Interrupted register context.
 *
 */
void pht_refill(bool data, istate_t *istate)
{
    uintptr_t badvaddr;
    pte_t *pte;
    int pfrc;
    as_t *as;
    bool lock;

    if (AS == NULL) {
        as = AS_KERNEL;
        lock = false;
    } else {
        as = AS;
        lock = true;
    }

    if (data) {
        /* Data Storage Interrupt: the faulting address is in DAR. */
        asm volatile (
            "mfdar %0\n"
            : "=r" (badvaddr)
        );
    } else
        badvaddr = istate->pc;

    page_table_lock(as, lock);

    pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfrc);
    if (!pte) {
        switch (pfrc) {
            case AS_PF_FAULT:
                goto fail;
                break;
            case AS_PF_DEFER:
                /*
                 * The page fault came during copy_from_uspace()
                 * or copy_to_uspace().
                 */
                page_table_unlock(as, lock);
                return;
            default:
                panic("Unexpected pfrc (%d)\n", pfrc);
        }
    }

    pte->a = 1; /* Record access to PTE */
    pht_insert(badvaddr, pte->pfn);

    page_table_unlock(as, lock);
    return;

fail:
    page_table_unlock(as, lock);
    pht_refill_fail(badvaddr, istate);
}


void pht_init(void)
{
    memsetb((uintptr_t) phte, 1 << PHT_BITS, 0);
}


void page_arch_init(void)
{
    if (config.cpu_active == 1) {
        page_mapping_operations = &pt_mapping_operations;

        uintptr_t cur;
        int flags;

        /* Frames below 128 MB are mapped using BAT;
           map the rest of the physical memory here. */
        for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
            flags = PAGE_CACHEABLE;
            if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
                flags |= PAGE_GLOBAL;
            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
        }

        /* Allocate page hash table */
        phte_t *physical_phte = (phte_t *) frame_alloc(PHT_ORDER, FRAME_KA | FRAME_ATOMIC);

        ASSERT((uintptr_t) physical_phte % (1 << PHT_BITS) == 0);
        pht_init();

        /* Install the physical base of the page hash table into SDR1. */
        asm volatile (
            "mtsdr1 %0\n"
            :
            : "r" ((uintptr_t) physical_phte)
        );
    }
}


uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%d bytes)", physaddr, size);

    uintptr_t virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);

    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

    return virtaddr;
}

/** @}
 */