Subversion Repositories HelenOS

Rev 2277 → Rev 2278. The change removes #include <arch.h> and the block of locally used fault-handling code (the fault_status_t and instruction_t helper types, print_istate, read_fault_status_register, read_fault_address_register, get_memmory_access_type, data_abort and prefetch_abort); page_arch_init, hw_map and the rest of the file are unchanged. The file is shown below once, as of Rev 2277.

/*
 * Copyright (c) 2007 Pavel Jancik, Michal Kebrt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup arm32mm
 * @{
 */
/** @file
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch.h>
#include <mm/page.h>
#include <align.h>
#include <config.h>
#include <arch/exception.h>
#include <typedefs.h>
#include <arch/types.h>
#include <interrupt.h>
#include <arch/debug_print/print.h>


// locally used types

/**
 * Describes the structure of the fault status register in coprocessor 15.
 */
typedef struct {
        unsigned status         : 3;
        unsigned domain         : 4;
        unsigned zero           : 1;
        unsigned should_be_zero : 24;
} __attribute__ ((packed)) fault_status_t;

/**
 * Helper union used for casting an integer value to the fault_status_t type.
 */
typedef union {
    fault_status_t fsr;
    uint32_t dummy;
} fault_status_union_t;

/**
 * Very simplified description of the instruction code structure, intended for
 * recognizing the memory access of an instruction (reads from or writes to memory).
 * For more details see the ARM Architecture Reference Manual, chapter 3.1
 * (Instruction set encoding).
 */
typedef struct {
        unsigned dummy1     : 4;
        unsigned bit4       : 1;
        unsigned bits567    : 3;
        unsigned dummy      : 12;
        unsigned access     : 1;
        unsigned opcode     : 4;
        unsigned instr_type : 3;
        unsigned condition  : 4;
} __attribute__ ((packed)) instruction_t;

/**
 * Helper union used for casting the ip register value (uint32_t) to an
 * instruction_t pointer.
 */
typedef union {
    instruction_t *instr;
    uint32_t ip;
} instruction_union_t;
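
/* Illustrative example: the word 0xE5910000 encodes "ldr r0, [r1]".  Viewed
 * through instruction_t (assuming the LSB-first bit-field layout that this
 * code already relies on), condition == 0xE (always), instr_type == 0x2
 * (load/store with immediate offset) and access == 1 (the L bit, a load),
 * which is the pattern that get_memmory_access_type() below classifies as
 * PF_ACCESS_READ.
 */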

// locally used functions
static fault_status_t read_fault_status_register(void);
static uintptr_t read_fault_address_register(void);
static pf_access_t get_memmory_access_type(uint32_t instr_addr, uintptr_t badvaddr);


/**
 * Initializes the kernel address space page tables and sets the abort exception vectors.
 */
void page_arch_init(void)
{
    uintptr_t cur;
    int flags;

    page_mapping_operations = &pt_mapping_operations;

    flags = PAGE_CACHEABLE;

    /* PA2KA(identity) mapping for all frames up to last_frame */
    for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
    }
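    /* For illustration, assuming PA2KA() adds a 0x80000000 kernel base (the
     * actual offset is whatever this port defines): physical frame 0x00100000
     * would become visible to the kernel at virtual address 0x80100000, so
     * every frame below last_frame is reachable through this window. */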

    // Create mapping for the exception table at the high offset
    #ifdef HIGH_EXCEPTION_VECTORS
    /* Note: this mapping cannot be done by hw_map because the exception
     * vector is stored at a fixed virtual address.
     */
    // reserve a frame for the exception table
    void *virtaddr = frame_alloc(ONE_FRAME, FRAME_KA);
    page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, KA2PA(virtaddr), flags);
    #endif

    // TODO: move to the kernel space
//  page_mapping_insert(AS_KERNEL, 0xffff0000, 0x00000000, flags);
    // TODO: remove when aux_printf is not needed
    page_mapping_insert(AS_KERNEL, 0x10000000, 0x10000000, flags);


    as_switch(NULL, AS_KERNEL);

}

/**
 * Map a device into kernel space.
 *
 * This function adds a mapping of a physical address that is read/write
 * only from the kernel and not bufferable.
 *
 * \param physaddr Physical address where the device is connected
 * \param size Length of the area where the device is present
 * \return Virtual address where the device will be accessible
 * Note: This is a copy of the IA32 hw_map code
 */
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%d bytes)", physaddr, size);

    uintptr_t virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i),
            PAGE_NOT_CACHEABLE | PAGE_READ | PAGE_WRITE | PAGE_KERNEL);

    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

    return virtaddr;
}
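
/* Usage sketch; the device address below is hypothetical and only illustrates
 * the calling convention:
 *
 *     uintptr_t regs = hw_map(0x101f1000, PAGE_SIZE);
 *
 * regs would then point to a non-cacheable, kernel-only read/write mapping of
 * one page of device registers starting at physical address 0x101f1000.
 */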

// TODO: remove in final version
static void print_istate(istate_t *istate);
static void print_istate(istate_t *istate) {
    dprintf("\nIstate dump:\n");
    dprintf("    r0:%X    r1:%X    r2:%X    r3:%X\n", istate->r0,  istate->r1, istate->r2,  istate->r3);
    dprintf("    r4:%X    r5:%X    r6:%X    r7:%X\n", istate->r4,  istate->r5, istate->r6,  istate->r7);
    dprintf("    r8:%X    r9:%X   r10:%X   r11:%X\n", istate->r8,  istate->r9, istate->r10, istate->r11);
    dprintf("   r12:%X    sp:%X    lr:%X  spsr:%X\n", istate->r12, istate->sp, istate->lr,  istate->spsr);
}

/**
 * \return Value stored in the fault status register
 */
static fault_status_t read_fault_status_register(void) {
    fault_status_union_t tmp;
    asm volatile (
        "mrc p15, 0, %0, c5, c0, 0"
        : "=r"(tmp.dummy)
    );
    return tmp.fsr;
}

/**
 * \return Virtual address whose access caused the exception
 */
static uintptr_t read_fault_address_register(void) {
    uintptr_t tmp;
    // The fault address is stored in coprocessor 15, register 6
    asm volatile (
        "mrc p15, 0, %0, c6, c0, 0"
        : "=r"(tmp)
    );
    return tmp;
}
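
/* Background note: on ARMv4/ARMv5, CP15 register c5 is the Fault Status
 * Register (its status field encodes the abort type, e.g. translation or
 * permission fault) and c6 is the Fault Address Register holding the virtual
 * address whose access aborted; the two MRC instructions above copy these
 * registers into general-purpose registers.
 */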

/**
 * Decode an instruction and decide whether it tries to read or write memory.
 *
 * \param instr_addr Address of the instruction which attempted the memory access
 * \param badvaddr Virtual address which the instruction tried to access
 * \return Type of memory access
 *  Note: returns PF_ACCESS_EXEC if there is no memory access
 */
// TODO: remove debug prints in final version ... instead of panic, return PF_ACCESS_EXEC
pf_access_t get_memmory_access_type(uint32_t instr_addr, uintptr_t badvaddr) {
    instruction_union_t tmp;
    tmp.ip = instr_addr;
    // get the instruction op code
    instruction_t i_code = *(tmp.instr);

    dprintf("get_instruction_memmory_access\n");
    dprintf(" i_code:%X\n", i_code);
    dprintf(" i_code.condition:%d\n", i_code.condition);
    dprintf(" i_code.instr_type:%d\n", i_code.instr_type);
    dprintf(" i_code.opcode:%d\n", i_code.opcode);
    dprintf(" i_code.access:%d\n", i_code.access);
    dprintf(" i_code.dummy:%d\n", i_code.dummy);
    dprintf(" i_code.bits567:%d\n", i_code.bits567);
    dprintf(" i_code.bit4:%d\n", i_code.bit4);
    dprintf(" i_code.dummy1:%d\n", i_code.dummy1);


    // undefined instructions ... (or special instructions)
    if (i_code.condition == 0xf) {
        panic("page_fault - on instruction not accessing memory (instr_code:%X, badvaddr:%X)", i_code, badvaddr);
        return PF_ACCESS_EXEC;
    }

    // load/store instructions
    if ((i_code.instr_type == 0x2) ||                      // load/store immediate offset
        (i_code.instr_type == 0x3 && i_code.bit4 == 0) ||  // load/store register offset
        (i_code.instr_type == 0x4) ||                      // load/store multiple
        (i_code.instr_type == 0x6)                         // coprocessor load/store
       ) {
        if (i_code.access == 1) {
            return PF_ACCESS_READ;
        } else {
            return PF_ACCESS_WRITE;
        }
    }

    // swp, swpb instructions
    if (i_code.instr_type == 0x0 && (i_code.opcode == 0x8 || i_code.opcode == 0xA) &&
        i_code.access == 0x0 && i_code.bits567 == 0x4 && i_code.bit4 == 1)
    {
        /* Swap instructions read and write in one step.
         * The type of access that caused the exception has to be determined
         * from the page tables and the access rights.
         */
// TODO: ALF!!!!! can't use AS, as it is defined as THE->as and the THE structure is stored after stack_base of the current thread,
//       but now ... in exceptions we have separate stacks <==> a different stack_pointer ... so AS contains nonsense data
//       same case as as_page_fault .... it's necessary to solve the "stack" problem
        pte_level1_t *pte = (pte_level1_t *) pt_mapping_operations.mapping_find(AS, badvaddr);

        ASSERT(pte);

        /* Check whether a read is possible.
         * Note: Don't check PTE_READABLE because it returns 1 every time. */
        if (!PTE_PRESENT(pte)) {
            return PF_ACCESS_READ;
        }
        if (!PTE_WRITABLE(pte)) {
            return PF_ACCESS_WRITE;
        }
        else
            // badvaddr is present, readable and writable, but an error occurred ... why?
            panic("page_fault - swap instruction, but address readable and writable (instr_code:%X, badvaddr:%X)", i_code, badvaddr);
    }
    panic("page_fault - on instruction not accessing memory (instr_code:%X, badvaddr:%X)", i_code, badvaddr);
    return PF_ACCESS_EXEC;
}
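
/* Background note: in the ARM instruction encoding the access bit-field maps
 * to bit 20, the L (load/store) bit of the load/store and coprocessor
 * load/store formats, which is why access == 1 is treated as a read and
 * access == 0 as a write; instr_type maps to bits [27:25] of the instruction
 * word.
 */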

/**
 * Routine that handles the data_abort exception,
 *  i.e. an attempt to load or store a value at an invalid memory address.
 * \param istate State of the CPU when the data abort occurred
 * \param n Number of the exception
 */
// TODO: remove debug prints in final tested version
void data_abort(int n, istate_t *istate) {
    fault_status_t fsr = read_fault_status_register();
    uintptr_t page = read_fault_address_register();

    pf_access_t access = get_memmory_access_type(istate->lr, page);

    print_istate(istate);
    dprintf(" page fault : ip:%X, va:%X, status:%x(%x), access:%d\n", istate->lr, page, fsr.status, fsr, access);

/* Alf: Will stay commented out until the stack problem is solved ...
    as_page_fault causes consequent page faults

    int ret = as_page_fault(page, access, istate);
    dprintf(" as_page_fault ret:%d\n", ret);
    if (ret == AS_PF_FAULT) {
        fault_if_from_uspace(istate, "Page fault: %#x", page);

        panic("page fault\n");
    }
*/
    // TODO: Remove this ... kept for testing purposes for now ... it's bad to test page faults in the kernel, where no page faults should occur
    panic("page fault ... solved\n");

}

/**
 * Routine that handles the prefetch_abort exception,
 *  i.e. an attempt to execute an instruction at an invalid address.
 * \param istate State of the CPU when the prefetch abort occurred
 * \param n Number of the exception
 */
void prefetch_abort(int n, istate_t *istate) {
    // A prefetch abort can also be caused by the bkpt instruction
    print_istate(istate);
    dprintf(" prefetch_abort ... instruction at address:%x can't be fetched\n", istate->lr);

/* Alf: Will stay commented out until the stack problem is solved ...
    as_page_fault causes consequent page faults

    int ret = as_page_fault(istate->lr, PF_ACCESS_EXEC, istate);
    dprintf(" as_page_fault ret:%d\n", ret);
    if (ret == AS_PF_FAULT) {
        panic("page fault - instruction fetch at addr:%X\n", istate->lr);
    }
*/

    panic("Prefetch abort ... solved");
}

/** @}
 */
