Subversion Repositories HelenOS-historic

Rev 822 → Rev 823
Line 54... Line 54...
 #include <arch.h>
 #include <print.h>
 
 as_operations_t *as_operations = NULL;
 
+/** Address space lock. It protects inactive_as_with_asid_head. */
+SPINLOCK_INITIALIZE(as_lock);
+
+/**
+ * This list contains address spaces that are not active on any
+ * processor and that have valid ASID.
+ */
+LIST_INITIALIZE(inactive_as_with_asid_head);
+
 /** Kernel address space. */
 as_t *AS_KERNEL = NULL;
 
 static int get_area_flags(as_area_t *a);
 
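The comment added above states the invariant behind the new list: an address space belongs on inactive_as_with_asid_head exactly when no processor is running it but it still holds a valid ASID. Taken together with the refcount field introduced later in this revision, that invariant can be expressed as a small predicate. The following is a minimal sketch only; the type names and members are simplified stand-ins, not the actual HelenOS definitions.

#include <stdbool.h>

typedef unsigned asid_t;
#define ASID_INVALID 0   /* placeholder; the real constant is architecture-defined */

/* Simplified stand-in for the bookkeeping this revision adds to as_t. */
typedef struct as_sketch {
	unsigned refcount;   /* number of processors this address space is active on */
	asid_t asid;         /* assigned ASID, or ASID_INVALID */
} as_sketch_t;

/* True exactly when the address space should sit on inactive_as_with_asid_head:
 * active on no processor, yet still owning a valid ASID. */
static bool belongs_on_inactive_list(const as_sketch_t *as)
{
	return (as->refcount == 0) && (as->asid != ASID_INVALID);
}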
Line 77... Line 86...
 as_t *as_create(int flags)
 {
     as_t *as;
 
     as = (as_t *) malloc(sizeof(as_t), 0);
-
-    list_initialize(&as->as_with_asid_link);
+    link_initialize(&as->inactive_as_with_asid_link);
     spinlock_initialize(&as->lock, "as_lock");
     list_initialize(&as->as_area_head);
     
     if (flags & FLAG_AS_KERNEL)
         as->asid = ASID_KERNEL;
     else
         as->asid = ASID_INVALID;
     
+    as->refcount = 0;
     as->page_table = page_table_create(flags);
 
     return as;
 }
 
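One detail worth noting in this hunk: the member is renamed from as_with_asid_link to inactive_as_with_asid_link and is now initialized with link_initialize() rather than list_initialize(). In the usual intrusive-list pattern, which the HelenOS names suggest (the exact HelenOS definitions are not shown in this diff), a list head and a list member are both link structures but are set up differently: an empty head points at itself, while a free-standing member belongs to no list at all. A generic sketch of that distinction, with invented names:

#include <stddef.h>

/* Generic doubly linked intrusive list node, invented here for illustration;
 * HelenOS's own link_t/list primitives may differ in detail. */
typedef struct sketch_link {
	struct sketch_link *prev;
	struct sketch_link *next;
} sketch_link_t;

/* A list HEAD is a link that points to itself while the list is empty. */
static void sketch_list_initialize(sketch_link_t *head)
{
	head->prev = head;
	head->next = head;
}

/* A list MEMBER starts out as a free-standing link with no membership. */
static void sketch_link_initialize(sketch_link_t *link)
{
	link->prev = NULL;
	link->next = NULL;
}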
Line 265... Line 274...
     spinlock_unlock(&AS->lock);
 
     return 1;
 }
 
-/** Install address space on CPU.
+/** Switch address spaces.
  *
- * @param as Address space.
+ * @param old Old address space or NULL.
+ * @param new New address space.
  */
-void as_install(as_t *as)
+void as_switch(as_t *old, as_t *new)
 {
     ipl_t ipl;
-
-    asid_install(as);
+    bool needs_asid = false;
     
     ipl = interrupts_disable();
-    spinlock_lock(&as->lock);
-    SET_PTL0_ADDRESS(as->page_table);
-    spinlock_unlock(&as->lock);
-    interrupts_restore(ipl);
+    spinlock_lock(&as_lock);
+
+    /*
+     * First, take care of the old address space.
+     */
+    if (old) {
+        spinlock_lock(&old->lock);
+        ASSERT(old->refcount);
+        if((--old->refcount == 0) && (old != AS_KERNEL)) {
+            /*
+             * The old address space is no longer active on
+             * any processor. It can be appended to the
+             * list of inactive address spaces with assigned
+             * ASID.
+             */
+             ASSERT(old->asid != ASID_INVALID);
+             list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
+        }
+        spinlock_unlock(&old->lock);
+    }
+
+    /*
+     * Second, prepare the new address space.
+     */
+    spinlock_lock(&new->lock);
+    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
+        if (new->asid != ASID_INVALID)
+            list_remove(&new->inactive_as_with_asid_link);
+        else
+            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
+    }
+    SET_PTL0_ADDRESS(new->page_table);
+    spinlock_unlock(&new->lock);
 
+    if (needs_asid) {
+        /*
+         * Allocation of new ASID was deferred
+         * until now in order to avoid deadlock.
+         */
+        asid_t asid;
+        
+        asid = asid_get();
+        spinlock_lock(&new->lock);
+        new->asid = asid;
+        spinlock_unlock(&new->lock);
+    }
+    spinlock_unlock(&as_lock);
+    interrupts_restore(ipl);
+    
     /*
      * Perform architecture-specific steps.
      * (e.g. write ASID to hardware register etc.)
      */
-    as_install_arch(as);
+    as_install_arch(new);
     
-    AS = as;
+    AS = new;
 }
 
 /** Compute flags for virtual address translation subsytem.
  *
  * The address space area must be locked.
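For context, the interface change in the hunk above shifts the bookkeeping into the switch path itself: instead of installing a single address space, the caller now hands over both the outgoing and the incoming space, and as_switch() maintains the reference counts, the inactive-with-ASID list, and the per-CPU AS pointer. A hypothetical call site (the function name and the guard are invented for illustration; the real HelenOS call sites are not part of this hunk) might look like:

/* Hypothetical caller: move the current CPU from whatever address space it is
 * running (AS, the per-CPU current-address-space pointer that as_switch()
 * updates) to the incoming task's address space. */
static void switch_to_task_as(as_t *incoming)
{
	if (AS != incoming)
		as_switch(AS, incoming);
}

The code explains the deferral of asid_get() only as deadlock avoidance; a plausible reading is that acquiring a fresh ASID may itself need to touch other address spaces' state (for instance, reclaiming an ASID from an inactive space on inactive_as_with_asid_head), which would be unsafe while new->lock is still held.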