/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    spinlock_initialize(&as->lock, "as_lock");
    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}

/** Free address space. */
void as_free(as_t *as)
{
    ASSERT(as->refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

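/*
 * Illustrative sketch, not part of the original file: the intended
 * create/free lifecycle of an address space. A plain user address
 * space is created with flags == 0; as_init() above uses
 * FLAG_AS_KERNEL instead.
 */
#if 0
static void as_lifecycle_sketch(void)
{
    as_t *as;

    /* Create an ordinary (non-kernel) address space; its ASID stays
     * ASID_INVALID until the address space is first switched to. */
    as = as_create(0);

    /* Free it again; as_free() insists on refcount == 0. */
    as_free(as);
}
#endif
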
/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of area.
 * @param base Base address of area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    spinlock_initialize(&a->lock, "as_area_lock");

    a->flags = flags;
    a->pages = SIZE2FRAMES(size);
    a->base = base;

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

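/*
 * Illustrative sketch, not part of the original file: creating a
 * two-page read-write area. The base address 0x40000000 is an
 * arbitrary example value, not a constant defined by this kernel.
 */
#if 0
static void as_area_create_sketch(as_t *as)
{
    as_area_t *a;

    /* The base must be page-aligned and the range must not conflict
     * with any existing area (or the NULL page). */
    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, 2 * PAGE_SIZE,
        0x40000000);
    if (!a)
        printf("as_area_create() failed\n");
}
#endif
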
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_area_resize(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area = NULL;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (area->flags & AS_AREA_DEVICE) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (pages < area->pages) {
        int i;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */
        for (i = pages; i < area->pages; i++) {
            pte_t *pte;

            /*
             * Releasing physical memory.
             * This depends on the fact that the memory was allocated using frame_alloc().
             */
            page_table_lock(as, false);
            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
            if (pte && PTE_VALID(pte)) {
                __address frame;

                ASSERT(PTE_PRESENT(pte));
                frame = PTE_GET_FRAME(pte);
                page_mapping_remove(as, area->base + i*PAGE_SIZE);
                page_table_unlock(as, false);

                frame_free(ADDR2PFN(frame));
            } else {
                page_table_unlock(as, false);
            }
        }
        /*
         * Invalidate TLBs.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            spinlock_unlock(&area->lock);
            spinlock_unlock(&as->lock);
            interrupts_restore(ipl);
            return (__address) -1;
        }
    }

    area->pages = pages;

    spinlock_unlock(&area->lock);
    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return address;
}

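/*
 * Illustrative sketch, not part of the original file: shrinking an
 * area to a single page. On success the passed address is returned;
 * on failure the result is (__address) -1.
 */
#if 0
static void as_area_resize_sketch(as_t *as, __address base)
{
    /* 'base' must belong to an existing, non-device area. */
    if (as_area_resize(as, base, PAGE_SIZE, 0) == (__address) -1)
        printf("as_area_resize() failed\n");
}
#endif
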
/** Send address space area to another task.
 *
 * Address space area is sent to the specified task.
 * If the destination task is willing to accept the
 * area, a new area is created according to the
 * source area. Moreover, any existing mapping
 * is copied as well, thus providing a mechanism
 * for sharing groups of pages. The source address
 * space area and any associated mapping is preserved.
 *
 * @param id Task ID of the accepting task.
 * @param base Base address of the source address space area.
 * @param size Size of the source address space area.
 * @param flags Flags of the source address space area.
 *
 * @return 0 on success or ENOENT if there is no such task or
 *     if there is no such address space area,
 *     EPERM if there was a problem in accepting the area or
 *     ENOMEM if there was a problem in allocating destination
 *     address space area.
 */
int as_area_send(task_id_t id, __address base, size_t size, int flags)
{
    ipl_t ipl;
    task_t *t;
    count_t i;
    as_t *as;
    __address dst_base;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    t = task_find_by_id(id);
    if (!t) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    spinlock_lock(&t->lock);
    spinlock_unlock(&tasks_lock);

    as = t->as;
    dst_base = (__address) t->accept_arg.base;

    if (as == AS) {
        /*
         * The two tasks share the entire address space.
         * Return error since there is no point in continuing.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != size) ||
        (t->accept_arg.flags != flags)) {
        /*
         * Discrepancy in either task ID, size or flags.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    /*
     * Create copy of the address space area.
     */
    if (!as_area_create(as, flags, size, dst_base)) {
        /*
         * Destination address space area could not be created.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * NOTE: we have just introduced a race condition.
     * The destination task can try to access the newly
     * created area before its mapping is copied from
     * the source address space area. As a result, frames
     * can get lost.
     *
     * Currently, this race is not solved, but one of the
     * possible solutions would be to sleep in as_page_fault()
     * when this situation is detected.
     */

    memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
    spinlock_unlock(&t->lock);

    /*
     * Avoid deadlock by first locking the address space with the lower address.
     */
    if (as < AS) {
        spinlock_lock(&as->lock);
        spinlock_lock(&AS->lock);
    } else {
        spinlock_lock(&AS->lock);
        spinlock_lock(&as->lock);
    }

    for (i = 0; i < SIZE2FRAMES(size); i++) {
        pte_t *pte;
        __address frame;

        page_table_lock(AS, false);
        pte = page_mapping_find(AS, base + i*PAGE_SIZE);
        if (pte && PTE_VALID(pte)) {
            ASSERT(PTE_PRESENT(pte));
            frame = PTE_GET_FRAME(pte);
            if (!(flags & AS_AREA_DEVICE)) {
                /* TODO: increment frame reference count */
            }
            page_table_unlock(AS, false);
        } else {
            page_table_unlock(AS, false);
            continue;
        }

        page_table_lock(as, false);
        page_mapping_insert(as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(flags));
        page_table_unlock(as, false);
    }

    spinlock_unlock(&AS->lock);
    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}

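/*
 * Illustrative sketch, not part of the original file: the sender's
 * half of the handshake. The receiving task must already have
 * published a matching accept_arg (task ID, size and flags), normally
 * via sys_as_area_accept() further below; otherwise EPERM is returned.
 */
#if 0
static void as_area_send_sketch(task_id_t receiver_id, __address src_base,
    size_t size)
{
    int rc;

    rc = as_area_send(receiver_id, src_base, size,
        AS_AREA_READ | AS_AREA_WRITE);
    if (rc != 0)
        printf("as_area_send() failed: %d\n", rc);
}
#endif
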
/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;

    ipl = interrupts_disable();
    page_table_lock(as, true);

    area = find_area_and_lock(as, page);
    if (!area) {
        panic("page not part of any as_area\n");
    }

    page_mapping_insert(as, page, frame, get_area_flags(area));

    spinlock_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault could not be resolved, 1 on success.
 */
int as_page_fault(__address page)
{
    pte_t *pte;
    as_area_t *area;
    __address frame;

    ASSERT(AS);

    spinlock_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        spinlock_unlock(&AS->lock);
        return 0;
    }

    ASSERT(!(area->flags & AS_AREA_DEVICE));

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            spinlock_unlock(&area->lock);
            spinlock_unlock(&AS->lock);
            return 1;
        }
    }

    /*
     * In general, there can be several reasons that
     * can have caused this fault.
     *
     * - non-existent mapping: the area is a scratch
     *   area (e.g. stack) and so far has not been
     *   allocated a frame for the faulting page
     *
     * - non-present mapping: another possibility,
     *   currently not implemented, would be frame
     *   reuse; when this becomes a possibility,
     *   do not forget to distinguish between
     *   the different causes
     */
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    memsetb(PA2KA(frame), FRAME_SIZE, 0);

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, page, frame, get_area_flags(area));
    page_table_unlock(AS, false);

    spinlock_unlock(&area->lock);
    spinlock_unlock(&AS->lock);
    return 1;
}

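/*
 * Illustrative sketch, not part of the original file: how a low-level,
 * architecture-specific fault handler might hand a fault over to
 * as_page_fault(). The handler name and the panic reaction are
 * hypothetical; real handlers live in the arch-specific trees.
 */
#if 0
static void page_fault_low_sketch(__address fault_address)
{
    /* as_page_fault() expects the faulting page, so align down. */
    if (!as_page_fault(fault_address & ~(PAGE_SIZE - 1)))
        panic("unhandled page fault\n");
}
#endif
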
/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&as_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        spinlock_lock(&old->lock);
        ASSERT(old->refcount);
        if ((--old->refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        spinlock_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    spinlock_lock(&new->lock);
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    spinlock_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        spinlock_lock(&new->lock);
        new->asid = asid;
        spinlock_unlock(&new->lock);
    }
    spinlock_unlock(&as_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}

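/*
 * Illustrative sketch, not part of the original file: the kind of call
 * site as_switch() is written for. A scheduler switching from thread
 * t1 to thread t2 would do something along these lines; thread_t and
 * its task member are assumptions of the example.
 */
#if 0
static void context_switch_sketch(thread_t *t1, thread_t *t2)
{
    /* Only the address spaces matter; threads of the same task
     * share one. */
    if (t1->task->as != t2->task->as)
        as_switch(t1->task->as, t2->task->as);
}
#endif
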
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (!(aflags & AS_AREA_DEVICE))
        flags |= PAGE_CACHEABLE;

    return flags;
}

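/*
 * Illustrative sketch, not part of the original file: what the
 * translation above yields for an ordinary read-write data area.
 */
#if 0
    int pflags = area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE);
    /* pflags == PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE
     *           | PAGE_CACHEABLE */
#endif
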
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

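/*
 * Illustrative sketch, not part of the original file: the locking
 * order described above, modelled on as_page_fault(). The address
 * space and the area are locked first; the page table lock is then
 * taken with lock == false because as->lock is already held.
 */
#if 0
    spinlock_lock(&as->lock);
    area = find_area_and_lock(as, va);
    page_table_lock(as, false);
    pte = page_mapping_find(as, va);
    /* ... inspect or modify the mapping ... */
    page_table_unlock(as, false);
    spinlock_unlock(&area->lock);
    spinlock_unlock(&as->lock);
#endif
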
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        spinlock_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        spinlock_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        spinlock_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it does not conflict with the kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
    if (as_area_create(AS, flags, size, address))
        return (__native) address;
    else
        return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
    /* The flags argument is deliberately ignored; see as_area_resize(). */
    return as_area_resize(AS, address, size, 0);
}

/** Prepare task for accepting address space area from another task.
 *
 * @param uspace_accept_arg Accept structure passed from userspace.
 *
 * @return EPERM if the task ID encapsulated in @uspace_accept_arg references
 *     TASK. Otherwise zero is returned.
 */
__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
{
    as_area_acptsnd_arg_t arg;

    copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));

    if (!arg.size)
        return (__native) EPERM;

    if (arg.task_id == TASK->taskid) {
        /*
         * Accepting from itself is not allowed.
         */
        return (__native) EPERM;
    }

    memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));

    return 0;
}

/** Wrapper for as_area_send(). */
__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
{
    as_area_acptsnd_arg_t arg;

    copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));

    if (!arg.size)
        return (__native) EPERM;

    if (arg.task_id == TASK->taskid) {
        /*
         * Sending to itself is not allowed.
         */
        return (__native) EPERM;
    }

    return (__native) as_area_send(arg.task_id, (__address) arg.base, arg.size, arg.flags);
}
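
/*
 * Illustrative sketch, not part of the original file: the two-step
 * protocol behind the wrappers above, written from the kernel's point
 * of view. Real callers are userspace programs whose argument
 * structures reach the kernel through copy_from_uspace(); the task IDs
 * and addresses below are hypothetical.
 */
#if 0
    /* In the receiving task: accept one read-write page from task_a. */
    as_area_acptsnd_arg_t accept_arg = {
        .task_id = task_a_id,
        .base = (void *) 0x40000000,
        .size = PAGE_SIZE,
        .flags = AS_AREA_READ | AS_AREA_WRITE
    };
    sys_as_area_accept(&accept_arg);

    /* In the sending task: parameters must match the accept_arg. */
    as_area_acptsnd_arg_t send_arg = {
        .task_id = task_b_id,
        .base = (void *) src_base,
        .size = PAGE_SIZE,
        .flags = AS_AREA_READ | AS_AREA_WRITE
    };
    sys_as_area_send(&send_arg);
#endif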