Subversion Repositories HelenOS-historic

Rev 1233 → Rev 1235

--- Rev 1233
+++ Rev 1235
@@ -41,23 +41,25 @@
 #include <arch/mm/page.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
 #include <mm/asid.h>
 #include <arch/mm/asid.h>
-#include <arch/types.h>
-#include <typedefs.h>
 #include <synch/spinlock.h>
-#include <config.h>
 #include <adt/list.h>
 #include <adt/btree.h>
-#include <panic.h>
+#include <proc/task.h>
 #include <arch/asm.h>
+#include <panic.h>
 #include <debug.h>
+#include <print.h>
 #include <memstr.h>
 #include <macros.h>
 #include <arch.h>
-#include <print.h>
+#include <errno.h>
+#include <config.h>
+#include <arch/types.h>
+#include <typedefs.h>
 
 as_operations_t *as_operations = NULL;
 
 /** Address space lock. It protects inactive_as_with_asid_head. */
 SPINLOCK_INITIALIZE(as_lock);
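Note: the include churn above tracks the rest of this revision. <proc/task.h> and <errno.h> are new dependencies of as_area_send() and the address space syscalls added below, while <print.h>, <config.h>, <arch/types.h> and <typedefs.h> are only reordered.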
@@ -69,10 +71,11 @@
 LIST_INITIALIZE(inactive_as_with_asid_head);
 
 /** Kernel address space. */
 as_t *AS_KERNEL = NULL;
 
+static int area_flags_to_page_flags(int aflags);
 static int get_area_flags(as_area_t *a);
 static as_area_t *find_area_and_lock(as_t *as, __address va);
 static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
 
 /** Initialize address space subsystem. */
@@ -167,5 +170,111 @@
     interrupts_restore(ipl);
 
     return a;
 }
 
+/** Find address space area and change it.
+ *
+ * @param as Address space.
+ * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
+ * @param size New size of the virtual memory block starting at address.
+ * @param flags Flags influencing the remap operation. Currently unused.
+ *
+ * @return address on success, (__address) -1 otherwise.
+ */
+__address as_area_resize(as_t *as, __address address, size_t size, int flags)
+{
+    as_area_t *area = NULL;
+    ipl_t ipl;
+    size_t pages;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&as->lock);
+
+    /*
+     * Locate the area.
+     */
+    area = find_area_and_lock(as, address);
+    if (!area) {
+        spinlock_unlock(&as->lock);
+        interrupts_restore(ipl);
+        return (__address) -1;
+    }
+
+    if (area->flags & AS_AREA_DEVICE) {
+        /*
+         * Remapping of address space areas associated
+         * with memory mapped devices is not supported.
+         */
+        spinlock_unlock(&area->lock);
+        spinlock_unlock(&as->lock);
+        interrupts_restore(ipl);
+        return (__address) -1;
+    }
+
+    pages = SIZE2FRAMES((address - area->base) + size);
+    if (!pages) {
+        /*
+         * Zero size address space areas are not allowed.
+         */
+        spinlock_unlock(&area->lock);
+        spinlock_unlock(&as->lock);
+        interrupts_restore(ipl);
+        return (__address) -1;
+    }
+
+    if (pages < area->pages) {
+        int i;
+
+        /*
+         * Shrinking the area.
+         * No need to check for overlaps.
+         */
+        for (i = pages; i < area->pages; i++) {
+            pte_t *pte;
+
+            /*
+             * Releasing physical memory.
+             * This depends on the fact that the memory was allocated using frame_alloc().
+             */
+            page_table_lock(as, false);
+            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
+            if (pte && PTE_VALID(pte)) {
+                __address frame;
+
+                ASSERT(PTE_PRESENT(pte));
+                frame = PTE_GET_FRAME(pte);
+                page_mapping_remove(as, area->base + i*PAGE_SIZE);
+                page_table_unlock(as, false);
+
+                frame_free(ADDR2PFN(frame));
+            } else {
+                page_table_unlock(as, false);
+            }
+        }
+        /*
+         * Invalidate TLB's.
+         */
+        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
+        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
+        tlb_shootdown_finalize();
+    } else {
+        /*
+         * Growing the area.
+         * Check for overlaps with other address space areas.
+         */
+        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
+            spinlock_unlock(&area->lock);
+            spinlock_unlock(&as->lock);
+            interrupts_restore(ipl);
+            return (__address) -1;
+        }
+    }
+
+    area->pages = pages;
+
+    spinlock_unlock(&area->lock);
+    spinlock_unlock(&as->lock);
+    interrupts_restore(ipl);
+
+    return address;
+}
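The page arithmetic above, pages = SIZE2FRAMES((address - area->base) + size), is what decides between the shrink and grow paths. A minimal standalone sketch of that decision follows; PAGE_SIZE, the round-up definition of SIZE2FRAMES and all concrete numbers are assumptions for illustration, not the kernel's actual definitions:

    /* Standalone sketch of the resize arithmetic; PAGE_SIZE, the round-up
     * SIZE2FRAMES and the concrete numbers are assumptions for the example. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    /* Round a byte count up to whole frames, modelling the kernel macro. */
    #define SIZE2FRAMES(size) (((size) + PAGE_SIZE - 1) / PAGE_SIZE)

    int main(void)
    {
        unsigned long base = 0x40000000UL;      /* area->base (assumed) */
        unsigned long address = base;           /* resize anchored at the base */
        unsigned long old_pages = 16;           /* area->pages before the call */
        unsigned long size = 8 * PAGE_SIZE + 1; /* request: 8 pages and one byte */

        /* Same computation as in as_area_resize(): the new page count covers
         * everything from the area base up to address + size. */
        unsigned long pages = SIZE2FRAMES((address - base) + size);    /* = 9 */

        if (pages < old_pages)
            printf("shrink: unmap and free pages %lu..%lu\n", pages, old_pages - 1);
        else
            printf("grow to %lu pages: check for conflicts first\n", pages);
        return 0;
    }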
@@ -172,5 +281,139 @@
+
+/** Send address space area to another task.
+ *
+ * Address space area is sent to the specified task.
+ * If the destination task is willing to accept the
+ * area, a new area is created according to the
+ * source area. Moreover, any existing mapping
+ * is copied as well, providing thus a mechanism
+ * for sharing group of pages. The source address
+ * space area and any associated mapping is preserved.
+ *
+ * @param id Task ID of the accepting task.
+ * @param base Base address of the source address space area.
+ * @param size Size of the source address space area.
+ * @param flags Flags of the source address space area.
+ *
+ * @return 0 on success or ENOENT if there is no such task or
+ *     if there is no such address space area,
+ *     EPERM if there was a problem in accepting the area or
+ *     ENOMEM if there was a problem in allocating destination
+ *     address space area.
+ */
+int as_area_send(task_id_t id, __address base, size_t size, int flags)
+{
+    ipl_t ipl;
+    task_t *t;
+    count_t i;
+    as_t *as;
+    __address dst_base;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
+
+    t = task_find_by_id(id);
+    if (!t) {
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
+        return ENOENT;
+    }
+
+    spinlock_lock(&t->lock);
+    spinlock_unlock(&tasks_lock);
+
+    as = t->as;
+    dst_base = (__address) t->accept_arg.base;
+
+    if (as == AS) {
+        /*
+         * The two tasks share the entire address space.
+         * Return error since there is no point in continuing.
+         */
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
+        return EPERM;
+    }
+
+    if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != size) ||
+        (t->accept_arg.flags != flags)) {
+        /*
+         * Discrepancy in either task ID, size or flags.
+         */
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
+        return EPERM;
+    }
+
+    /*
+     * Create copy of the address space area.
+     */
+    if (!as_area_create(as, flags, size, dst_base)) {
+        /*
+         * Destination address space area could not be created.
+         */
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
+        return ENOMEM;
+    }
+
+    /*
+     * NOTE: we have just introduced a race condition.
+     * The destination task can try to attempt the newly
+     * created area before its mapping is copied from
+     * the source address space area. In result, frames
+     * can get lost.
+     *
+     * Currently, this race is not solved, but one of the
+     * possible solutions would be to sleep in as_page_fault()
+     * when this situation is detected.
+     */
+
+    memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
+    spinlock_unlock(&t->lock);
+
+    /*
+     * Avoid deadlock by first locking the address space with lower address.
+     */
+    if (as < AS) {
+        spinlock_lock(&as->lock);
+        spinlock_lock(&AS->lock);
+    } else {
+        spinlock_lock(&AS->lock);
+        spinlock_lock(&as->lock);
+    }
+
+    for (i = 0; i < SIZE2FRAMES(size); i++) {
+        pte_t *pte;
+        __address frame;
+
+        page_table_lock(AS, false);
+        pte = page_mapping_find(AS, base + i*PAGE_SIZE);
+        if (pte && PTE_VALID(pte)) {
+            ASSERT(PTE_PRESENT(pte));
+            frame = PTE_GET_FRAME(pte);
+            if (!(flags & AS_AREA_DEVICE)) {
+                /* TODO: increment frame reference count */
+            }
+            page_table_unlock(AS, false);
+        } else {
+            page_table_unlock(AS, false);
+            continue;
+        }
+
+        page_table_lock(as, false);
+        page_mapping_insert(as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(flags));
+        page_table_unlock(as, false);
+    }
+
+    spinlock_unlock(&AS->lock);
+    spinlock_unlock(&as->lock);
+    interrupts_restore(ipl);
+
+    return 0;
+}
+
 /** Initialize mapping for one page of address space.
  *
  * This function maps 'page' to 'frame' according
  * to attributes of the address space area to
  * which 'page' belongs.
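The "avoid deadlock" step in as_area_send() is the classic address-ordered locking technique: every path that must hold two address space locks at once acquires the lower-addressed lock first, so no two threads can each hold one lock while waiting for the other. A minimal user-level sketch of the same idea, with POSIX mutexes standing in for the kernel's spinlocks (lock_pair() and unlock_pair() are hypothetical helpers written for this sketch, not HelenOS API):

    /* Address-ordered acquisition of two locks, as in as_area_send(). */
    #include <pthread.h>

    static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        /* Caller guarantees a != b; as_area_send() bails out earlier with
         * EPERM when both tasks share a single address space. */
        if (a < b) {
            pthread_mutex_lock(a);
            pthread_mutex_lock(b);
        } else {
            pthread_mutex_lock(b);
            pthread_mutex_lock(a);
        }
    }

    static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        /* Release order is irrelevant to deadlock avoidance. */
        pthread_mutex_unlock(a);
        pthread_mutex_unlock(b);
    }

Because every thread that needs both locks takes the lower-addressed one first, a cycle of threads each holding one lock and waiting for the other can never form.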
@@ -342,40 +585,51 @@
     as_install_arch(new);
 
     AS = new;
 }
 
-/** Compute flags for virtual address translation subsystem.
+/** Convert address space area flags to page flags.
  *
- * The address space area must be locked.
- * Interrupts must be disabled.
- *
- * @param a Address space area.
+ * @param aflags Flags of some address space area.
  *
- * @return Flags to be used in page_mapping_insert().
+ * @return Flags to be passed to page_mapping_insert().
  */
-int get_area_flags(as_area_t *a)
+int area_flags_to_page_flags(int aflags)
 {
     int flags;
 
     flags = PAGE_USER | PAGE_PRESENT;
 
-    if (a->flags & AS_AREA_READ)
+    if (aflags & AS_AREA_READ)
         flags |= PAGE_READ;
 
-    if (a->flags & AS_AREA_WRITE)
+    if (aflags & AS_AREA_WRITE)
         flags |= PAGE_WRITE;
 
-    if (a->flags & AS_AREA_EXEC)
+    if (aflags & AS_AREA_EXEC)
         flags |= PAGE_EXEC;
 
-    if (!(a->flags & AS_AREA_DEVICE))
+    if (!(aflags & AS_AREA_DEVICE))
         flags |= PAGE_CACHEABLE;
 
     return flags;
 }
 
+/** Compute flags for virtual address translation subsystem.
+ *
+ * The address space area must be locked.
+ * Interrupts must be disabled.
+ *
+ * @param a Address space area.
+ *
+ * @return Flags to be used in page_mapping_insert().
+ */
+int get_area_flags(as_area_t *a)
+{
+    return area_flags_to_page_flags(a->flags);
+}
+
 /** Create page table.
  *
  * Depending on architecture, create either address space
  * private or global page table.
  *
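A usage sketch of the refactored helper: per the function body above, an ordinary readable and writable area (AS_AREA_DEVICE not set) converts as follows.

    /* Usage sketch in kernel context; the result follows directly from the
     * body of area_flags_to_page_flags() introduced in this revision. */
    int page_flags = area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE);
    /* page_flags == PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE |
     *               PAGE_CACHEABLE  (cacheable because AS_AREA_DEVICE is unset) */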
@@ -422,116 +676,10 @@
     ASSERT(as_operations->page_table_unlock);
 
     as_operations->page_table_unlock(as, unlock);
 }
 
-/** Find address space area and change it.
- *
- * @param as Address space.
- * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
- * @param size New size of the virtual memory block starting at address.
- * @param flags Flags influencing the remap operation. Currently unused.
- *
- * @return address on success, (__address) -1 otherwise.
- */
-__address as_area_resize(as_t *as, __address address, size_t size, int flags)
-{
-    as_area_t *area = NULL;
-    ipl_t ipl;
-    size_t pages;
-
-    ipl = interrupts_disable();
-    spinlock_lock(&as->lock);
-
-    /*
-     * Locate the area.
-     */
-    area = find_area_and_lock(as, address);
-    if (!area) {
-        spinlock_unlock(&as->lock);
-        interrupts_restore(ipl);
-        return (__address) -1;
-    }
-
-    if (area->flags & AS_AREA_DEVICE) {
-        /*
-         * Remapping of address space areas associated
-         * with memory mapped devices is not supported.
-         */
-        spinlock_unlock(&area->lock);
-        spinlock_unlock(&as->lock);
-        interrupts_restore(ipl);
-        return (__address) -1;
-    }
-
-    pages = SIZE2FRAMES((address - area->base) + size);
-    if (!pages) {
-        /*
-         * Zero size address space areas are not allowed.
-         */
-        spinlock_unlock(&area->lock);
-        spinlock_unlock(&as->lock);
-        interrupts_restore(ipl);
-        return (__address) -1;
-    }
-
-    if (pages < area->pages) {
-        int i;
-
-        /*
-         * Shrinking the area.
-         * No need to check for overlaps.
-         */
-        for (i = pages; i < area->pages; i++) {
-            pte_t *pte;
-
-            /*
-             * Releasing physical memory.
-             * This depends on the fact that the memory was allocated using frame_alloc().
-             */
-            page_table_lock(as, false);
-            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
-            if (pte && PTE_VALID(pte)) {
-                __address frame;
-
-                ASSERT(PTE_PRESENT(pte));
-                frame = PTE_GET_FRAME(pte);
-                page_mapping_remove(as, area->base + i*PAGE_SIZE);
-                page_table_unlock(as, false);
-
-                frame_free(ADDR2PFN(frame));
-            } else {
-                page_table_unlock(as, false);
-            }
-        }
-        /*
-         * Invalidate TLB's.
-         */
-        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
-        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
-        tlb_shootdown_finalize();
-    } else {
-        /*
-         * Growing the area.
-         * Check for overlaps with other address space areas.
-         */
-        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
-            spinlock_unlock(&area->lock);
-            spinlock_unlock(&as->lock);
-            interrupts_restore(ipl);
-            return (__address) -1;
-        }
-    }
-
-    area->pages = pages;
-
-    spinlock_unlock(&area->lock);
-    spinlock_unlock(&as->lock);
-    interrupts_restore(ipl);
-
-    return address;
-}
-
 /** Find address space area and lock it.
  *
  * The address space must be locked and interrupts must be disabled.
  *
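The block removed above is textually identical to the one inserted at new lines 175-280: as_area_resize() is moved up the file, unchanged, so that it precedes the new as_area_send().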
@@ -665,5 +813,72 @@
             KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
     }
 
     return true;
 }
+
+/*
+ * Address space related syscalls.
+ */
+
+/** Wrapper for as_area_create(). */
+__native sys_as_area_create(__address address, size_t size, int flags)
+{
+    if (as_area_create(AS, flags, size, address))
+        return (__native) address;
+    else
+        return (__native) -1;
+}
+
+/** Wrapper for as_area_resize(). */
+__native sys_as_area_resize(__address address, size_t size, int flags)
+{
+    return as_area_resize(AS, address, size, 0);
+}
+
+/** Prepare task for accepting address space area from another task.
+ *
+ * @param uspace_accept_arg Accept structure passed from userspace.
+ *
+ * @return EPERM if the task ID encapsulated in @uspace_accept_arg references
+ *     TASK. Otherwise zero is returned.
+ */
+__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
+{
+    as_area_acptsnd_arg_t arg;
+
+    copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));
+
+    if (!arg.size)
+        return (__native) EPERM;
+
+    if (arg.task_id == TASK->taskid) {
+        /*
+         * Accepting from itself is not allowed.
+         */
+        return (__native) EPERM;
+    }
+
+    memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));
+
+    return 0;
+}
+
+/** Wrapper for as_area_send(). */
+__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
+{
+    as_area_acptsnd_arg_t arg;
+
+    copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));
+
+    if (!arg.size)
+        return (__native) EPERM;
+
+    if (arg.task_id == TASK->taskid) {
+        /*
+         * Sending to itself is not allowed.
+         */
+        return (__native) EPERM;
+    }
+
+    return (__native) as_area_send(arg.task_id, (__address) arg.base, arg.size, arg.flags);
+}
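Taken together, sys_as_area_accept() and sys_as_area_send() form a two-step handshake: the receiver registers what it is willing to accept (peer task ID, size, flags and a destination base), and the sender's parameters must match that registration or as_area_send() fails with EPERM. A hypothetical userspace sketch of the protocol follows; the extern stubs, flag values and addresses are assumptions for illustration, not the real libc interface:

    /* Hypothetical userspace view of the accept/send handshake. The extern
     * stubs stand in for whatever syscall wrappers the real libc provides;
     * AS_AREA_* values and the base addresses are assumptions. */
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t task_id_t;

    typedef struct {
        task_id_t task_id;  /* peer task ID */
        void *base;         /* accept side: destination base; send side: source base */
        size_t size;
        int flags;
    } as_area_acptsnd_arg_t;

    extern long sys_as_area_accept(as_area_acptsnd_arg_t *arg);
    extern long sys_as_area_send(as_area_acptsnd_arg_t *arg);

    #define AS_AREA_READ   1   /* assumed flag values, for the sketch only */
    #define AS_AREA_WRITE  2

    /* Receiver: declare willingness to take a 16-page area from `sender`,
     * to be mapped at dst_base in the receiver's address space. */
    long accept_from(task_id_t sender, void *dst_base)
    {
        as_area_acptsnd_arg_t arg = {
            .task_id = sender, .base = dst_base,
            .size = 16 * 4096, .flags = AS_AREA_READ | AS_AREA_WRITE
        };
        return sys_as_area_accept(&arg);    /* kernel records this in TASK->accept_arg */
    }

    /* Sender: task_id, size and flags must match what the receiver
     * registered, otherwise as_area_send() returns EPERM. */
    long send_to(task_id_t receiver, void *src_base)
    {
        as_area_acptsnd_arg_t arg = {
            .task_id = receiver, .base = src_base,
            .size = 16 * 4096, .flags = AS_AREA_READ | AS_AREA_WRITE
        };
        return sys_as_area_send(&arg);
    }

Note the race documented in as_area_send() above: the receiver may touch the newly created area before the sender has finished copying the mappings into it.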