Subversion Repositories HelenOS

Compare Revisions

Rev 1890 → Rev 1891

/trunk/kernel/genarch/src/mm/asid.c
128,7 → 128,7
* of TLB entries (e.g. TSB on sparc64), the
* cache must be invalidated as well.
*/
as_invalidate_translation_cache(as, 0, 0);
as_invalidate_translation_cache(as, 0, (count_t) -1);
mutex_unlock(&as->lock);
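
The replaced call is the substance of this hunk: passing 0 pages invalidated nothing when a stolen ASID was recycled, whereas (count_t) -1 asks the architecture to drop its whole per-address-space translation cache. On sparc64 the hook presumably expands to the TSB sweep declared later in this diff; a sketch of that wiring (the macro body here is an assumption, not quoted source):

#ifdef CONFIG_TSB
/* arch/mm/as.h (sparc64), assumed definition of the generic hook: */
#define as_invalidate_translation_cache(as, page, cnt) \
    tsb_invalidate((as), (page), (cnt))
#else
#define as_invalidate_translation_cache(as, page, cnt)
#endif

tsb_invalidate() clamps the page count to the TSB size, so (count_t) -1 degenerates to one sweep over every entry (see tsb.c at the end of this diff).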
 
/trunk/kernel/generic/include/mm/as.h
160,8 → 160,6
 
/** Data to be used by the backend. */
mem_backend_data_t backend_data;
as_arch_t arch;
};
 
extern as_t *AS_KERNEL;
190,7 → 188,17
extern int used_space_insert(as_area_t *a, uintptr_t page, count_t count);
extern int used_space_remove(as_area_t *a, uintptr_t page, count_t count);
 
 
/* Interface to be implemented by architectures. */
#ifndef as_constructor_arch
extern int as_constructor_arch(as_t *as, int flags);
#endif /* !def as_constructor_arch */
#ifndef as_destructor_arch
extern int as_destructor_arch(as_t *as);
#endif /* !def as_destructor_arch */
#ifndef as_create_arch
extern int as_create_arch(as_t *as, int flags);
#endif /* !def as_create_arch */
#ifndef as_install_arch
extern void as_install_arch(as_t *as);
#endif /* !def as_install_arch */
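
The #ifndef guards let each architecture pick a macro or a real function per hook. Two sketches of the resulting contract; the trivial body is modeled on the stub headers later in this diff, and the failure convention is inferred from the -1 return in sparc64's as_constructor_arch():

/* A trivial architecture defines the hook away in its arch/mm/as.h,
 * hiding the extern declaration above: */
#define as_constructor_arch(as, flags) (as != as) /* always 0, i.e. success */

/* sparc64 leaves the name undefined, so the declaration stays visible
 * and the linker binds it to a real function (see sparc64/src/mm/as.c): */
int as_constructor_arch(as_t *as, int flags)
{
    /* allocate per-as resources, e.g. the ITSB and DTSB */
    return 0; /* nonzero reports construction failure */
}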
/trunk/kernel/generic/src/mm/as.c
106,12 → 106,33
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);
 
static int as_constructor(void *obj, int flags)
{
as_t *as = (as_t *) obj;
int rc;
 
link_initialize(&as->inactive_as_with_asid_link);
mutex_initialize(&as->lock);
rc = as_constructor_arch(as, flags);
return rc;
}
 
static int as_destructor(void *obj)
{
as_t *as = (as_t *) obj;
 
return as_destructor_arch(as);
}
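
Hoisting link_initialize(), mutex_initialize() and the arch hook into the slab constructor relies on the usual slab/magazine semantics: the constructor runs when the cache creates an object, not on every allocation, so recycled objects keep their initialized state. A usage sketch under that assumption:

as_t *a = (as_t *) slab_alloc(as_slab, 0); /* fresh object: constructor ran */
slab_free(as_slab, a);                     /* parked in a magazine, not destroyed */
as_t *b = (as_t *) slab_alloc(as_slab, 0); /* possibly the same object again:
                                            * no constructor call, the lock and
                                            * link survive from the first run */

This is also why as_create() below drops its own link/mutex initialization and instead calls as_create_arch() for the truly per-creation work (zeroing and invalidating the TSBs on sparc64).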
 
/** Initialize address space subsystem. */
void as_init(void)
{
as_arch_init();
as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
AS_KERNEL = as_create(FLAG_AS_KERNEL);
if (!AS_KERNEL)
128,8 → 149,8
as_t *as;
 
as = (as_t *) slab_alloc(as_slab, 0);
link_initialize(&as->inactive_as_with_asid_link);
mutex_initialize(&as->lock);
(void) as_create_arch(as, 0);
btree_create(&as->as_area_btree);
if (flags & FLAG_AS_KERNEL)
/trunk/kernel/Makefile
79,6 → 79,10
DEFS += -DCONFIG_VHPT
endif
 
ifeq ($(CONFIG_TSB),y)
DEFS += -DCONFIG_TSB
endif
 
ifeq ($(CONFIG_POWEROFF),y)
DEFS += -DCONFIG_POWEROFF
endif
/trunk/kernel/arch/xen32/include/mm/as.h
47,6 → 47,9
typedef struct {
} as_arch_t;
 
#define as_constructor_arch(as, flags) (as != as)
#define as_destructor_arch(as) (as != as)
#define as_create_arch(as, flags) (as != as)
#define as_install_arch(as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
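
The (as != as) bodies look odd but are a warning-suppression idiom: for any pointer the comparison is 0 (success), while the expression still references its argument, so a caller whose only use of a variable is this hook does not trip unused-variable warnings. A roughly equivalent, arguably clearer spelling (not from the source):

#define as_constructor_arch(as, flags) ((void) (as), 0)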
/trunk/kernel/arch/sparc64/include/arch.h
37,11 → 37,13
#ifndef KERN_sparc64_ARCH_H_
#define KERN_sparc64_ARCH_H_
 
#define ASI_AIUP 0x10 /** Access to primary context with user privileges. */
#define ASI_AIUS 0x11 /** Access to secondary context with user privileges. */
#define ASI_NUCLEUS_QUAD_LDD 0x24 /** ASI for 16-byte atomic loads. */

#define NWINDOW 8 /** Number of register window sets. */

#endif
 
/** @}
/trunk/kernel/arch/sparc64/include/trap/mmu.h
44,6 → 44,10
#include <arch/mm/tte.h>
#include <arch/trap/regwin.h>
 
#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif
 
#define TT_FAST_INSTRUCTION_ACCESS_MMU_MISS 0x64
#define TT_FAST_DATA_ACCESS_MMU_MISS 0x68
#define TT_FAST_DATA_ACCESS_PROTECTION 0x6c
56,8 → 60,19
/*
* First, try to refill TLB from TSB.
*/
! TODO
 
#ifdef CONFIG_TSB
ldxa [%g0] ASI_IMMU, %g1 ! read TSB Tag Target Register
ldxa [%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g2 ! read TSB 8K Pointer
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! 16-byte atomic load into %g4 and %g5
cmp %g1, %g4 ! is this the entry we are looking for?
bne,pn %xcc, 0f
nop
stxa %g5, [%g0] ASI_ITLB_DATA_IN_REG ! copy mapping from ITSB to ITLB
retry
#endif
 
0:
wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
PREEMPTIBLE_HANDLER fast_instruction_access_mmu_miss
.endm
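
In C terms, the fast path above reads the MMU's precomputed Tag Target and 8K Pointer, atomically fetches the 16-byte candidate entry, and installs it on a tag match. A sketch with hypothetical helper names (the authoritative sequence is the assembly itself):

uint64_t tag = immu_tag_target_read();           /* ldxa [%g0] ASI_IMMU */
tsb_entry_t *entry = immu_tsb_8k_pointer_read(); /* ASI_IMMU_TSB_8KB_PTR_REG */

uint64_t etag, edata;
load_quad_atomic(entry, &etag, &edata);          /* ldda ASI_NUCLEUS_QUAD_LDD */

if (etag == tag) {
    itlb_data_in_write(edata);                   /* stxa to ASI_ITLB_DATA_IN_REG */
    retry();                                     /* re-execute the faulting instruction */
}
/* mismatch: fall through to the PREEMPTIBLE_HANDLER slow path */

The data-side handler below adds one twist: it shifts the Tag Target right by TSB_TAG_TARGET_CONTEXT_SHIFT and skips the TSB when the resulting context is zero, i.e. for kernel misses.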
66,8 → 81,20
/*
* First, try to refill TLB from TSB.
*/
! TODO
 
#ifdef CONFIG_TSB
ldxa [%g0] ASI_DMMU, %g1 ! read TSB Tag Target Register
srlx %g1, TSB_TAG_TARGET_CONTEXT_SHIFT, %g2 ! is this kernel miss?
brz,pn %g2, 0f
ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g3 ! read TSB 8K Pointer
ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4 ! 16-byte atomic load into %g4 and %g5
cmp %g1, %g4 ! is this the entry we are looking for?
bne,pn %xcc, 0f
nop
stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG ! copy mapping from DTSB to DTLB
retry
#endif
 
/*
* Second, test if it is the portion of the kernel address space
* which is faulting. If that is the case, immediately create
76,7 → 103,7
*
* Note that branch-delay slots are used in order to save space.
*/
 
0:
mov VA_DMMU_TAG_ACCESS, %g1
ldxa [%g1] ASI_DMMU, %g1 ! read the faulting Context and VPN
set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
110,13 → 137,9
 
.macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl
/*
* First, try to refill TLB from TSB.
*/
! TODO

/*
* The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER.
*/
.if (\tl > 0)
wrpr %g0, 1, %tl
.endif
/trunk/kernel/arch/sparc64/include/mm/tte.h
50,6 → 50,8
 
#include <arch/types.h>
 
#define VA_TAG_PAGE_SHIFT 22
 
/** Translation Table Entry - Tag. */
union tte_tag {
uint64_t value;
/trunk/kernel/arch/sparc64/include/mm/mmu.h
48,7 → 48,7
#define ASI_IMMU_DEMAP 0x57
 
/* Virtual Addresses within ASI_IMMU. */
#define VA_IMMU_TAG_TARGET 0x0 /**< IMMU tag target register. */
#define VA_IMMU_TSB_TAG_TARGET 0x0 /**< IMMU TSB tag target register. */
#define VA_IMMU_SFSR 0x18 /**< IMMU sync fault status register. */
#define VA_IMMU_TSB_BASE 0x28 /**< IMMU TSB base register. */
#define VA_IMMU_TAG_ACCESS 0x30 /**< IMMU TLB tag access register. */
64,7 → 64,7
#define ASI_DMMU_DEMAP 0x5f
 
/* Virtual Addresses within ASI_DMMU. */
#define VA_DMMU_TAG_TARGET 0x0 /**< DMMU tag target register. */
#define VA_DMMU_TSB_TAG_TARGET 0x0 /**< DMMU TSB tag target register. */
#define VA_PRIMARY_CONTEXT_REG 0x8 /**< DMMU primary context register. */
#define VA_SECONDARY_CONTEXT_REG 0x10 /**< DMMU secondary context register. */
#define VA_DMMU_SFSR 0x18 /**< DMMU sync fault status register. */
/trunk/kernel/arch/sparc64/include/mm/tsb.h
35,11 → 35,6
#ifndef KERN_sparc64_TSB_H_
#define KERN_sparc64_TSB_H_
 
#include <arch/mm/tte.h>
#include <arch/mm/mmu.h>
#include <arch/types.h>
#include <typedefs.h>
 
/*
* ITSB and DTSB will claim 64K of memory, which
* is a nice number considering that it is one of
51,8 → 46,31
#define ITSB_ENTRY_COUNT (512*(1<<TSB_SIZE))
#define DTSB_ENTRY_COUNT (512*(1<<TSB_SIZE))
 
#define TSB_TAG_TARGET_CONTEXT_SHIFT 48
 
#ifndef __ASM__
 
#include <arch/mm/tte.h>
#include <arch/mm/mmu.h>
#include <arch/types.h>
#include <typedefs.h>
 
/** TSB Tag Target register. */
union tsb_tag_target {
uint64_t value;
struct {
unsigned invalid : 1; /**< Invalidated by software. */
unsigned : 2;
unsigned context : 13; /**< Software ASID. */
unsigned : 6;
uint64_t va_tag : 42; /**< Virtual address bits <63:22>. */
} __attribute__ ((packed));
};
typedef union tsb_tag_target tsb_tag_target_t;
 
/** TSB entry. */
struct tsb_entry {
tte_tag_t tag;
tsb_tag_target_t tag;
tte_data_t data;
} __attribute__ ((packed));
typedef struct tsb_entry tsb_entry_t;
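
A consistency check of the constants, assuming TSB_SIZE == 2 (which matches the 64K figure in the comment above) and 8K pages (PAGE_WIDTH == 13); tsb_index() is a hypothetical helper, not part of the source:

/* entries per TSB:  512 * (1 << 2) == 2048
 * both TSBs:        2 * 2048 * sizeof(tsb_entry_t) == 2 * 2048 * 16 == 64K
 * index mask (tsb.c): (1 << (21+1+TSB_SIZE-PAGE_WIDTH)) - 1
 *                   == (1 << 11) - 1 == 2047 == ITSB_ENTRY_COUNT - 1,
 * where 21+1 == 22 is VA_TAG_PAGE_SHIFT, the bit where va_tag begins. */
index_t tsb_index(uintptr_t page)
{
    return (page >> PAGE_WIDTH) & (ITSB_ENTRY_COUNT - 1);
}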
109,7 → 127,11
}
 
extern void tsb_invalidate(as_t *as, uintptr_t page, count_t pages);
extern void itsb_pte_copy(pte_t *t);
extern void dtsb_pte_copy(pte_t *t, bool ro);
 
#endif /* !def __ASM__ */
 
#endif
 
/** @}
/trunk/kernel/arch/sparc64/include/barrier.h
41,9 → 41,9
#define CS_ENTER_BARRIER() __asm__ volatile ("" ::: "memory")
#define CS_LEAVE_BARRIER() __asm__ volatile ("" ::: "memory")
 
#define memory_barrier()
#define read_barrier()
#define write_barrier()
#define memory_barrier() __asm__ volatile ("membar #LoadLoad | #StoreStore\n" ::: "memory")
#define read_barrier() __asm__ volatile ("membar #LoadLoad\n" ::: "memory")
#define write_barrier() __asm__ volatile ("membar #StoreStore\n" ::: "memory")
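
The former no-op barriers become real membar instructions: write_barrier() orders store-store pairs, read_barrier() load-load pairs, and memory_barrier() both. None of them orders a store against a later load; on SPARC v9 that would additionally need #StoreLoad, shown here purely for contrast as a hypothetical macro, not part of this change:

#define full_memory_barrier() \
    __asm__ volatile ("membar #LoadLoad | #LoadStore | #StoreLoad | #StoreStore\n" ::: "memory")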
 
/** Flush Instruction Memory instruction. */
static inline void flush(void)
/trunk/kernel/arch/sparc64/Makefile.inc
66,7 → 66,7
#
 
CONFIG_Z8530 = y
DEFS += -DCONFIG_Z8530
endif
ifeq ($(MACHINE),ultra)
## Compile with support for ns16550 controller.
107,7 → 107,7
arch/$(ARCH)/src/drivers/tick.c \
arch/$(ARCH)/src/drivers/kbd.c
 
ifdef CONFIG_TSB
ifeq ($(CONFIG_TSB),y)
ARCH_SOURCES += \
arch/$(ARCH)/src/mm/tsb.c
endif
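
The ifdef-to-ifeq change is a real fix, not cosmetics: GNU make's ifdef only tests that the variable is non-empty, so CONFIG_TSB = n would still have compiled tsb.c, while ifeq compares the value against y. A standalone illustration (GNU make):

CONFIG_TSB = n

ifdef CONFIG_TSB
$(info ifdef branch taken even though CONFIG_TSB = n)
endif

ifeq ($(CONFIG_TSB),y)
$(info ifeq branch taken only when CONFIG_TSB = y)
endif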
/trunk/kernel/arch/sparc64/src/mm/tlb.c
51,6 → 51,10
#include <panic.h>
#include <arch/asm.h>
 
#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif
 
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
144,6 → 148,10
dtlb_data_in_write(data.value);
}
 
/** Copy PTE to ITLB.
*
* @param t Page Table Entry to be copied.
*/
void itlb_pte_copy(pte_t *t)
{
tlb_tag_access_reg_t tag;
189,6 → 197,9
*/
t->a = true;
itlb_pte_copy(t);
#ifdef CONFIG_TSB
itsb_pte_copy(t);
#endif
page_table_unlock(AS, true);
} else {
/*
233,6 → 244,9
*/
t->a = true;
dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, true);
#endif
page_table_unlock(AS, true);
} else {
/*
266,6 → 280,9
t->d = true;
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, false);
#endif
page_table_unlock(AS, true);
} else {
/*
/trunk/kernel/arch/sparc64/src/mm/as.c
40,6 → 40,12
 
#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#include <arch/memstr.h>
#include <synch/mutex.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <bitops.h>
#include <macros.h>
#endif
 
/** Architecture dependent address space init. */
49,6 → 55,47
asid_fifo_init();
}
 
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
int order = fnzb32(((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH);
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags);
 
if (!tsb)
return -1;
 
as->arch.itsb = (tsb_entry_t *) tsb;
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));
#endif
return 0;
}
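
The order computation deserves unpacking. Assuming TSB_SIZE == 2, 8K frames (FRAME_WIDTH == 13) and frame_alloc() taking a power-of-two order of naturally aligned frames (all assumptions consistent with the rest of this diff):

size_t bytes = (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t);
                                  /* (2048 + 2048) * 16 == 65536 */
int order = fnzb32(bytes >> FRAME_WIDTH);
                                  /* 65536 >> 13 == 8 frames; fnzb32(8) == 3 */
/* frame_alloc(3, flags) thus yields 2^3 == 8 contiguous 8K frames:
 * 64K aligned on a 64K boundary, the natural alignment the TSB Base
 * registers need since tsb_base.base stores the address >> PAGE_WIDTH. */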
 
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
count_t cnt = ((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH;
frame_free((uintptr_t) as->arch.itsb);
return cnt;
#else
return 0;
#endif
}
 
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
ipl_t ipl;
 
memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t), 0);
ipl = interrupts_disable();
mutex_lock_active(&as->lock); /* completely unnecessary, but polite */
tsb_invalidate(as, 0, (count_t) -1);
mutex_unlock(&as->lock);
interrupts_restore(ipl);
#endif
return 0;
}
 
/** Perform sparc64-specific tasks when an address space becomes active on the processor.
*
* Install ASID and map TSBs.
78,37 → 125,35
mmu_secondary_context_write(ctx.v);
 
#ifdef CONFIG_TSB
if (as != AS_KERNEL) {
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    ASSERT(as->arch.itsb && as->arch.dtsb);

    uintptr_t tsb = as->arch.itsb;
    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
        /*
         * TSBs were allocated from memory not covered
         * by the locked 4M kernel DTLB entry. We need
         * to map both TSBs explicitly.
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
    }

    /*
     * Setup TSB Base registers.
     */
    tsb_base_reg_t tsb_base;

    tsb_base.value = 0;
    tsb_base.size = TSB_SIZE;
    tsb_base.split = 0;

    tsb_base.base = as->arch.itsb >> PAGE_WIDTH;
    itsb_base_write(tsb_base.value);
    tsb_base.base = as->arch.dtsb >> PAGE_WIDTH;
    dtsb_base_write(tsb_base.value);
}
uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

ASSERT(as->arch.itsb && as->arch.dtsb);

uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
    /*
     * TSBs were allocated from memory not covered
     * by the locked 4M kernel DTLB entry. We need
     * to map both TSBs explicitly.
     */
    dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
    dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
}

/*
 * Setup TSB Base registers.
 */
tsb_base_reg_t tsb_base;

tsb_base.value = 0;
tsb_base.size = TSB_SIZE;
tsb_base.split = 0;

tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
itsb_base_write(tsb_base.value);
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
dtsb_base_write(tsb_base.value);
#endif
}
 
129,22 → 174,19
*/
 
#ifdef CONFIG_TSB
if (as != AS_KERNEL) {
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    ASSERT(as->arch.itsb && as->arch.dtsb);

    uintptr_t tsb = as->arch.itsb;
    if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
        /*
         * TSBs were allocated from memory not covered
         * by the locked 4M kernel DTLB entry. We need
         * to demap the entry installed by as_install_arch().
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
    }
}
uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

ASSERT(as->arch.itsb && as->arch.dtsb);

uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
    /*
     * TSBs were allocated from memory not covered
     * by the locked 4M kernel DTLB entry. We need
     * to demap the entry installed by as_install_arch().
     */
    dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
}
#endif
}
/trunk/kernel/arch/sparc64/src/mm/tsb.c
33,21 → 33,117
*/
 
#include <arch/mm/tsb.h>
#include <arch/mm/tlb.h>
#include <arch/barrier.h>
#include <mm/as.h>
#include <arch/types.h>
#include <typedefs.h>
#include <macros.h>
#include <debug.h>
 
#define TSB_INDEX_MASK ((1<<(21+1+TSB_SIZE-PAGE_WIDTH))-1)
 
/** Invalidate portion of TSB.
*
* We assume that the address space is already locked.
* Note that the respective portions of both TSBs
* are invalidated at once.
*
* @param as Address space.
* @param page First page to invalidate in TSB.
* @param pages Number of pages to invalidate.
* @param pages Number of pages to invalidate. Value of (count_t) -1 means the whole TSB.
*/
void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
{
index_t i0, i;
count_t cnt;
ASSERT(as->arch.itsb && as->arch.dtsb);
i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK;
cnt = min(pages, ITSB_ENTRY_COUNT);
for (i = 0; i < cnt; i++) {
as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT-1)].tag.invalid = 1; /* 1, not 0: the bit means "invalidated by software" */
as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT-1)].tag.invalid = 1;
}
}
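
Because each step is masked with ENTRY_COUNT - 1, a range that crosses the end of the table wraps around instead of running past it, and the min() clamp turns (count_t) -1 into exactly one full sweep. The same arithmetic as a standalone user-space check (constants assume TSB_SIZE == 2):

#include <stdio.h>
#include <stdint.h>

#define ENTRY_COUNT 2048 /* 512 * (1 << TSB_SIZE) with TSB_SIZE == 2 */
#define PAGE_WIDTH 13    /* 8K pages */

int main(void)
{
    uintptr_t page = (uintptr_t) 2046 << PAGE_WIDTH; /* maps near the end */
    unsigned i;

    for (i = 0; i < 4; i++)
        printf("%u\n",
            (unsigned) (((page >> PAGE_WIDTH) + i) & (ENTRY_COUNT - 1)));
    /* prints 2046, 2047, 0, 1: the invalidation walk wraps */
    return 0;
}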
 
/** Copy software PTE to ITSB.
*
* @param t Software PTE.
*/
void itsb_pte_copy(pte_t *t)
{
as_t *as;
tsb_entry_t *tsb;
as = t->as;
tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
 
/*
* We use write barriers to make sure that either the TSB load
* sees consistent data, or the entry is seen as invalid and
* the fault is simply taken again.
*/
 
tsb->tag.invalid = 1; /* invalidate the entry (tag target has this set to 0) */
 
write_barrier();
 
tsb->tag.context = as->asid;
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> PAGE_WIDTH;
tsb->data.cp = t->c;
tsb->data.cv = t->c;
tsb->data.p = t->k; /* p as in privileged */
tsb->data.v = t->p;
write_barrier();
tsb->tag.invalid = 0; /* mark the entry as valid */
}
 
/** Copy software PTE to DTSB.
*
* @param t Software PTE.
* @param ro If true, the mapping is copied read-only.
*/
void dtsb_pte_copy(pte_t *t, bool ro)
{
as_t *as;
tsb_entry_t *tsb;
as = t->as;
tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
 
/*
* We use write barriers to make sure that either the TSB load
* sees consistent data, or the entry is seen as invalid and
* the fault is simply taken again.
*/
 
tsb->tag.invalid = 1; /* invalidate the entry (tag target has this set to 0) */
 
write_barrier();
 
tsb->tag.context = as->asid;
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> PAGE_WIDTH;
tsb->data.cp = t->c;
tsb->data.cv = t->c;
tsb->data.p = t->k; /* p as in privileged */
tsb->data.w = ro ? false : t->w;
tsb->data.v = t->p;
write_barrier();
tsb->tag.invalid = 0; /* mark the entry as valid */
}
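
Both copy routines follow the same discipline: kill the entry, publish that, rewrite it, publish that, re-arm it. Factored out as a sketch (tsb_entry_update() is hypothetical; only the ordering is the point):

static void tsb_entry_update(tsb_entry_t *e, tsb_tag_target_t tag,
    tte_data_t data)
{
    e->tag.invalid = 1;  /* the hardware Tag Target always has 0 here,
                          * so the entry can no longer match */
    write_barrier();     /* the kill is visible before the rewrite */

    tag.invalid = 1;     /* keep it dead while the fields change */
    e->tag = tag;
    e->data = data;
    write_barrier();     /* the payload is visible before the re-arm */

    e->tag.invalid = 0;  /* a single-bit store re-validates the entry */
}

A concurrent hardware TSB walk therefore observes either invalid == 1 (a tag mismatch, hence an ordinary miss resolved by the C handler) or a fully written entry, never a half-updated one.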
 
/** @}
*/
/trunk/kernel/arch/ia64/include/mm/as.h
47,6 → 47,9
typedef struct {
} as_arch_t;
 
#define as_constructor_arch(as, flags) (as != as)
#define as_destructor_arch(as) (as != as)
#define as_create_arch(as, flags) (as != as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
 
/trunk/kernel/arch/ppc32/include/mm/as.h
47,6 → 47,9
typedef struct {
} as_arch_t;
 
#define as_constructor_arch(as, flags) (as != as)
#define as_destructor_arch(as) (as != as)
#define as_create_arch(as, flags) (as != as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
 
/trunk/kernel/arch/amd64/include/mm/as.h
47,6 → 47,9
typedef struct {
} as_arch_t;
 
#define as_constructor_arch(as, flags) (as != as)
#define as_destructor_arch(as) (as != as)
#define as_create_arch(as, flags) (as != as)
#define as_install_arch(as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
/trunk/kernel/arch/ppc64/include/mm/as.h
47,6 → 47,9
typedef struct {
} as_arch_t;
 
#define as_constructor_arch(as, flags) (as != as)
#define as_destructor_arch(as) (as != as)
#define as_create_arch(as, flags) (as != as)
#define as_install_arch(as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
/trunk/kernel/arch/mips32/include/mm/as.h
47,6 → 47,9
typedef struct {
} as_arch_t;
 
#define as_constructor_arch(as, flags) (as != as)
#define as_destructor_arch(as) (as != as)
#define as_create_arch(as, flags) (as != as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
 
/trunk/kernel/arch/ia32/include/mm/as.h
47,6 → 47,9
typedef struct {
} as_arch_t;
 
#define as_constructor_arch(as, flags) (as != as)
#define as_destructor_arch(as) (as != as)
#define as_create_arch(as, flags) (as != as)
#define as_install_arch(as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)