/kernel/trunk/genarch/src/mm/asid.c |
---|
125,6 → 125,7 |
*/ |
tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0); |
tlb_shootdown_finalize(); |
tlb_invalidate_asid(asid); |
as->asid = ASID_INVALID; |
spinlock_unlock(&as->lock); |
/kernel/trunk/generic/src/mm/tlb.c |
---|
54,25 → 54,12 |
spinlock_lock(&tlblock); |
/* |
* TODO: assemble shootdown message. |
* TODO: wrap parameters into a message and |
* dispatch it to all CPUs excluding this one. |
*/ |
tlb_shootdown_ipi_send(); |
switch (type) { |
case TLB_INVL_ALL: |
tlb_invalidate_all(); |
break; |
case TLB_INVL_ASID: |
tlb_invalidate_asid(asid); |
break; |
case TLB_INVL_PAGES: |
tlb_invalidate_pages(asid, page, cnt); |
break; |
default: |
panic("unknown tlb_invalidate_type_t value: %d\n", type); |
break; |
} |
busy_wait: |
for (i = 0; i<config.cpu_count; i++) |
if (cpus[i].tlb_active) |
95,7 → 82,7 |
CPU->tlb_active = 0; |
spinlock_lock(&tlblock); |
spinlock_unlock(&tlblock); |
tlb_invalidate_all(); /* TODO: use valid ASID */ |
tlb_invalidate_all(); /* TODO: be more finer-grained in what to invalidate */ |
CPU->tlb_active = 1; |
} |
#endif /* CONFIG_SMP */ |
/kernel/trunk/arch/ia64/include/mm/asid.h |
---|
33,13 → 33,22 |
typedef __u32 asid_t; |
/* |
* ASID_MAX can range from 2^18 - 1 to 2^24 - 1, |
* depending on architecture implementation. |
/** Number of ia64 RIDs (Region Identifiers) per kernel ASID. */ |
#define RIDS_PER_ASID 7 |
#define RID_OVERFLOW 16777216 /* 2^24 */ |
/** |
* The point is to have ASID_MAX_ARCH big enough |
* so that it is never reached and the ASID allocation |
* mechanism in asid_get() never resorts to stealing. |
*/ |
#define ASID_MAX_ARCH 16777215 /* 2^24 - 1 */ |
#define ASID_MAX_ARCH ((asid_t) -1) /**< This value is never reached. */ |
#define asid_find_free() ASID_MAX_ARCH |
/** |
* Value used to recognize the situation when all ASIDs were already allocated. |
*/ |
#define ASID_OVERFLOW (RID_OVERFLOW/RIDS_PER_ASID) |
#define asid_put_arch(x) |
#endif |
/kernel/trunk/arch/ia64/Makefile.inc |
---|
64,6 → 64,8 |
arch/$(ARCH)/src/cpu/cpu.c \ |
arch/$(ARCH)/src/ivt.S \ |
arch/$(ARCH)/src/interrupt.c \ |
arch/$(ARCH)/src/mm/asid.c \ |
arch/$(ARCH)/src/mm/frame.c \ |
arch/$(ARCH)/src/mm/page.c \ |
arch/$(ARCH)/src/mm/tlb.c \ |
arch/$(ARCH)/src/drivers/it.c |
/kernel/trunk/arch/ia64/src/mm/asid.c |
---|
0,0 → 1,117 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/* |
* ASID management. |
* |
* Because ia64 has much wider ASIDs (18-24 bits) compared to other |
* architectures (e.g. 8 bits on mips32 and 12 bits on sparc32), it is |
* inappropriate to use same methods (i.e. genarch/mm/asid_fifo.c) for |
* all of them. |
* |
* Instead, ia64 assigns ASID values from a counter that eventually |
* overflows. When this happens, the counter is reset and all TLBs are |
* entirely invalidated. Furthermore, all address space structures, |
* except for the one with asid == ASID_KERNEL, are assigned new ASID. |
* |
* It is important to understand that, in SPARTAN, one ASID represents |
* RIDS_PER_ASID consecutive hardware RIDs (Region ID's). |
* |
* Note that the algorithm used can handle only the maximum of |
* ASID_OVERFLOW-ASID_START address spaces at a time. |
*/ |
#include <arch/mm/asid.h> |
#include <mm/asid.h> |
#include <mm/as.h> |
#include <mm/tlb.h> |
#include <list.h> |
#include <typedefs.h> |
#include <debug.h> |
/** |
 * Stores the ASID to be returned next. |
 * Must be only accessed when asidlock is held. |
 * |
 * Monotonic counter state for the ia64 ASID allocator; it wraps back |
 * to ASID_START in asid_find_free() when it reaches ASID_OVERFLOW. |
 * NOTE(review): asidlock itself is declared outside this hunk — |
 * presumably in the generic ASID code; confirm. |
 */ |
static asid_t next_asid = ASID_START; |
/** Assign next ASID. |
 * |
 * On ia64, this function is used only to allocate ASID |
 * for a newly created address space. As a side effect, |
 * it might attempt to shootdown TLBs and reassign |
 * ASIDs to existing address spaces. |
 * |
 * When calling this function, interrupts must be disabled |
 * and the asidlock must be held. |
 * |
 * @return ASID for new address space. |
 */ |
asid_t asid_find_free(void) |
{ |
as_t *as; |
link_t *cur; |
if (next_asid == ASID_OVERFLOW) { |
/* |
 * The counter has overflown. |
 */ |
/* |
 * Reset the counter. |
 */ |
next_asid = ASID_START; |
/* |
 * Initiate TLB shootdown. |
 * Other CPUs are stalled here until tlb_shootdown_finalize() below, |
 * so no CPU can keep using a stale ASID while we reassign them. |
 */ |
tlb_shootdown_start(TLB_INVL_ALL, 0, 0, 0); |
/* |
 * Reassign ASIDs to existing address spaces. |
 * Walking as_with_asid_head is safe here because the caller holds |
 * asidlock (see the function contract above). The kernel address |
 * space (ASID_KERNEL) is presumably not on this list — TODO confirm. |
 */ |
for (cur = as_with_asid_head.next; cur != &as_with_asid_head; cur = cur->next) { |
ASSERT(next_asid < ASID_OVERFLOW); |
as = list_get_instance(cur, as_t, as_with_asid_link); |
spinlock_lock(&as->lock); |
as->asid = next_asid++; |
spinlock_unlock(&as->lock); |
} |
/* |
 * Finish TLB shootdown. |
 * The local TLB is flushed only after every address space has |
 * received its new ASID. |
 */ |
tlb_shootdown_finalize(); |
tlb_invalidate_all(); |
} |
/* Hand out the next value and advance the counter. */ |
ASSERT(next_asid < ASID_OVERFLOW); |
return next_asid++; |
} |
/kernel/trunk/arch/ia64/src/mm/tlb.c |
---|
0,0 → 1,53 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/* |
* TLB management. |
*/ |
#include <mm/tlb.h> |
#include <arch/mm/asid.h> |
/** Invalidate all TLB entries. |
 * |
 * Because of ASID management, region registers must be reset |
 * with new RIDs derived from the potentially new ASID. |
 */ |
void tlb_invalidate_all(void) |
{ |
/* TODO */ |
/* NOTE(review): stub — currently a no-op; the ia64 purge (ptc.e) |
 * and region-register reload are not implemented yet. */ |
} |
/** Invalidate entries belonging to an address space. |
 * |
 * @param asid Address space identifier. |
 */ |
void tlb_invalidate_asid(asid_t asid) |
{ |
/* TODO */ |
/* NOTE(review): stub — currently ignores asid and invalidates |
 * nothing; callers (e.g. the shootdown path) get no effect yet. */ |
} |