Rev 341 | Go to most recent revision | Show entire file | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 341 | Rev 391 | ||
---|---|---|---|
Line 24... | Line 24... | ||
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28 | 28 | ||
- | 29 | #include <arch/mm/asid.h> |
|
- | 30 | #include <synch/spinlock.h> |
|
29 | #include <arch.h> |
31 | #include <arch.h> |
30 | #include <memstr.h> |
32 | #include <debug.h> |
31 | 33 | ||
32 | /**< Array of threads that have currently some ASID assigned, |
- | |
33 | NULL means no thread have ASID with number of that index assigned */ |
- | |
34 | struct thread * asids[256]; |
34 | #define ASIDS 256 |
35 | int last_asid; /**< The number of last assigned ASID */ |
- | |
36 | int asid_bitmap[32]; /**< Bitmap of ASIDs currently in TLB */ |
- | |
37 | 35 | ||
- | 36 | static spinlock_t asid_usage_lock; |
|
- | 37 | static count_t asid_usage[ASIDS]; /**< Usage tracking array for ASIDs */ |
|
38 | 38 | ||
39 | /** Cleanup asid_bitmap |
39 | /** Get ASID |
40 | * |
40 | * |
- | 41 | * Get the least used ASID. |
|
- | 42 | * |
|
- | 43 | * @return ASID |
|
41 | */ |
44 | */ |
42 | void asid_bitmap_reset(void) |
45 | asid_t asid_get(void) |
43 | { |
46 | { |
- | 47 | pri_t pri; |
|
- | 48 | int i, j; |
|
- | 49 | count_t min; |
|
- | 50 | ||
- | 51 | min = (unsigned) -1; |
|
- | 52 | ||
- | 53 | pri = cpu_priority_high(); |
|
- | 54 | spinlock_lock(&asid_usage_lock); |
|
- | 55 | ||
- | 56 | for (i=0, j = 0; (i<ASIDS); i++) { |
|
44 | memsetb(asid_bitmap, sizeof(asid_bitmap), 0); |
57 | if (asid_usage[i] < min) { |
- | 58 | j = i; |
|
- | 59 | min = asid_usage[i]; |
|
- | 60 | if (!min) |
|
- | 61 | break; |
|
- | 62 | } |
|
45 | } |
63 | } |
- | 64 | ||
- | 65 | asid_usage[i]++; |
|
46 | 66 | ||
- | 67 | spinlock_unlock(&asid_usage_lock); |
|
- | 68 | cpu_priority_restore(pri); |
|
- | 69 | ||
- | 70 | return i; |
|
- | 71 | } |
|
47 | 72 | ||
48 | /** Initialize manipulating with ASIDs |
73 | /** Release ASID |
49 | * |
74 | * |
- | 75 | * Release ASID by decrementing its usage count. |
|
- | 76 | * |
|
- | 77 | * @param asid ASID. |
|
50 | */ |
78 | */ |
51 | void init_asids(void) |
79 | void asid_put(asid_t asid) |
52 | { |
80 | { |
- | 81 | pri_t pri; |
|
- | 82 | ||
- | 83 | pri = cpu_priority_high(); |
|
- | 84 | spinlock_lock(&asid_usage_lock); |
|
- | 85 | ||
53 | memsetb(asids, sizeof(asids), 0); |
86 | ASSERT(asid_usage[asid] > 0); |
54 | asid_bitmap_reset(); |
87 | asid_usage[asid]--; |
- | 88 | ||
- | 89 | spinlock_unlock(&asid_usage_lock); |
|
55 | last_asid = 0; |
90 | cpu_priority_restore(pri); |
56 | } |
91 | } |