Rev 3675 -> Rev 4692
Line 48... | Line 48...

  * We assume that the address space is already locked. Note that respective
  * portions of both TSBs are invalidated at a time.
  *
  * @param as Address space.
  * @param page First page to invalidate in TSB.
- * @param pages Number of pages to invalidate. Value of (count_t) -1 means the
+ * @param pages Number of pages to invalidate. Value of (size_t) -1 means the
  * whole TSB.
  */
-void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
+void tsb_invalidate(as_t *as, uintptr_t page, size_t pages)
 {
-	index_t i0, i;
-	count_t cnt;
+	size_t i0;
+	size_t i;
+	size_t cnt;
 
 	ASSERT(as->arch.itsb && as->arch.dtsb);
 
 	i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
 	ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);
 
-	if (pages == (count_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
+	if (pages == (size_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
 		cnt = ITSB_ENTRY_COUNT;
 	else
 		cnt = pages * 2;
 
 	for (i = 0; i < cnt; i++) {
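For context, the range arithmetic that the size_t-typed code performs can be reproduced in isolation. The stand-alone sketch below mirrors that computation; the values chosen for MMU_PAGE_WIDTH, ITSB_ENTRY_COUNT and TSB_INDEX_MASK are illustrative assumptions, not the kernel's actual configuration.

/* Sketch of the tsb_invalidate() index arithmetic (assumed constants). */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MMU_PAGE_WIDTH    13                      /* assumed: 8K MMU pages */
#define ITSB_ENTRY_COUNT  512                     /* assumed TSB size */
#define TSB_INDEX_MASK    (ITSB_ENTRY_COUNT - 1)  /* assumed power-of-two mask */

int main(void)
{
	uintptr_t page = 0x40000000;  /* hypothetical first page of the range */
	size_t pages = 3;             /* hypothetical number of pages */

	/* First TSB index covered by the range. */
	size_t i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;

	/* The range covers pages * 2 entries (each base page spans two
	 * 8K-subpage entries); (size_t) -1 or an oversized range degrades
	 * to wiping the whole TSB. */
	size_t cnt;
	if (pages == (size_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
		cnt = ITSB_ENTRY_COUNT;
	else
		cnt = pages * 2;

	printf("i0 = %zu, cnt = %zu\n", i0, cnt);
	return 0;
}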
Line 79... | Line 80...

 /** Copy software PTE to ITSB.
  *
  * @param t Software PTE.
  * @param index Zero if lower 8K-subpage, one if higher 8K subpage.
  */
-void itsb_pte_copy(pte_t *t, index_t index)
+void itsb_pte_copy(pte_t *t, size_t index)
 {
 	as_t *as;
 	tsb_entry_t *tsb;
-	index_t entry;
+	size_t entry;
 
 	ASSERT(index <= 1);
 
 	as = t->as;
 	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
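The index parameter selects which 8K subpage of the software PTE is being propagated. The minimal sketch below reproduces the entry computation with assumed constants (MMU_PAGE_WIDTH and TSB_INDEX_MASK are placeholders) and shows that the mask makes the higher subpage wrap around cleanly when the base index is the last TSB entry.

/* Sketch of the entry selection in itsb_pte_copy() (assumed constants). */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MMU_PAGE_WIDTH  13           /* assumed: 8K MMU pages */
#define TSB_INDEX_MASK  (512 - 1)    /* assumed 512-entry TSB */

static size_t tsb_entry_for(uintptr_t page, size_t index)
{
	return ((page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
}

int main(void)
{
	/* A hypothetical page whose base index is the last TSB entry. */
	uintptr_t page = (uintptr_t) 511 << MMU_PAGE_WIDTH;

	printf("lower subpage  -> entry %zu\n", tsb_entry_for(page, 0));  /* 511 */
	printf("higher subpage -> entry %zu\n", tsb_entry_for(page, 1));  /* wraps to 0 */
	return 0;
}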
Line 125... | Line 126...

  *
  * @param t Software PTE.
  * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
  * @param ro If true, the mapping is copied read-only.
  */
-void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
+void dtsb_pte_copy(pte_t *t, size_t index, bool ro)
 {
 	as_t *as;
 	tsb_entry_t *tsb;
-	index_t entry;
+	size_t entry;
 
 	ASSERT(index <= 1);
 
 	as = t->as;
 	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
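A hypothetical call site for the data-side variant might look as follows. This is only a usage sketch: it assumes the kernel's own pte_t and dtsb_pte_copy() declarations are in scope, and the helper name and the read-only policy are invented for illustration rather than taken from the surrounding code.

/* Hypothetical helper: propagate both 8K subpages of a software PTE into
 * the DTSB after a data-access fault has been resolved. */
static void dtsb_propagate(pte_t *t, bool write_access)
{
	size_t index;

	/* Copy read-only unless the faulting access was a write, so that a
	 * later write still traps into the generic fault path (an assumed
	 * policy, shown only to illustrate the ro flag). */
	for (index = 0; index <= 1; index++)
		dtsb_pte_copy(t, index, !write_access);
}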