/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief   ASID management.
 *
 * Modern processor architectures optimize TLB utilization
 * by using ASIDs (a.k.a. memory contexts on sparc64 and
 * region identifiers on ia64). These ASIDs help to associate
 * each TLB item with an address space, thus making
 * finer-grained TLB invalidation possible.
 *
 * Unfortunately, there are usually fewer ASIDs available than
 * there can be unique as_t structures (i.e. address spaces
 * recognized by the kernel).
 *
 * When the system runs short of ASIDs, it will attempt to steal
 * an ASID from an address space that has not been active for
 * a while.
 *
 * This code depends on the fact that ASIDS_ALLOCABLE
 * is greater than the number of supported CPUs (i.e. the
 * maximum number of concurrently active address spaces).
 *
 * Kernels for architectures that lack hardware support for
 * address spaces are built without this file.
 */
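
/*
 * Worked example of the invariant above (an illustration with a
 * hypothetical CPU count, not from the original source): on a machine
 * with 4 CPUs, at most 4 address spaces can be active at any instant.
 * If ASIDS_ALLOCABLE > 4 and every ASID is handed out, then at least
 * ASIDS_ALLOCABLE - 4 ASIDs belong to inactive address spaces, so the
 * inactive_as_with_asid_head list that asid_get() steals from can
 * never be empty when stealing is attempted.
 */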

#include <mm/asid.h>
#include <mm/as.h>
#include <mm/tlb.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <arch.h>
#include <adt/list.h>
#include <debug.h>

/**
 * asidlock protects the asids_allocated counter.
 */
SPINLOCK_INITIALIZE(asidlock);

static count_t asids_allocated = 0;

/** Allocate free address space identifier.
 *
 * Interrupts must be disabled and inactive_as_with_asid_lock must be held
 * prior to this call.
 *
 * @return New ASID.
 */
asid_t asid_get(void)
{
    asid_t asid;
    link_t *tmp;
    as_t *as;

    /*
     * Check if there is an unallocated ASID.
     */
    spinlock_lock(&asidlock);
    if (asids_allocated == ASIDS_ALLOCABLE) {

        /*
         * All ASIDs are already allocated.
         * Resort to stealing.
         */

        /*
         * Remove the first item on the list.
         * It is guaranteed to belong to an
         * inactive address space.
         */
        ASSERT(!list_empty(&inactive_as_with_asid_head));
        tmp = inactive_as_with_asid_head.next;
        list_remove(tmp);

        as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);
        mutex_lock_active(&as->lock);

        /*
         * Steal the ASID.
         * Note that the stolen ASID is not active.
         */
        asid = as->asid;
        ASSERT(asid != ASID_INVALID);

        /*
         * Notify the address space from which the ASID
         * was stolen by invalidating its asid member.
         */
        as->asid = ASID_INVALID;

        /*
         * If the architecture uses some software cache
         * of TLB entries (e.g. TSB on sparc64), the
         * cache must be invalidated as well.
         */
        as_invalidate_translation_cache(as, 0, (count_t) -1);

        mutex_unlock(&as->lock);

        /*
         * Rid the system of the stolen ASID.
         */
        tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
        tlb_invalidate_asid(asid);
        tlb_shootdown_finalize();
    } else {

        /*
         * There is at least one unallocated ASID.
         * Find it and assign it.
         */
        asid = asid_find_free();
        asids_allocated++;

        /*
         * Purge the allocated ASID from TLBs.
         */
        tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
        tlb_invalidate_asid(asid);
        tlb_shootdown_finalize();
    }

    spinlock_unlock(&asidlock);

    return asid;
}
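
/*
 * A minimal sketch of the expected calling protocol for asid_get(),
 * per its documentation comment above. The lock name is the one the
 * comment references (it lives outside this file); the surrounding
 * glue is hypothetical:
 *
 *     ipl_t ipl = interrupts_disable();
 *     spinlock_lock(&inactive_as_with_asid_lock);
 *     as->asid = asid_get();
 *     spinlock_unlock(&inactive_as_with_asid_lock);
 *     interrupts_restore(ipl);
 */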

/** Release address space identifier.
 *
 * This code relies on architecture-dependent
 * functionality.
 *
 * @param asid ASID to be released.
 */
void asid_put(asid_t asid)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&asidlock);

    asids_allocated--;
    asid_put_arch(asid);

    spinlock_unlock(&asidlock);
    interrupts_restore(ipl);
}
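
/*
 * asid_put_arch() is supplied by each architecture. A minimal sketch,
 * assuming a hypothetical port that recycles freed ASIDs through a
 * FIFO later consulted by asid_find_free() (an illustration only, not
 * the code of any particular port; free_asids is a made-up name):
 *
 *     void asid_put_arch(asid_t asid)
 *     {
 *         fifo_push(free_asids, asid);
 *     }
 */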

/** @}
 */
167