Subversion Repositories HelenOS

Rev 1891 → Rev 2071 (the only change between these revisions is in the license header: "Copyright (C)" becomes "Copyright (c)")
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief   ASID management.
 *
 * Modern processor architectures optimize TLB utilization
 * by using ASIDs (a.k.a. memory contexts on sparc64 and
 * region identifiers on ia64). These ASIDs help to associate
 * each TLB item with an address space, thus making
 * finer-grained TLB invalidation possible.
 *
 * Unfortunately, there are usually fewer ASIDs available than
 * there can be unique as_t structures (i.e. address spaces
 * recognized by the kernel).
 *
 * When the system runs short of ASIDs, it will attempt to steal
 * an ASID from an address space that has not been active for
 * a while.
 *
 * This code depends on the fact that ASIDS_ALLOCABLE
 * is greater than the number of supported CPUs (i.e. the
 * maximum number of concurrently active address spaces).
 *
 * Architectures that lack hardware support for address
 * spaces do not compile this file.
 */

#include <mm/asid.h>
#include <mm/as.h>
#include <mm/tlb.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <arch.h>
#include <adt/list.h>
#include <debug.h>

/**
 * asidlock protects the asids_allocated counter.
 */
SPINLOCK_INITIALIZE(asidlock);

static count_t asids_allocated = 0;

/** Allocate free address space identifier.
 *
 * Interrupts must be disabled and inactive_as_with_asid_lock must be held
 * prior to this call.
 *
 * @return New ASID.
 */
asid_t asid_get(void)
{
    asid_t asid;
    link_t *tmp;
    as_t *as;

    /*
     * Check if there is an unallocated ASID.
     */

    spinlock_lock(&asidlock);
    if (asids_allocated == ASIDS_ALLOCABLE) {

        /*
         * All ASIDs are already allocated.
         * Resort to stealing.
         */

        /*
         * Remove the first item on the list.
         * It is guaranteed to belong to an
         * inactive address space.
         */
        ASSERT(!list_empty(&inactive_as_with_asid_head));
        tmp = inactive_as_with_asid_head.next;
        list_remove(tmp);

        as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);
        mutex_lock_active(&as->lock);

        /*
         * Steal the ASID.
         * Note that the stolen ASID is not active.
         */
        asid = as->asid;
        ASSERT(asid != ASID_INVALID);

        /*
         * Notify the address space from which the ASID
         * was stolen by invalidating its asid member.
         */
        as->asid = ASID_INVALID;

        /*
         * If the architecture uses some software cache
         * of TLB entries (e.g. TSB on sparc64), the
         * cache must be invalidated as well.
         */
        as_invalidate_translation_cache(as, 0, (count_t) -1);

        mutex_unlock(&as->lock);

        /*
         * Rid the system of the stolen ASID.
         */
        tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
        tlb_invalidate_asid(asid);
        tlb_shootdown_finalize();
    } else {

        /*
         * There is at least one unallocated ASID.
         * Find it and assign it.
         */

        asid = asid_find_free();
        asids_allocated++;

        /*
         * Purge the allocated ASID from TLBs.
         */
        tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
        tlb_invalidate_asid(asid);
        tlb_shootdown_finalize();
    }

    spinlock_unlock(&asidlock);

    return asid;
}

/** Release address space identifier.
 *
 * This code relies on architecture-dependent
 * functionality.
 *
 * @param asid ASID to be released.
 */
void asid_put(asid_t asid)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&asidlock);

    asids_allocated--;
    asid_put_arch(asid);

    spinlock_unlock(&asidlock);
    interrupts_restore(ipl);
}

/** @}
 */
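
For readers who want to experiment with the stealing policy in isolation, below is a minimal user-space sketch of the same idea; it is not HelenOS code. The FIFO stands in for inactive_as_with_asid_head, asid_find_free() is reduced to a counter, and all locking, interrupt handling, and TLB shootdown are omitted. The names toy_asid_get(), inactive_push(), and inactive_pop() are invented for this sketch.

/* Toy model of asid_get() above: allocate until ASIDS_ALLOCABLE is
 * exhausted, then steal from the least recently inactive address space. */
#include <assert.h>
#include <stdio.h>

#define ASIDS_ALLOCABLE 4
#define ASID_INVALID    (-1)
#define AS_MAX          8

typedef struct {
    int asid;
} as_t;

static int asids_allocated = 0;
static int next_free = 0;               /* stands in for asid_find_free() */

/* FIFO of inactive address spaces that still own an ASID;
 * stands in for inactive_as_with_asid_head. */
static as_t *inactive[AS_MAX];
static int q_head = 0;
static int q_tail = 0;

static void inactive_push(as_t *as)
{
    inactive[q_tail] = as;
    q_tail = (q_tail + 1) % AS_MAX;
}

static as_t *inactive_pop(void)
{
    as_t *as = inactive[q_head];
    q_head = (q_head + 1) % AS_MAX;
    return as;
}

static int toy_asid_get(void)
{
    int asid;

    if (asids_allocated == ASIDS_ALLOCABLE) {
        /*
         * All ASIDs are allocated; steal one from the
         * least recently used inactive address space.
         */
        as_t *victim = inactive_pop();

        assert(victim->asid != ASID_INVALID);
        asid = victim->asid;
        victim->asid = ASID_INVALID;    /* victim must reallocate later */
    } else {
        asid = next_free++;
        asids_allocated++;
    }
    return asid;
}

int main(void)
{
    as_t as[6];
    int i;

    for (i = 0; i < 6; i++) {
        as[i].asid = toy_asid_get();
        inactive_push(&as[i]);          /* pretend it goes inactive */
    }
    for (i = 0; i < 6; i++)
        printf("as[%d].asid = %d\n", i, as[i].asid);
    return 0;
}

Running it hands out ASIDs 0 through 3, then steals 0 and 1 back from the two longest-inactive address spaces, leaving their asid fields set to ASID_INVALID, mirroring what asid_get() does in the listing above. Note that asids_allocated is not incremented on a steal, since the total number of assigned ASIDs stays the same.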