/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief ASID management.
 *
 * Modern processor architectures optimize TLB utilization
 * by using ASIDs (a.k.a. memory contexts on sparc64 and
 * region identifiers on ia64). These ASIDs help to associate
 * each TLB item with an address space, thus making
 * finer-grained TLB invalidation possible.
 *
 * Unfortunately, there are usually fewer ASIDs available than
 * there can be unique as_t structures (i.e. address spaces
 * recognized by the kernel).
 *
 * When the system runs short of ASIDs, it attempts to steal an
 * ASID from an address space that has not been active for a while.
 *
 * This code depends on the fact that ASIDS_ALLOCABLE
 * is greater than the number of supported CPUs (i.e. the
 * number of concurrently active address spaces).
 *
 * Architectures that don't have hardware support for address
 * spaces do not compile this file.
 */
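
/*
 * Illustrative note, not part of the original sources: since at most one
 * address space can be active on each CPU at any moment, having
 * ASIDS_ALLOCABLE greater than the number of CPUs guarantees that whenever
 * every ASID is allocated, at least one of them belongs to an inactive
 * address space and can therefore be stolen in asid_get(). A hypothetical
 * compile-time check of this invariant could be sketched as:
 *
 *	#if ASIDS_ALLOCABLE <= MAX_CPUS_SUPPORTED
 *	#error "ASID stealing requires more ASIDs than CPUs"
 *	#endif
 *
 * where MAX_CPUS_SUPPORTED is a made-up symbol; the real configuration
 * value is architecture-specific.
 */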
#include <mm/asid.h>
#include <mm/as.h>
#include <mm/tlb.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <debug.h>

static count_t asids_allocated = 0;

/** Allocate free address space identifier.
 *
 * Interrupts must be disabled and inactive_as_with_asid_lock must be held
 * prior to this call.
 *
 * @return New ASID.
 */
asid_t asid_get(void)
{
	asid_t asid;
	link_t *tmp;
	as_t *as;

	/*
	 * Check if there is an unallocated ASID.
	 */

	if (asids_allocated == ASIDS_ALLOCABLE) {

		/*
		 * All ASIDs are already allocated.
		 * Resort to stealing.
		 */

		/*
		 * Remove the first item on the list.
		 * It is guaranteed to belong to an
		 * inactive address space.
		 */
		ASSERT(!list_empty(&inactive_as_with_asid_head));
		tmp = inactive_as_with_asid_head.next;
		list_remove(tmp);

		as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);

		/*
		 * Steal the ASID.
		 * Note that the stolen ASID is not active.
		 */
		asid = as->asid;
		ASSERT(asid != ASID_INVALID);

		/*
		 * Notify the address space from which the ASID
		 * was stolen by invalidating its asid member.
		 */
		as->asid = ASID_INVALID;

		/*
		 * If the architecture uses some software cache
		 * of TLB entries (e.g. TSB on sparc64), the
		 * cache must be invalidated as well.
		 */
		as_invalidate_translation_cache(as, 0, (count_t) -1);

		/*
		 * Purge the stolen ASID from all TLBs in the system.
		 */
		tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
		tlb_invalidate_asid(asid);
		tlb_shootdown_finalize();
	} else {

		/*
		 * There is at least one unallocated ASID.
		 * Find it and assign it.
		 */

		asid = asid_find_free();
		asids_allocated++;

		/*
		 * Purge the allocated ASID from TLBs.
		 */
		tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
		tlb_invalidate_asid(asid);
		tlb_shootdown_finalize();
	}

	return asid;
}
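
/*
 * Illustrative caller sketch, not part of the original sources. It only
 * restates the calling convention documented above (interrupts disabled,
 * inactive_as_with_asid_lock held); the synchronization primitives named
 * below are assumptions and the real call site lives in the address space
 * management code (as.c), which may differ between revisions:
 *
 *	ipl_t ipl = interrupts_disable();
 *	spinlock_lock(&inactive_as_with_asid_lock);
 *	as->asid = asid_get();
 *	spinlock_unlock(&inactive_as_with_asid_lock);
 *	interrupts_restore(ipl);
 */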
/** Release address space identifier.
 *
 * This code relies on architecture-dependent functionality.
 *
 * @param asid ASID to be released.
 */
void asid_put(asid_t asid)
{
	asids_allocated--;
	asid_put_arch(asid);
}
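
/*
 * Illustrative note, not part of the original sources: asid_put() is the
 * counterpart of asid_get(). A plausible call site, under the same locking
 * discipline as asid_get(), is the teardown of an address space that still
 * owns a valid ASID, roughly:
 *
 *	if (as->asid != ASID_INVALID)
 *		asid_put(as->asid);
 *
 * The exact location of that call is in the address space code (as.c) and
 * may differ between HelenOS revisions.
 */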
/** @}
 */