/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <arch/atomic.h>
#include <arch/barrier.h>
#include <arch.h>
#include <preemption.h>
#include <print.h>
#include <debug.h>
#include <symtab.h>

#ifdef CONFIG_SMP

/** Initialize spinlock
 *
 * Initialize spinlock.
 *
 * @param sl   Pointer to spinlock_t structure.
 * @param name Name of the spinlock, recorded when CONFIG_DEBUG_SPINLOCK is enabled.
 */
void spinlock_initialize(spinlock_t *sl, char *name)
{
    atomic_set(&sl->val, 0);
#ifdef CONFIG_DEBUG_SPINLOCK
    sl->name = name;
#endif
}
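
/*
 * Illustrative only (not part of the original file): a minimal sketch of how a
 * spinlock might be declared and initialized. The identifiers demo_lock and
 * demo_init() are hypothetical, and the snippet is kept out of the build.
 */
#if 0
static spinlock_t demo_lock;

static void demo_init(void)
{
    /* Give the lock a name so CONFIG_DEBUG_SPINLOCK reports can identify it. */
    spinlock_initialize(&demo_lock, "demo_lock");
}
#endif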

#ifdef CONFIG_DEBUG_SPINLOCK
/** Lock spinlock
 *
 * Lock spinlock.
 * This version has limited ability to report
 * possible occurrence of deadlock.
 *
 * @param sl Pointer to spinlock_t structure.
 */
void spinlock_lock(spinlock_t *sl)
{
    count_t i = 0;
    char *symbol;
    bool deadlock_reported = false;

    preemption_disable();
    while (test_and_set(&sl->val)) {
        if (i++ > 300000) {
            printf("cpu%d: looping on spinlock %p:%s, caller=%p",
                   CPU->id, sl, sl->name, CALLER);
            symbol = get_symtab_entry(CALLER);
            if (symbol)
                printf("(%s)", symbol);
            printf("\n");
            i = 0;
            deadlock_reported = true;
        }
    }

    if (deadlock_reported)
        printf("cpu%d: not deadlocked\n", CPU->id);

    /*
     * Prevent critical section code from bleeding out this way up.
     */
    CS_ENTER_BARRIER();
}

#else

/** Lock spinlock
 *
 * Lock spinlock.
 *
 * @param sl Pointer to spinlock_t structure.
 */
void spinlock_lock(spinlock_t *sl)
{
    preemption_disable();

    /*
     * Each architecture has its own efficient/recommended
     * implementation of spinlock.
     */
    spinlock_arch(&sl->val);

    /*
     * Prevent critical section code from bleeding out this way up.
     */
    CS_ENTER_BARRIER();
}
#endif
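
/*
 * For illustration only (not part of the original file): one plausible shape of
 * an architecture-level spinlock_arch() is a simple test-and-set busy-wait, as
 * sketched below. Real ports typically use tuned, architecture-specific code;
 * this generic C version is an assumption, not the actual per-port code.
 */
#if 0
static void spinlock_arch_generic(atomic_t *val)
{
    /* Spin until the previous value was 0, i.e. until we acquire the lock. */
    while (test_and_set(val))
        ;
}
#endif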

/** Lock spinlock conditionally
 *
 * Lock spinlock conditionally.
 * If the spinlock is not available at the moment,
 * signal failure.
 *
 * @param sl Pointer to spinlock_t structure.
 *
 * @return Zero on failure, non-zero otherwise.
 */
int spinlock_trylock(spinlock_t *sl)
{
    int rc;

    preemption_disable();
    rc = !test_and_set(&sl->val);

    /*
     * Prevent critical section code from bleeding out this way up.
     */
    CS_ENTER_BARRIER();

    if (!rc)
        preemption_enable();

    return rc;
}
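
/*
 * Illustrative only (not part of the original file): spinlock_trylock() returns
 * non-zero when the lock was acquired, so the caller unlocks only on success.
 * The identifiers demo_lock and demo_try() are hypothetical.
 */
#if 0
static void demo_try(void)
{
    if (spinlock_trylock(&demo_lock)) {
        /* Lock held and preemption disabled; do the work, then release. */
        spinlock_unlock(&demo_lock);
    } else {
        /* Lock was busy; preemption has already been re-enabled for us. */
    }
}
#endif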

/** Unlock spinlock
 *
 * Unlock spinlock.
 *
 * @param sl Pointer to spinlock_t structure.
 */
void spinlock_unlock(spinlock_t *sl)
{
    ASSERT(atomic_get(&sl->val) != 0);

    /*
     * Prevent critical section code from bleeding out this way down.
     */
    CS_LEAVE_BARRIER();

    atomic_set(&sl->val, 0);
    preemption_enable();
}
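
/*
 * Illustrative only (not part of the original file): the usual pairing of
 * spinlock_lock() and spinlock_unlock() around a critical section. The lock
 * also disables preemption and the unlock re-enables it, so the section must
 * stay short and must not sleep. The identifiers demo_lock and
 * demo_critical_section() are hypothetical.
 */
#if 0
static void demo_critical_section(void)
{
    spinlock_lock(&demo_lock);
    /* ... manipulate data protected by demo_lock ... */
    spinlock_unlock(&demo_lock);
}
#endif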
 
#endif