Subversion Repositories HelenOS

Rev

Rev 1890 | Rev 2054 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 1890 Rev 2049
1
/*
1
/*
2
 * Copyright (C) 2006 Jakub Jermar
2
 * Copyright (C) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup sparc64proc
29
/** @addtogroup sparc64proc
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
#include <proc/scheduler.h>
35
#include <proc/scheduler.h>
36
#include <proc/thread.h>
36
#include <proc/thread.h>
37
#include <arch.h>
37
#include <arch.h>
38
#include <arch/asm.h>
38
#include <arch/asm.h>
39
#include <arch/regdef.h>
39
#include <arch/regdef.h>
40
#include <arch/stack.h>
40
#include <arch/stack.h>
41
#include <arch/mm/tlb.h>
41
#include <arch/mm/tlb.h>
42
#include <arch/mm/page.h>
42
#include <arch/mm/page.h>
43
#include <config.h>
43
#include <config.h>
44
#include <align.h>
44
#include <align.h>
45
#include <macros.h>
45
#include <macros.h>
46
 
46
 
47
/** Perform sparc64 specific tasks needed before the new task is run.
 *
 * Intentionally empty: sparc64 needs no per-task preparation beyond the
 * generic scheduler work; per-thread setup happens in
 * before_thread_runs_arch().
 */
void before_task_runs_arch(void)
{
}
51
 
51
 
52
/** Perform sparc64 specific steps before scheduling a thread.
52
/** Perform sparc64 specific steps before scheduling a thread.
53
 *
53
 *
54
 * Ensure that thread's kernel stack, as well as userspace window
54
 * Ensure that thread's kernel stack, as well as userspace window buffer for
55
 * buffer for userspace threads, are locked in DTLB.
-
 
56
 * For userspace threads, initialize reserved global registers
55
 * userspace threads, are locked in DTLB. For userspace threads, initialize
57
 * in the alternate and interrupt sets.
56
 * reserved global registers in the alternate and interrupt sets.
58
 */
57
 */
59
void before_thread_runs_arch(void)
58
void before_thread_runs_arch(void)
60
{
59
{
61
    uintptr_t base;
60
    uintptr_t base;
62
   
61
   
63
    base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
62
    base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
64
 
63
 
65
    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
64
    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1 <<
-
 
65
        KERNEL_PAGE_WIDTH))) {
66
        /*
66
        /*
67
         * Kernel stack of this thread is not locked in DTLB.
67
         * Kernel stack of this thread is not locked in DTLB.
68
         * First, make sure it is not mapped already.
68
         * First, make sure it is not mapped already.
69
         * If not, create a locked mapping for it.
69
         * If not, create a locked mapping for it.
70
         */
70
         */
71
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
71
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
-
 
72
            THREAD->kstack);
-
 
73
        dtlb_insert_mapping((uintptr_t) THREAD->kstack,
72
        dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
74
            KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
73
    }
75
    }
74
   
76
   
75
    if ((THREAD->flags & THREAD_FLAG_USPACE)) {
77
    if ((THREAD->flags & THREAD_FLAG_USPACE)) {
76
        /*
78
        /*
77
         * If this thread executes also in userspace, we have to lock
79
         * If this thread executes also in userspace, we have to lock
78
         * its userspace window buffer into DTLB.
80
         * its userspace window buffer into DTLB.
79
         */
81
         */
80
        ASSERT(THREAD->arch.uspace_window_buffer);
82
        ASSERT(THREAD->arch.uspace_window_buffer);
-
 
83
        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t)
81
        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
84
            THREAD->arch.uspace_window_buffer, PAGE_SIZE);
82
        if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
85
        if (!overlaps(uw_buf, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH))
-
 
86
            {
83
            /*
87
            /*
84
             * The buffer is not covered by the 4M locked kernel DTLB entry.
88
             * The buffer is not covered by the 4M locked kernel
-
 
89
             * DTLB entry.
85
             */
90
             */
86
            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
91
            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
87
            dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K, true, true);
92
            dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K,
-
 
93
                true, true);
88
        }
94
        }
89
       
95
       
90
        /*
96
        /*
91
         * Write kernel stack address to %g6 and a pointer to the last item
97
         * Write kernel stack address to %g6 and a pointer to the last
92
         * in the userspace window buffer to %g7 in the alternate and interrupt sets.
98
         * item in the userspace window buffer to %g7 in the alternate
-
 
99
         * and interrupt sets.
93
         */
100
         */
94
        uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE
101
        uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE
95
            - (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
102
            - (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE,
-
 
103
            STACK_ALIGNMENT));
96
        write_to_ig_g6(sp);
104
        write_to_ig_g6(sp);
97
        write_to_ag_g6(sp);
105
        write_to_ag_g6(sp);
98
        write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
106
        write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
99
    }
107
    }
100
}
108
}
101
 
109
 
102
/** Perform sparc64 specific steps before a thread stops running.
110
/** Perform sparc64 specific steps before a thread stops running.
103
 *
111
 *
104
 * Demap any locked DTLB entries isntalled by the thread (i.e. kernel stack
112
 * Demap any locked DTLB entries isntalled by the thread (i.e. kernel stack
105
 * and userspace window buffer).
113
 * and userspace window buffer).
106
 */
114
 */
107
void after_thread_ran_arch(void)
115
void after_thread_ran_arch(void)
108
{
116
{
109
    uintptr_t base;
117
    uintptr_t base;
110
 
118
 
111
    base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
119
    base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
112
 
120
 
113
    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
121
    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1 <<
-
 
122
        KERNEL_PAGE_WIDTH))) {
114
        /*
123
        /*
115
         * Kernel stack of this thread is locked in DTLB.
124
         * Kernel stack of this thread is locked in DTLB.
116
         * Destroy the mapping.
125
         * Destroy the mapping.
117
         */
126
         */
118
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
127
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
-
 
128
            THREAD->kstack);
119
    }
129
    }
120
   
130
   
121
    if ((THREAD->flags & THREAD_FLAG_USPACE)) {
131
    if ((THREAD->flags & THREAD_FLAG_USPACE)) {
122
        /*
132
        /*
123
         * If this thread executes also in userspace, we have to
133
         * If this thread executes also in userspace, we have to
124
         * demap the userspace window buffer from DTLB.
134
         * demap the userspace window buffer from DTLB.
125
         */
135
         */
126
        ASSERT(THREAD->arch.uspace_window_buffer);
136
        ASSERT(THREAD->arch.uspace_window_buffer);
127
       
137
       
-
 
138
        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t)
128
        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
139
            THREAD->arch.uspace_window_buffer, PAGE_SIZE);
129
        if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
140
        if (!overlaps(uw_buf, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
130
            /*
141
            /*
131
             * The buffer is not covered by the 4M locked kernel DTLB entry
142
             * The buffer is not covered by the 4M locked kernel DTLB entry
132
             * and therefore it was given a dedicated locked DTLB entry.
143
             * and therefore it was given a dedicated locked DTLB entry.
133
             * Demap it.
144
             * Demap it.
134
             */
145
             */
135
            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
146
            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
136
        }
147
        }
137
   
148
   
138
        /* sample the state of the userspace window buffer */  
149
        /* sample the state of the userspace window buffer */  
139
        THREAD->arch.uspace_window_buffer = (uint8_t *) read_from_ag_g7();
150
        THREAD->arch.uspace_window_buffer = (uint8_t *) read_from_ag_g7();
140
    }
151
    }
141
}
152
}
142
 
153
 
143
/** @}
154
/** @}
144
 */
155
 */
145
 
156