Subversion Repositories HelenOS

Rev

Rev 1879 | Rev 2054 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 1879 Rev 1890
1
/*
1
/*
2
 * Copyright (C) 2006 Jakub Jermar
2
 * Copyright (C) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup sparc64proc
29
/** @addtogroup sparc64proc
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
#include <proc/scheduler.h>
35
#include <proc/scheduler.h>
36
#include <proc/thread.h>
36
#include <proc/thread.h>
37
#include <arch.h>
37
#include <arch.h>
38
#include <arch/asm.h>
38
#include <arch/asm.h>
39
#include <arch/regdef.h>
39
#include <arch/regdef.h>
40
#include <arch/stack.h>
40
#include <arch/stack.h>
41
#include <arch/mm/tlb.h>
41
#include <arch/mm/tlb.h>
42
#include <arch/mm/page.h>
42
#include <arch/mm/page.h>
43
#include <config.h>
43
#include <config.h>
44
#include <align.h>
44
#include <align.h>
45
#include <macros.h>
45
#include <macros.h>
46
 
46
 
47
/** Perform sparc64 specific tasks needed before the new task is run.
 *
 * sparc64 has no per-task state to prepare here, so this hook is a no-op.
 */
void before_task_runs_arch(void)
{
}
51
 
51
 
52
/** Perform sparc64 specific steps before scheduling a thread.
52
/** Perform sparc64 specific steps before scheduling a thread.
53
 *
53
 *
54
 * Ensure that thread's kernel stack, as well as userspace window
54
 * Ensure that thread's kernel stack, as well as userspace window
55
 * buffer for userspace threads, are locked in DTLB.
55
 * buffer for userspace threads, are locked in DTLB.
56
 * For userspace threads, initialize reserved global registers
56
 * For userspace threads, initialize reserved global registers
57
 * in the alternate and interrupt sets.
57
 * in the alternate and interrupt sets.
58
 */
58
 */
59
void before_thread_runs_arch(void)
59
void before_thread_runs_arch(void)
60
{
60
{
61
    uintptr_t base;
61
    uintptr_t base;
62
   
62
   
63
    base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
63
    base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
64
 
64
 
65
    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
65
    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
66
        /*
66
        /*
67
         * Kernel stack of this thread is not locked in DTLB.
67
         * Kernel stack of this thread is not locked in DTLB.
68
         * First, make sure it is not mapped already.
68
         * First, make sure it is not mapped already.
69
         * If not, create a locked mapping for it.
69
         * If not, create a locked mapping for it.
70
         */
70
         */
71
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
71
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
72
        dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
72
        dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
73
    }
73
    }
74
   
74
   
75
    if ((THREAD->flags & THREAD_FLAG_USPACE)) {
75
    if ((THREAD->flags & THREAD_FLAG_USPACE)) {
76
        /*
76
        /*
77
         * If this thread executes also in userspace, we have to lock
77
         * If this thread executes also in userspace, we have to lock
78
         * its userspace window buffer into DTLB.
78
         * its userspace window buffer into DTLB.
79
         */
79
         */
80
        ASSERT(THREAD->arch.uspace_window_buffer);
80
        ASSERT(THREAD->arch.uspace_window_buffer);
81
        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
81
        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
82
        if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
82
        if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
83
            /*
83
            /*
84
             * The buffer is not covered by the 4M locked kernel DTLB entry.
84
             * The buffer is not covered by the 4M locked kernel DTLB entry.
85
             */
85
             */
86
            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
86
            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
87
            dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K, true, true);
87
            dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K, true, true);
88
        }
88
        }
89
       
89
       
90
        /*
90
        /*
91
         * Write kernel stack address to %g6 and a pointer to the last item
91
         * Write kernel stack address to %g6 and a pointer to the last item
92
         * in the userspace window buffer to %g7 in the alternate and interrupt sets.
92
         * in the userspace window buffer to %g7 in the alternate and interrupt sets.
93
         */
93
         */
94
        uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE
94
        uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE
95
            - (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
95
            - (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
96
        write_to_ig_g6(sp);
96
        write_to_ig_g6(sp);
97
        write_to_ag_g6(sp);
97
        write_to_ag_g6(sp);
98
        write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
98
        write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
99
    }
99
    }
100
}
100
}
101
 
101
 
102
/** Perform sparc64 specific steps before a thread stops running.
102
/** Perform sparc64 specific steps before a thread stops running.
103
 *
103
 *
104
 * Demap any locked DTLB entries isntalled by the thread (i.e. kernel stack
104
 * Demap any locked DTLB entries isntalled by the thread (i.e. kernel stack
105
 * and userspace window buffer).
105
 * and userspace window buffer).
106
 */
106
 */
107
void after_thread_ran_arch(void)
107
void after_thread_ran_arch(void)
108
{
108
{
109
    uintptr_t base;
109
    uintptr_t base;
110
 
110
 
111
    base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
111
    base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
112
 
112
 
113
    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
113
    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
114
        /*
114
        /*
115
         * Kernel stack of this thread is locked in DTLB.
115
         * Kernel stack of this thread is locked in DTLB.
116
         * Destroy the mapping.
116
         * Destroy the mapping.
117
         */
117
         */
118
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
118
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
119
    }
119
    }
120
   
120
   
121
    if ((THREAD->flags & THREAD_FLAG_USPACE)) {
121
    if ((THREAD->flags & THREAD_FLAG_USPACE)) {
122
        /*
122
        /*
123
         * If this thread executes also in userspace, we have to force all
123
         * If this thread executes also in userspace, we have to
124
         * its still-active userspace windows into the userspace window buffer
-
 
125
         * and demap the buffer from DTLB.
124
         * demap the userspace window buffer from DTLB.
126
         */
125
         */
127
        ASSERT(THREAD->arch.uspace_window_buffer);
126
        ASSERT(THREAD->arch.uspace_window_buffer);
128
       
127
       
129
        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
128
        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
130
        if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
129
        if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
131
            /*
130
            /*
132
             * The buffer is not covered by the 4M locked kernel DTLB entry
131
             * The buffer is not covered by the 4M locked kernel DTLB entry
133
             * and therefore it was given a dedicated locked DTLB entry.
132
             * and therefore it was given a dedicated locked DTLB entry.
134
             * Demap it.
133
             * Demap it.
135
             */
134
             */
136
            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
135
            dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
137
        }
136
        }
138
   
137
   
139
        /* sample the state of the userspace window buffer */  
138
        /* sample the state of the userspace window buffer */  
140
        THREAD->arch.uspace_window_buffer = (uint8_t *) read_from_ag_g7();
139
        THREAD->arch.uspace_window_buffer = (uint8_t *) read_from_ag_g7();
141
    }
140
    }
142
}
141
}
143
 
142
 
144
/** @}
143
/** @}
145
 */
144
 */
146
 
145