Subversion Repositories HelenOS

Compare Revisions

Rev 2291 → Rev 2292

/branches/rcu/contrib/toolchain/toolchain.ppc32.sh
16,7 → 16,7
}
 
BINUTILS_VERSION="2.17"
GCC_VERSION="4.1.1"
GCC_VERSION="4.1.2"
 
BINUTILS="binutils-${BINUTILS_VERSION}.tar.gz"
GCC_CORE="gcc-core-${GCC_VERSION}.tar.bz2"
29,7 → 29,6
PLATFORM="ppc"
WORKDIR=`pwd`
TARGET="${PLATFORM}-linux-gnu"
HOST="i686-pc-linux-gnu"
PREFIX="/usr/local/${PLATFORM}"
BINUTILSDIR="${WORKDIR}/binutils-${BINUTILS_VERSION}"
GCCDIR="${WORKDIR}/gcc-${GCC_VERSION}"
81,7 → 80,7
echo ">>> Compiling and installing binutils"
cd "${BINUTILSDIR}"
check_error $? "Change directory failed."
./configure "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
./configure "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
check_error $? "Error configuring binutils."
make all install
check_error $? "Error compiling/installing binutils."
89,7 → 88,7
echo ">>> Compiling and installing GCC"
cd "${OBJDIR}"
check_error $? "Change directory failed."
"${GCCDIR}/configure" "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
"${GCCDIR}/configure" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
check_error $? "Error configuring GCC."
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc
check_error $? "Error compiling/installing GCC."
/branches/rcu/contrib/toolchain/toolchain.amd64.sh
16,7 → 16,7
}
 
BINUTILS_VERSION="2.17"
GCC_VERSION="4.1.1"
GCC_VERSION="4.1.2"
 
BINUTILS="binutils-${BINUTILS_VERSION}.tar.gz"
GCC_CORE="gcc-core-${GCC_VERSION}.tar.bz2"
29,7 → 29,6
PLATFORM="amd64"
WORKDIR=`pwd`
TARGET="${PLATFORM}-linux-gnu"
HOST="i686-pc-linux-gnu"
PREFIX="/usr/local/${PLATFORM}"
BINUTILSDIR="${WORKDIR}/binutils-${BINUTILS_VERSION}"
GCCDIR="${WORKDIR}/gcc-${GCC_VERSION}"
81,7 → 80,7
echo ">>> Compiling and installing binutils"
cd "${BINUTILSDIR}"
check_error $? "Change directory failed."
./configure "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
./configure "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
check_error $? "Error configuring binutils."
make all install
check_error $? "Error compiling/installing binutils."
89,7 → 88,7
echo ">>> Compiling and installing GCC"
cd "${OBJDIR}"
check_error $? "Change directory failed."
"${GCCDIR}/configure" "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
"${GCCDIR}/configure" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
check_error $? "Error configuring GCC."
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc
check_error $? "Error compiling/installing GCC."
/branches/rcu/contrib/toolchain/toolchain.ppc64.sh
16,7 → 16,7
}
 
BINUTILS_VERSION="2.17"
GCC_VERSION="4.1.1"
GCC_VERSION="4.1.2"
 
BINUTILS="binutils-${BINUTILS_VERSION}.tar.gz"
GCC_CORE="gcc-core-${GCC_VERSION}.tar.bz2"
29,7 → 29,6
PLATFORM="ppc64"
WORKDIR=`pwd`
TARGET="${PLATFORM}-linux-gnu"
HOST="i686-pc-linux-gnu"
PREFIX="/usr/local/${PLATFORM}"
BINUTILSDIR="${WORKDIR}/binutils-${BINUTILS_VERSION}"
GCCDIR="${WORKDIR}/gcc-${GCC_VERSION}"
81,7 → 80,7
echo ">>> Compiling and installing binutils"
cd "${BINUTILSDIR}"
check_error $? "Change directory failed."
./configure "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
./configure "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
check_error $? "Error configuring binutils."
make all install
check_error $? "Error compiling/installing binutils."
89,7 → 88,7
echo ">>> Compiling and installing GCC"
cd "${OBJDIR}"
check_error $? "Change directory failed."
"${GCCDIR}/configure" "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
"${GCCDIR}/configure" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
check_error $? "Error configuring GCC."
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc
check_error $? "Error compiling/installing GCC."
/branches/rcu/contrib/toolchain/toolchain.ia32.sh
16,7 → 16,7
}
 
BINUTILS_VERSION="2.17"
GCC_VERSION="4.1.1"
GCC_VERSION="4.1.2"
 
BINUTILS="binutils-${BINUTILS_VERSION}.tar.gz"
GCC_CORE="gcc-core-${GCC_VERSION}.tar.bz2"
29,7 → 29,6
PLATFORM="i686"
WORKDIR=`pwd`
TARGET="${PLATFORM}-pc-linux-gnu"
HOST="i686-pc-linux-gnu"
PREFIX="/usr/local/${PLATFORM}"
BINUTILSDIR="${WORKDIR}/binutils-${BINUTILS_VERSION}"
GCCDIR="${WORKDIR}/gcc-${GCC_VERSION}"
81,7 → 80,7
echo ">>> Compiling and installing binutils"
cd "${BINUTILSDIR}"
check_error $? "Change directory failed."
./configure "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
./configure "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
check_error $? "Error configuring binutils."
make all install
check_error $? "Error compiling/installing binutils."
89,9 → 88,9
echo ">>> Compiling and installing GCC"
cd "${OBJDIR}"
check_error $? "Change directory failed."
"${GCCDIR}/configure" "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
"${GCCDIR}/configure" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
check_error $? "Error configuring GCC."
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc all-target-libobjc install-target-libobjc
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc
check_error $? "Error compiling/installing GCC."
 
echo
/branches/rcu/contrib/toolchain/toolchain.mipsel32.sh
16,7 → 16,7
}
 
BINUTILS_VERSION="2.17"
GCC_VERSION="4.1.1"
GCC_VERSION="4.1.2"
 
BINUTILS="binutils-${BINUTILS_VERSION}.tar.gz"
GCC_CORE="gcc-core-${GCC_VERSION}.tar.bz2"
29,7 → 29,6
PLATFORM="mipsel"
WORKDIR=`pwd`
TARGET="${PLATFORM}-linux-gnu"
HOST="i686-pc-linux-gnu"
PREFIX="/usr/local/${PLATFORM}"
BINUTILSDIR="${WORKDIR}/binutils-${BINUTILS_VERSION}"
GCCDIR="${WORKDIR}/gcc-${GCC_VERSION}"
81,7 → 80,7
echo ">>> Compiling and installing binutils"
cd "${BINUTILSDIR}"
check_error $? "Change directory failed."
./configure "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
./configure "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
check_error $? "Error configuring binutils."
make all install
check_error $? "Error compiling/installing binutils."
89,7 → 88,7
echo ">>> Compiling and installing GCC"
cd "${OBJDIR}"
check_error $? "Change directory failed."
"${GCCDIR}/configure" "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
"${GCCDIR}/configure" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
check_error $? "Error configuring GCC."
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc
check_error $? "Error compiling/installing GCC."
/branches/rcu/contrib/toolchain/toolchain.sparc64.sh
16,7 → 16,7
}
 
BINUTILS_VERSION="2.17"
GCC_VERSION="4.1.1"
GCC_VERSION="4.1.2"
 
BINUTILS="binutils-${BINUTILS_VERSION}.tar.gz"
GCC_CORE="gcc-core-${GCC_VERSION}.tar.bz2"
29,7 → 29,6
PLATFORM="sparc64"
WORKDIR=`pwd`
TARGET="${PLATFORM}-linux-gnu"
HOST="i686-pc-linux-gnu"
PREFIX="/usr/local/${PLATFORM}"
BINUTILSDIR="${WORKDIR}/binutils-${BINUTILS_VERSION}"
GCCDIR="${WORKDIR}/gcc-${GCC_VERSION}"
81,7 → 80,7
echo ">>> Compiling and installing binutils"
cd "${BINUTILSDIR}"
check_error $? "Change directory failed."
./configure "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
./configure "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
check_error $? "Error configuring binutils."
make all install
check_error $? "Error compiling/installing binutils."
89,9 → 88,9
echo ">>> Compiling and installing GCC"
cd "${OBJDIR}"
check_error $? "Change directory failed."
"${GCCDIR}/configure" "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
"${GCCDIR}/configure" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
check_error $? "Error configuring GCC."
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc all-target-libobjc install-target-libobjc
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc
check_error $? "Error compiling/installing GCC."
 
echo
/branches/rcu/contrib/toolchain/toolchain.ia64.sh
16,7 → 16,7
}
 
BINUTILS_VERSION="2.17"
GCC_VERSION="4.1.1"
GCC_VERSION="4.1.2"
 
INCLUDES="ia64-pc-gnu-linux_includes.tar.bz2"
BINUTILS="binutils-${BINUTILS_VERSION}.tar.gz"
31,7 → 31,6
PLATFORM="ia64"
WORKDIR=`pwd`
TARGET="${PLATFORM}-pc-linux-gnu"
HOST="i686-pc-linux-gnu"
PREFIX="/usr/local/${PLATFORM}"
INCLUDESDIR="${WORKDIR}/include"
BINUTILSDIR="${WORKDIR}/binutils-${BINUTILS_VERSION}"
90,7 → 89,7
echo ">>> Compiling and installing binutils"
cd "${BINUTILSDIR}"
check_error $? "Change directory failed."
./configure "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
./configure "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
check_error $? "Error configuring binutils."
make all install
check_error $? "Error compiling/installing binutils."
98,7 → 97,7
echo ">>> Compiling and installing GCC"
cd "${OBJDIR}"
check_error $? "Change directory failed."
"${GCCDIR}/configure" "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj "--with-headers=${INCLUDESDIR}" --disable-shared
"${GCCDIR}/configure" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj "--with-headers=${INCLUDESDIR}" --disable-shared
check_error $? "Error configuring GCC."
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc
check_error $? "Error compiling/installing GCC."
/branches/rcu/contrib/toolchain/toolchain.arm32.sh
16,7 → 16,7
}
 
BINUTILS_VERSION="2.17"
GCC_VERSION="4.1.1"
GCC_VERSION="4.1.2"
 
BINUTILS="binutils-${BINUTILS_VERSION}.tar.gz"
GCC_CORE="gcc-core-${GCC_VERSION}.tar.bz2"
29,7 → 29,6
PLATFORM="arm"
WORKDIR=`pwd`
TARGET="${PLATFORM}-linux-gnu"
HOST="i86pc-linux-gnu"
PREFIX="/usr/local/${PLATFORM}"
BINUTILSDIR="${WORKDIR}/binutils-${BINUTILS_VERSION}"
GCCDIR="${WORKDIR}/gcc-${GCC_VERSION}"
81,7 → 80,7
echo ">>> Compiling and installing binutils"
cd "${BINUTILSDIR}"
check_error $? "Change directory failed."
./configure "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
./configure "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
check_error $? "Error configuring binutils."
make all install
check_error $? "Error compiling/installing binutils."
89,7 → 88,7
echo ">>> Compiling and installing GCC"
cd "${OBJDIR}"
check_error $? "Change directory failed."
"${GCCDIR}/configure" "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
"${GCCDIR}/configure" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
check_error $? "Error configuring GCC."
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc
check_error $? "Error compiling/installing GCC."
/branches/rcu/contrib/toolchain/toolchain.mipseb32.sh
16,7 → 16,7
}
 
BINUTILS_VERSION="2.16.1"
GCC_VERSION="4.1.1"
GCC_VERSION="4.1.2"
 
BINUTILS="binutils-${BINUTILS_VERSION}.tar.gz"
GCC_CORE="gcc-core-${GCC_VERSION}.tar.bz2"
29,7 → 29,6
PLATFORM="mips"
WORKDIR=`pwd`
TARGET="${PLATFORM}-sgi-irix5"
HOST="i686-pc-linux-gnu"
PREFIX="/usr/local/${PLATFORM}"
BINUTILSDIR="${WORKDIR}/binutils-${BINUTILS_VERSION}"
GCCDIR="${WORKDIR}/gcc-${GCC_VERSION}"
81,7 → 80,7
echo ">>> Compiling and installing binutils"
cd "${BINUTILSDIR}"
check_error $? "Change directory failed."
./configure "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
./configure "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
check_error $? "Error configuring binutils."
make all install
check_error $? "Error compiling/installing binutils."
89,7 → 88,7
echo ">>> Compiling and installing GCC"
cd "${OBJDIR}"
check_error $? "Change directory failed."
"${GCCDIR}/configure" "--host=${HOST}" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
"${GCCDIR}/configure" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,objc,c++,obj-c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
check_error $? "Error configuring GCC."
PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc
check_error $? "Error compiling/installing GCC."
/branches/rcu/kernel/test/test.c
63,3 → 63,4
 
/** @}
*/
 
/branches/rcu/kernel/test/test.h
77,3 → 77,4
 
/** @}
*/
 
/branches/rcu/kernel/test/tasklet/tasklet1.c
52,9 → 52,9
tasklet_descriptor_t *tasklet_desc;
//before we start we need to register a tasklet
if (!gquiet)
printf("Registering tasklet...");
printf("cpu:%d, Registering tasklet...", CPU->id);
if (!gquiet)
tasklet_desc=tasklet_register(&func, "\nTasklet called and received data from second thread\n");
tasklet_desc=tasklet_register(&func, "\nTasklet called and received data\n");
else
tasklet_desc=tasklet_register(&func, "");
if (!gquiet)
62,7 → 62,7
 
//first we'll try disabling the tasklet
if (!gquiet)
printf("Disabling tasklet...");
printf("cpu:%d, Disabling tasklet...", CPU->id);
tasklet_disable(tasklet_desc);
if (!gquiet)
printf("Done!\n");
69,7 → 69,7
 
//we'll schedule the disabled tasklet
if (!gquiet)
printf("Scheduling tasklet...");
printf("cpu:%d, Scheduling tasklet...", CPU->id);
tasklet_schedule(tasklet_desc);
if (!gquiet)
printf("Done!\n");
76,14 → 76,14
 
//and we'll wait if it gets called. It shouldn't however, because it's disabled
if (!gquiet)
printf("Waiting 1s...\n");
printf("cpu:%d, Waiting 1s...\n", CPU->id);
waitq_sleep_timeout(&wq,(uint32_t) 1000000,0);
if (!gquiet)
printf("Done!\n");
printf("cpu:%d, Done!\n", CPU->id);
 
//then we'll try to enable it
if (!gquiet)
printf("Enabling tasklet...");
printf("cpu:%d, Enabling tasklet...", CPU->id);
tasklet_enable(tasklet_desc);
if (!gquiet)
printf("Done!\n");
90,14 → 90,14
//and wait if it gets called this time. It should because it's enabled
if (!gquiet)
printf("Waiting 1s...\n");
printf("cpu:%d, Waiting 1s...\n", CPU->id);
waitq_sleep_timeout(&wq,(uint32_t) 1000000,0);
if (!gquiet)
printf("Done!\n");
printf("cpu:%d, Done!\n", CPU->id);
 
//finally we'll free the tasklet structure
if (!gquiet)
printf("Freeing...");
printf("cpu:%d, Freeing...", CPU->id);
tasklet_free(tasklet_desc);
if (!gquiet)
printf("Done!\n");
109,8 → 109,11
waitq_t wq;
waitq_initialize(&wq);
tasklet_descriptor_t *tasklet_desc;
thread_t* second_thread;
thread_t* second_thread = NULL;
#ifdef CONFIG_SMP
if (!quiet)
printf("cpus:%d\n", config.cpu_active);
 
if (config.cpu_active >1) {
second_thread = thread_create(&running_tasklet, NULL, TASK, THREAD_FLAG_WIRED,"running tasklet", false);
if (CPU->id == 0)
123,7 → 126,7
 
//before we start we need to register a tasklet
if (!quiet)
printf("Registering tasklet...");
printf("cpu:%d, Registering tasklet...", CPU->id);
if (!quiet)
tasklet_desc=tasklet_register(&func, "\nTasklet called and received data\n");
else
133,7 → 136,7
 
//first we'll try disabling the tasklet
if (!quiet)
printf("Disabling tasklet...");
printf("cpu:%d, Disabling tasklet...", CPU->id);
tasklet_disable(tasklet_desc);
if (!quiet)
printf("Done!\n");
140,7 → 143,7
 
//we'll schedule the disabled tasklet
if (!quiet)
printf("Scheduling tasklet...");
printf("cpu:%d, Scheduling tasklet...", CPU->id);
tasklet_schedule(tasklet_desc);
if (!quiet)
printf("Done!\n");
147,14 → 150,15
 
//and we'll wait if it gets called. It shouldn't however, because it's disabled
if (!quiet)
printf("Waiting 1s...\n");
printf("cpu:%d, Waiting 1s...\n", CPU->id);
waitq_sleep_timeout(&wq,(uint32_t) 1000000,0);
if (!quiet)
printf("Done!\n");
printf("cpu:%d, Done!\n", CPU->id);
 
 
//then we'll try to enable it
if (!quiet)
printf("Enabling tasklet...");
printf("cpu:%d, Enabling tasklet...", CPU->id);
tasklet_enable(tasklet_desc);
if (!quiet)
printf("Done!\n");
161,14 → 165,21
//and wait if it gets called this time. It should because it's enabled
if (!quiet)
printf("Waiting 1s...\n");
printf("cpu:%d, Waiting 1s...\n", CPU->id);
waitq_sleep_timeout(&wq,(uint32_t) 1000000,0);
if (!quiet)
printf("Done!\n");
printf("cpu:%d, Done!\n", CPU->id);
#ifdef CONFIG_SMP
if (config.cpu_active >1) {
printf("Joining with the second thread...");
thread_join(second_thread);
printf("Done\n");
}
 
#endif
//finally we'll free the tasklet structure
if (!quiet)
printf("Freeing...");
printf("cpu:%d, Freeing...", CPU->id);
tasklet_free(tasklet_desc);
if (!quiet)
printf("Done!\n");
/branches/rcu/kernel/genarch/src/fb/fb.c
511,8 → 511,6
sysinfo_set_item_val("fb.scanline", NULL, scan);
sysinfo_set_item_val("fb.visual", NULL, visual);
sysinfo_set_item_val("fb.address.physical", NULL, addr);
sysinfo_set_item_val("fb.address.color", NULL,
PAGE_COLOR((uintptr_t) fbaddress));
sysinfo_set_item_val("fb.invert-colors", NULL, invert_colors);
 
/* Allocate double buffer */
/branches/rcu/kernel/genarch/src/mm/asid.c
62,15 → 62,9
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <arch.h>
#include <adt/list.h>
#include <debug.h>
 
/**
* asidlock protects the asids_allocated counter.
*/
SPINLOCK_INITIALIZE(asidlock);
 
static count_t asids_allocated = 0;
 
/** Allocate free address space identifier.
90,7 → 84,6
* Check if there is an unallocated ASID.
*/
spinlock_lock(&asidlock);
if (asids_allocated == ASIDS_ALLOCABLE) {
 
/*
108,7 → 101,6
list_remove(tmp);
as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);
mutex_lock_active(&as->lock);
 
/*
* Steal the ASID.
130,8 → 122,6
*/
as_invalidate_translation_cache(as, 0, (count_t) -1);
mutex_unlock(&as->lock);
 
/*
* Get the system rid of the stolen ASID.
*/
156,8 → 146,6
tlb_shootdown_finalize();
}
spinlock_unlock(&asidlock);
return asid;
}
 
170,16 → 158,8
*/
void asid_put(asid_t asid)
{
ipl_t ipl;
 
ipl = interrupts_disable();
spinlock_lock(&asidlock);
 
asids_allocated--;
asid_put_arch(asid);
spinlock_unlock(&asidlock);
interrupts_restore(ipl);
}
 
/** @}
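The removals above take the file-local asidlock, the IPL juggling and the counter documentation out of asid.c; the synchronization apparently moves to a single lock defined elsewhere. A rough caller-side sketch under that assumption (asidlock living outside this file, and the allocator entry point above being asid_get()):

/* Sketch only: caller holds the shared asidlock with interrupts disabled. */
static void asid_usage_sketch(as_t *as, asid_t old_asid)
{
	ipl_t ipl = interrupts_disable();

	spinlock_lock(&asidlock);	/* assumed to be defined in as.c now */
	as->asid = asid_get();		/* may steal an ASID from an inactive AS */
	asid_put(old_asid);		/* now only returns the ASID to the pool */
	spinlock_unlock(&asidlock);

	interrupts_restore(ipl);
}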
/branches/rcu/kernel/genarch/src/mm/page_ht.c
55,7 → 55,8
static bool compare(unative_t key[], count_t keys, link_t *item);
static void remove_callback(link_t *item);
 
static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags);
static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
int flags);
static void ht_mapping_remove(as_t *as, uintptr_t page);
static pte_t *ht_mapping_find(as_t *as, uintptr_t page);
 
103,7 → 104,7
* of occurring. Least significant bits of VPN compose the
* hash index.
*/
index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES-1));
index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));
/*
* Address space structures are likely to be allocated from
110,7 → 111,7
* similar addresses. Least significant bits compose the
* hash index.
*/
index |= ((unative_t) as) & (PAGE_HT_ENTRIES-1);
index |= ((unative_t) as) & (PAGE_HT_ENTRIES - 1);
return index;
}
136,7 → 137,8
t = hash_table_get_instance(item, pte_t, link);
 
if (keys == PAGE_HT_KEYS) {
return (key[KEY_AS] == (uintptr_t) t->as) && (key[KEY_PAGE] == t->page);
return (key[KEY_AS] == (uintptr_t) t->as) &&
(key[KEY_PAGE] == t->page);
} else {
return (key[KEY_AS] == (uintptr_t) t->as);
}
175,7 → 177,10
void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
pte_t *t;
unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
unative_t key[2] = {
(uintptr_t) as,
page = ALIGN_DOWN(page, PAGE_SIZE)
};
if (!hash_table_find(&page_ht, key)) {
t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
209,7 → 214,10
*/
void ht_mapping_remove(as_t *as, uintptr_t page)
{
unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
unative_t key[2] = {
(uintptr_t) as,
page = ALIGN_DOWN(page, PAGE_SIZE)
};
/*
* Note that removed PTE's will be freed
234,7 → 242,10
{
link_t *hlp;
pte_t *t = NULL;
unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
unative_t key[2] = {
(uintptr_t) as,
page = ALIGN_DOWN(page, PAGE_SIZE)
};
hlp = hash_table_find(&page_ht, key);
if (hlp)
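The changes above are purely formatting; the two-key scheme itself is untouched. Every operation builds a key pair of the address space pointer and the page-aligned virtual address, and the hash mixes the low bits of both. A small lookup sketch using only helpers already present in this file:

/* Sketch only: caller-side lookup with the (as, page) key pair. */
static pte_t *lookup_sketch(as_t *as, uintptr_t va)
{
	unative_t key[2] = {
		(uintptr_t) as,
		ALIGN_DOWN(va, PAGE_SIZE)
	};
	link_t *hlp = hash_table_find(&page_ht, key);

	return hlp ? hash_table_get_instance(hlp, pte_t, link) : NULL;
}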
/branches/rcu/kernel/generic/include/proc/thread.h
53,10 → 53,7
 
/* Thread flags */
 
/** Thread cannot be migrated to another CPU.
* When using this flag, the caller must set cpu in the thread_t
* structure manually before calling thread_ready (even on uniprocessor)
*/
/** Thread cannot be migrated to another CPU. */
#define THREAD_FLAG_WIRED (1 << 0)
/** Thread was migrated to another CPU and has not run yet. */
#define THREAD_FLAG_STOLEN (1 << 1)
/branches/rcu/kernel/generic/include/ddi/irq.h
1,162 → 1,154
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericddi
* @{
*/
/** @file
*/
 
#ifndef KERN_IRQ_H_
#define KERN_IRQ_H_
 
typedef enum {
CMD_MEM_READ_1 = 0,
CMD_MEM_READ_2,
CMD_MEM_READ_4,
CMD_MEM_READ_8,
CMD_MEM_WRITE_1,
CMD_MEM_WRITE_2,
CMD_MEM_WRITE_4,
CMD_MEM_WRITE_8,
CMD_PORT_READ_1,
CMD_PORT_WRITE_1,
CMD_IA64_GETCHAR,
CMD_PPC32_GETCHAR,
CMD_LAST
} irq_cmd_type;
 
typedef struct {
irq_cmd_type cmd;
void *addr;
unsigned long long value;
int dstarg;
} irq_cmd_t;
 
typedef struct {
unsigned int cmdcount;
irq_cmd_t *cmds;
} irq_code_t;
 
#ifdef KERNEL
 
#include <arch/types.h>
#include <adt/list.h>
#include <synch/spinlock.h>
#include <proc/task.h>
 
typedef enum {
IRQ_DECLINE, /**< Decline to service. */
IRQ_ACCEPT /**< Accept to service. */
} irq_ownership_t;
 
typedef enum {
IRQ_TRIGGER_LEVEL = 1,
IRQ_TRIGGER_EDGE
} irq_trigger_t;
 
struct irq;
typedef void (* irq_handler_t)(struct irq *irq, void *arg, ...);
 
/** IPC notification config structure.
*
* Primarily, this structure is encapsulated in the irq_t structure.
* It is protected by irq_t::lock.
*/
typedef struct {
/** When false, notifications are not sent. */
bool notify;
/** Answerbox for notifications. */
answerbox_t *answerbox;
/** Method to be used for the notification. */
unative_t method;
/** Top-half pseudocode. */
irq_code_t *code;
/** Counter. */
count_t counter;
/**
* Link between IRQs that are notifying the same answerbox. The list is
* protected by the answerbox irq_lock.
*/
link_t link;
} ipc_notif_cfg_t;
 
/** Structure representing one device IRQ.
*
* If one device has multiple interrupts, there will be multiple irq_t
* instantions with the same devno.
*/
typedef struct irq {
/** Hash table link. */
link_t link;
 
/** Lock protecting everything in this structure
* except the link member. When both the IRQ
* hash table lock and this lock are to be acquired,
* this lock must not be taken first.
*/
SPINLOCK_DECLARE(lock);
/** Send EOI before processing the interrupt.
* This is essential for timer interrupt which
* has to be acknowledged before doing preemption
* to make sure another timer interrupt will
* be eventually generated.
*/
bool preack;
 
/** Unique device number. -1 if not yet assigned. */
devno_t devno;
 
/** Actual IRQ number. -1 if not yet assigned. */
inr_t inr;
/** Trigger level of the IRQ. */
irq_trigger_t trigger;
/** Claim ownership of the IRQ. */
irq_ownership_t (* claim)(void);
/** Handler for this IRQ and device. */
irq_handler_t handler;
/** Argument for the handler. */
void *arg;
 
/** Notification configuration structure. */
ipc_notif_cfg_t notif_cfg;
} irq_t;
 
extern void irq_init(count_t inrs, count_t chains);
extern void irq_initialize(irq_t *irq);
extern void irq_register(irq_t *irq);
extern irq_t *irq_dispatch_and_lock(inr_t inr);
extern irq_t *irq_find_and_lock(inr_t inr, devno_t devno);
 
#endif
 
#endif
 
/** @}
*/
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericddi
* @{
*/
/** @file
*/
 
#ifndef KERN_IRQ_H_
#define KERN_IRQ_H_
 
typedef enum {
CMD_MEM_READ_1 = 0,
CMD_MEM_READ_2,
CMD_MEM_READ_4,
CMD_MEM_READ_8,
CMD_MEM_WRITE_1,
CMD_MEM_WRITE_2,
CMD_MEM_WRITE_4,
CMD_MEM_WRITE_8,
CMD_PORT_READ_1,
CMD_PORT_WRITE_1,
CMD_IA64_GETCHAR,
CMD_PPC32_GETCHAR,
CMD_LAST
} irq_cmd_type;
 
typedef struct {
irq_cmd_type cmd;
void *addr;
unsigned long long value;
int dstarg;
} irq_cmd_t;
 
typedef struct {
unsigned int cmdcount;
irq_cmd_t *cmds;
} irq_code_t;
 
#ifdef KERNEL
 
#include <arch/types.h>
#include <adt/list.h>
#include <synch/spinlock.h>
#include <proc/task.h>
 
typedef enum {
IRQ_DECLINE, /**< Decline to service. */
IRQ_ACCEPT /**< Accept to service. */
} irq_ownership_t;
 
typedef enum {
IRQ_TRIGGER_LEVEL = 1,
IRQ_TRIGGER_EDGE
} irq_trigger_t;
 
struct irq;
typedef void (* irq_handler_t)(struct irq *irq, void *arg, ...);
 
/** IPC notification config structure.
*
* Primarily, this structure is encapsulated in the irq_t structure.
* It is protected by irq_t::lock.
*/
typedef struct {
/** When false, notifications are not sent. */
bool notify;
/** Answerbox for notifications. */
answerbox_t *answerbox;
/** Method to be used for the notification. */
unative_t method;
/** Top-half pseudocode. */
irq_code_t *code;
/** Counter. */
count_t counter;
/**
* Link between IRQs that are notifying the same answerbox. The list is
* protected by the answerbox irq_lock.
*/
link_t link;
} ipc_notif_cfg_t;
 
/** Structure representing one device IRQ.
*
* If one device has multiple interrupts, there will be multiple irq_t
* instantiations with the same devno.
*/
typedef struct irq {
/** Hash table link. */
link_t link;
 
/** Lock protecting everything in this structure
* except the link member. When both the IRQ
* hash table lock and this lock are to be acquired,
* this lock must not be taken first.
*/
SPINLOCK_DECLARE(lock);
 
/** Unique device number. -1 if not yet assigned. */
devno_t devno;
 
/** Actual IRQ number. -1 if not yet assigned. */
inr_t inr;
/** Trigger level of the IRQ.*/
irq_trigger_t trigger;
/** Claim ownership of the IRQ. */
irq_ownership_t (* claim)(void);
/** Handler for this IRQ and device. */
irq_handler_t handler;
/** Argument for the handler. */
void *arg;
 
/** Notification configuration structure. */
ipc_notif_cfg_t notif_cfg;
} irq_t;
 
extern void irq_init(count_t inrs, count_t chains);
extern void irq_initialize(irq_t *irq);
extern void irq_register(irq_t *irq);
extern irq_t *irq_dispatch_and_lock(inr_t inr);
extern irq_t *irq_find_and_lock(inr_t inr, devno_t devno);
 
#endif
 
#endif
 
/** @}
*/
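Apart from dropping the preack member, the interface is unchanged: a driver still fills an irq_t and hands it to irq_register(). A minimal registration sketch, with the device numbers and handler made up for illustration:

static irq_ownership_t mydev_claim(void)
{
	/* With a unique INR the driver can accept unconditionally. */
	return IRQ_ACCEPT;
}

static void mydev_handler(irq_t *irq, void *arg, ...)
{
	/* Invoked by the dispatcher; irq->lock is assumed to be held here. */
}

static irq_t mydev_irq;

static void mydev_attach(inr_t inr, devno_t devno)
{
	irq_initialize(&mydev_irq);
	mydev_irq.inr = inr;
	mydev_irq.devno = devno;
	mydev_irq.trigger = IRQ_TRIGGER_LEVEL;
	mydev_irq.claim = mydev_claim;
	mydev_irq.handler = mydev_handler;
	mydev_irq.arg = NULL;
	irq_register(&mydev_irq);
}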
/branches/rcu/kernel/generic/include/mm/as.h
89,6 → 89,17
@public
/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
/**
* Number of processors on which this address space is active.
* Protected by asidlock.
*/
count_t cpu_refcount;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
* Protected by asidlock.
*/
asid_t asid;
mutex_t lock;
95,18 → 106,9
/** Number of references (i.e. tasks that reference this as). */
count_t refcount;
/** Number of processors on which this address space is active. */
count_t cpu_refcount;
/** B+tree of address space areas. */
btree_t as_area_btree;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
*/
asid_t asid;
/** Non-generic content. */
as_genarch_t genarch;
133,6 → 135,17
typedef struct as {
/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
/**
* Number of processors on which this address space is active.
* Protected by asidlock.
*/
count_t cpu_refcount;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
* Protected by asidlock.
*/
asid_t asid;
 
mutex_t lock;
 
139,18 → 152,9
/** Number of references (i.e. tasks that reference this as). */
count_t refcount;
 
/** Number of processors on which this address space is active. */
count_t cpu_refcount;
 
/** B+tree of address space areas. */
btree_t as_area_btree;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
*/
asid_t asid;
/** Non-generic content. */
as_genarch_t genarch;
 
205,7 → 209,6
/** Address space area structure.
*
* Each as_area_t structure describes one contiguous area of virtual memory.
* In the future, it should not be difficult to support shared areas.
*/
typedef struct {
mutex_t lock;
250,7 → 253,6
extern as_operations_t *as_operations;
#endif
 
SPINLOCK_EXTERN(inactive_as_with_asid_lock);
extern link_t inactive_as_with_asid_head;
 
extern void as_init(void);
/branches/rcu/kernel/generic/src/main/main.c
238,7 → 238,7
printf("config.memory_size=%zdM\n", config.memory_size >> 20);
printf("config.cpu_count=%zd\n", config.cpu_count);
cpu_init();
 
calibrate_delay_loop();
clock_counter_init();
timeout_init();
246,8 → 246,6
task_init();
thread_init();
futex_init();
 
 
klog_init();
if (init.cnt > 0) {
257,6 → 255,7
init.tasks[i].size);
} else
printf("No init binaries found\n");
ipc_init();
 
/*
273,10 → 272,9
if (!t)
panic("can't create kinit thread\n");
thread_ready(t);
tasklet_run_tasklet_thread(k);
 
//tasklets disabled for debugging purposes
tasklet_run_tasklet_thread(k);
/*
* This call to scheduler() will return to kinit,
* starting the thread of kernel threads.
/branches/rcu/kernel/generic/src/time/clock.c
104,8 → 104,6
* physmem_map() the clock_parea.
*/
sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
sysinfo_set_item_val("clock.fcolor", NULL, (unative_t)
PAGE_COLOR(clock_parea.vbase));
sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}
 
/branches/rcu/kernel/generic/src/ddi/ddi.c
99,8 → 99,7
* @return 0 on success, EPERM if the caller lacks capabilities to use this
* syscall, ENOENT if there is no task matching the specified ID or the
* physical address space is not enabled for mapping and ENOMEM if there
* was a problem in creating address space area. ENOTSUP is returned when
* an attempt to create an illegal address alias is detected.
* was a problem in creating address space area.
*/
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
{
139,18 → 138,6
interrupts_restore(ipl);
return ENOENT;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) {
/*
* Refuse to create an illegal address alias.
*/
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
return ENOTSUP;
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
spinlock_unlock(&parea_lock);
 
spinlock_lock(&TASK->lock);
/branches/rcu/kernel/generic/src/ddi/irq.c
1,380 → 1,379
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericddi
* @{
*/
/**
* @file
* @brief IRQ dispatcher.
*
* This file provides means of connecting IRQs with particular
* devices and logic for dispatching interrupts to IRQ handlers
* defined by those devices.
*
* This code is designed to support:
* - multiple devices sharing a single IRQ
* - multiple IRQs per single device
*
*
* Note about architectures.
*
* Some architectures have the term IRQ well defined. Examples
* of such architectures include amd64, ia32 and mips32. Some
* other architectures, such as sparc64, don't use the term
* at all. In those cases, we boldly step forward and define what
* an IRQ is.
*
* The implementation is generic enough and still allows the
* architectures to use the hardware layout effectively.
* For instance, on amd64 and ia32, where there are only 16
* IRQs, the irq_hash_table can be optimized to a one-dimensional
* array. Next, when it is known that the IRQ numbers (aka INR's)
* are unique, the claim functions can always return IRQ_ACCEPT.
*
*
* Note about the irq_hash_table.
*
* The hash table is configured to use two keys: inr and devno.
* However, the hash index is computed only from inr. Moreover,
* if devno is -1, the match is based on the return value of
* the claim() function instead of on devno.
*/
 
#include <ddi/irq.h>
#include <adt/hash_table.h>
#include <arch/types.h>
#include <synch/spinlock.h>
#include <arch.h>
 
#define KEY_INR 0
#define KEY_DEVNO 1
 
/**
* Spinlock protecting the hash table.
* This lock must be taken only when interrupts are disabled.
*/
SPINLOCK_INITIALIZE(irq_hash_table_lock);
static hash_table_t irq_hash_table;
 
/**
* Hash table operations for cases when we know that
* there will be collisions between different keys.
*/
static index_t irq_ht_hash(unative_t *key);
static bool irq_ht_compare(unative_t *key, count_t keys, link_t *item);
 
static hash_table_operations_t irq_ht_ops = {
.hash = irq_ht_hash,
.compare = irq_ht_compare,
.remove_callback = NULL /* not used */
};
 
/**
* Hash table operations for cases when we know that
* there will be no collisions between different keys.
* However, there might be still collisions among
* elements with single key (sharing of one IRQ).
*/
static index_t irq_lin_hash(unative_t *key);
static bool irq_lin_compare(unative_t *key, count_t keys, link_t *item);
 
static hash_table_operations_t irq_lin_ops = {
.hash = irq_lin_hash,
.compare = irq_lin_compare,
.remove_callback = NULL /* not used */
};
 
/** Initialize IRQ subsystem.
*
* @param inrs Number of unique IRQ numbers or INRs.
* @param chains Number of chains in the hash table.
*/
void irq_init(count_t inrs, count_t chains)
{
/*
* Be smart about the choice of the hash table operations.
* In cases in which inrs equals the requested number of
* chains (i.e. where there is no collision between
* different keys), we can use optimized set of operations.
*/
if (inrs == chains)
hash_table_create(&irq_hash_table, chains, 2, &irq_lin_ops);
else
hash_table_create(&irq_hash_table, chains, 2, &irq_ht_ops);
}
 
/** Initialize one IRQ structure.
*
* @param irq Pointer to the IRQ structure to be initialized.
*
*/
void irq_initialize(irq_t *irq)
{
link_initialize(&irq->link);
spinlock_initialize(&irq->lock, "irq.lock");
irq->preack = false;
irq->inr = -1;
irq->devno = -1;
irq->trigger = (irq_trigger_t) 0;
irq->claim = NULL;
irq->handler = NULL;
irq->arg = NULL;
irq->notif_cfg.notify = false;
irq->notif_cfg.answerbox = NULL;
irq->notif_cfg.code = NULL;
irq->notif_cfg.method = 0;
irq->notif_cfg.counter = 0;
link_initialize(&irq->notif_cfg.link);
}
 
/** Register IRQ for device.
*
* The irq structure must be filled with information
* about the interrupt source and with the claim()
* function pointer and irq_handler() function pointer.
*
* @param irq IRQ structure belonging to a device.
*/
void irq_register(irq_t *irq)
{
ipl_t ipl;
unative_t key[] = {
(unative_t) irq->inr,
(unative_t) irq->devno
};
ipl = interrupts_disable();
spinlock_lock(&irq_hash_table_lock);
hash_table_insert(&irq_hash_table, key, &irq->link);
spinlock_unlock(&irq_hash_table_lock);
interrupts_restore(ipl);
}
 
/** Dispatch the IRQ.
*
* We assume this function is only called from interrupt
* context (i.e. that interrupts are disabled prior to
* this call).
*
* This function attempts to lookup a fitting IRQ
* structure. In case of success, return with interrupts
* disabled and holding the respective structure.
*
* @param inr Interrupt number (aka inr or irq).
*
* @return IRQ structure of the respective device or NULL.
*/
irq_t *irq_dispatch_and_lock(inr_t inr)
{
link_t *lnk;
unative_t key[] = {
(unative_t) inr,
(unative_t) -1 /* search will use claim() instead of devno */
};
spinlock_lock(&irq_hash_table_lock);
 
lnk = hash_table_find(&irq_hash_table, key);
if (lnk) {
irq_t *irq;
irq = hash_table_get_instance(lnk, irq_t, link);
 
spinlock_unlock(&irq_hash_table_lock);
return irq;
}
spinlock_unlock(&irq_hash_table_lock);
 
return NULL;
}
 
/** Find the IRQ structure corresponding to inr and devno.
*
* This function attempts to look up the IRQ structure
* corresponding to its arguments. On success, this
* function returns with interrupts disabled, holding
* the lock of the respective IRQ structure.
*
* This function assumes interrupts are already disabled.
*
* @param inr INR being looked up.
* @param devno Devno being looked up.
*
* @return Locked IRQ structure on success or NULL on failure.
*/
irq_t *irq_find_and_lock(inr_t inr, devno_t devno)
{
link_t *lnk;
unative_t keys[] = {
(unative_t) inr,
(unative_t) devno
};
spinlock_lock(&irq_hash_table_lock);
 
lnk = hash_table_find(&irq_hash_table, keys);
if (lnk) {
irq_t *irq;
irq = hash_table_get_instance(lnk, irq_t, link);
 
spinlock_unlock(&irq_hash_table_lock);
return irq;
}
spinlock_unlock(&irq_hash_table_lock);
 
return NULL;
}
 
/** Compute hash index for the key.
*
* This function computes hash index into
* the IRQ hash table for which there
* can be collisions between different
* INRs.
*
* The devno is not used to compute the hash.
*
* @param key The first of the keys is inr and the second is devno or -1.
*
* @return Index into the hash table.
*/
index_t irq_ht_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr % irq_hash_table.entries;
}
 
/** Compare hash table element with a key.
*
* There are two things to note about this function.
* First, it is used for the more complex architecture setup
* in which there are way too many interrupt numbers (i.e. inr's)
* to arrange the hash table so that collisions occur only
* among same inrs of different devnos. So the explicit check
* for inr match must be done.
* Second, if devno is -1, the second key (i.e. devno) is not
* used for the match and the result of the claim() function
* is used instead.
*
* This function assumes interrupts are already disabled.
*
* @param key Keys (i.e. inr and devno).
* @param keys This is 2.
* @param item The item to compare the key with.
*
* @return True on match or false otherwise.
*/
bool irq_ht_compare(unative_t key[], count_t keys, link_t *item)
{
irq_t *irq = hash_table_get_instance(item, irq_t, link);
inr_t inr = (inr_t) key[KEY_INR];
devno_t devno = (devno_t) key[KEY_DEVNO];
 
bool rv;
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock(). */
rv = ((irq->inr == inr) && (irq->claim() == IRQ_ACCEPT));
} else {
/* Invoked by irq_find_and_lock(). */
rv = ((irq->inr == inr) && (irq->devno == devno));
}
/* unlock only on non-match */
if (!rv)
spinlock_unlock(&irq->lock);
 
return rv;
}
 
/** Compute hash index for the key.
*
* This function computes hash index into
* the IRQ hash table for which there
* are no collisions between different
* INRs.
*
* @param key The first of the keys is inr and the second is devno or -1.
*
* @return Index into the hash table.
*/
index_t irq_lin_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr;
}
 
/** Compare hash table element with a key.
*
* There are two things to note about this function.
* First, it is used for the less complex architecture setup
* in which there are not too many interrupt numbers (i.e. inr's)
* to arrange the hash table so that collisions occur only
* among same inrs of different devnos. So the explicit check
* for inr match is not done.
* Second, if devno is -1, the second key (i.e. devno) is not
* used for the match and the result of the claim() function
* is used instead.
*
* This function assumes interrupts are already disabled.
*
* @param key Keys (i.e. inr and devno).
* @param keys This is 2.
* @param item The item to compare the key with.
*
* @return True on match or false otherwise.
*/
bool irq_lin_compare(unative_t key[], count_t keys, link_t *item)
{
irq_t *irq = list_get_instance(item, irq_t, link);
devno_t devno = (devno_t) key[KEY_DEVNO];
bool rv;
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock() */
rv = (irq->claim() == IRQ_ACCEPT);
} else {
/* Invoked by irq_find_and_lock() */
rv = (irq->devno == devno);
}
/* unlock only on non-match */
if (!rv)
spinlock_unlock(&irq->lock);
return rv;
}
 
/** @}
*/
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericddi
* @{
*/
/**
* @file
* @brief IRQ dispatcher.
*
* This file provides means of connecting IRQs with particular
* devices and logic for dispatching interrupts to IRQ handlers
* defined by those devices.
*
* This code is designed to support:
* - multiple devices sharing a single IRQ
* - multiple IRQs per single device
*
*
* Note about architectures.
*
* Some architectures have the term IRQ well defined. Examples
* of such architectures include amd64, ia32 and mips32. Some
* other architectures, such as sparc64, don't use the term
* at all. In those cases, we boldly step forward and define what
* an IRQ is.
*
* The implementation is generic enough and still allows the
* architectures to use the hardware layout effectively.
* For instance, on amd64 and ia32, where there are only 16
* IRQs, the irq_hash_table can be optimized to a one-dimensional
* array. Next, when it is known that the IRQ numbers (aka INR's)
* are unique, the claim functions can always return IRQ_ACCEPT.
*
*
* Note about the irq_hash_table.
*
* The hash table is configured to use two keys: inr and devno.
* However, the hash index is computed only from inr. Moreover,
* if devno is -1, the match is based on the return value of
* the claim() function instead of on devno.
*/
 
#include <ddi/irq.h>
#include <adt/hash_table.h>
#include <arch/types.h>
#include <synch/spinlock.h>
#include <arch.h>
 
#define KEY_INR 0
#define KEY_DEVNO 1
 
/**
* Spinlock protecting the hash table.
* This lock must be taken only when interrupts are disabled.
*/
SPINLOCK_INITIALIZE(irq_hash_table_lock);
static hash_table_t irq_hash_table;
 
/**
* Hash table operations for cases when we know that
* there will be collisions between different keys.
*/
static index_t irq_ht_hash(unative_t *key);
static bool irq_ht_compare(unative_t *key, count_t keys, link_t *item);
 
static hash_table_operations_t irq_ht_ops = {
.hash = irq_ht_hash,
.compare = irq_ht_compare,
.remove_callback = NULL /* not used */
};
 
/**
* Hash table operations for cases when we know that
* there will be no collisions between different keys.
* However, there might be still collisions among
* elements with single key (sharing of one IRQ).
*/
static index_t irq_lin_hash(unative_t *key);
static bool irq_lin_compare(unative_t *key, count_t keys, link_t *item);
 
static hash_table_operations_t irq_lin_ops = {
.hash = irq_lin_hash,
.compare = irq_lin_compare,
.remove_callback = NULL /* not used */
};
 
/** Initialize IRQ subsystem.
*
* @param inrs Number of unique IRQ numbers or INRs.
* @param chains Number of chains in the hash table.
*/
void irq_init(count_t inrs, count_t chains)
{
/*
* Be smart about the choice of the hash table operations.
* In cases in which inrs equals the requested number of
* chains (i.e. where there is no collision between
* different keys), we can use optimized set of operations.
*/
if (inrs == chains)
hash_table_create(&irq_hash_table, chains, 2, &irq_lin_ops);
else
hash_table_create(&irq_hash_table, chains, 2, &irq_ht_ops);
}
 
/** Initialize one IRQ structure.
*
* @param irq Pointer to the IRQ structure to be initialized.
*
*/
void irq_initialize(irq_t *irq)
{
link_initialize(&irq->link);
spinlock_initialize(&irq->lock, "irq.lock");
irq->inr = -1;
irq->devno = -1;
irq->trigger = (irq_trigger_t) 0;
irq->claim = NULL;
irq->handler = NULL;
irq->arg = NULL;
irq->notif_cfg.notify = false;
irq->notif_cfg.answerbox = NULL;
irq->notif_cfg.code = NULL;
irq->notif_cfg.method = 0;
irq->notif_cfg.counter = 0;
link_initialize(&irq->notif_cfg.link);
}
 
/** Register IRQ for device.
*
* The irq structure must be filled with information
* about the interrupt source and with the claim()
* function pointer and irq_handler() function pointer.
*
* @param irq IRQ structure belonging to a device.
*/
void irq_register(irq_t *irq)
{
ipl_t ipl;
unative_t key[] = {
(unative_t) irq->inr,
(unative_t) irq->devno
};
ipl = interrupts_disable();
spinlock_lock(&irq_hash_table_lock);
hash_table_insert(&irq_hash_table, key, &irq->link);
spinlock_unlock(&irq_hash_table_lock);
interrupts_restore(ipl);
}
 
/** Dispatch the IRQ.
*
* We assume this function is only called from interrupt
* context (i.e. that interrupts are disabled prior to
* this call).
*
* This function attempts to lookup a fitting IRQ
* structure. In case of success, return with interrupts
* disabled and holding the respective structure.
*
* @param inr Interrupt number (aka inr or irq).
*
* @return IRQ structure of the respective device or NULL.
*/
irq_t *irq_dispatch_and_lock(inr_t inr)
{
link_t *lnk;
unative_t key[] = {
(unative_t) inr,
(unative_t) -1 /* search will use claim() instead of devno */
};
spinlock_lock(&irq_hash_table_lock);
 
lnk = hash_table_find(&irq_hash_table, key);
if (lnk) {
irq_t *irq;
irq = hash_table_get_instance(lnk, irq_t, link);
 
spinlock_unlock(&irq_hash_table_lock);
return irq;
}
spinlock_unlock(&irq_hash_table_lock);
 
return NULL;
}
 
/** Find the IRQ structure corresponding to inr and devno.
*
* This function attempts to look up the IRQ structure
* corresponding to its arguments. On success, this
* function returns with interrupts disabled, holding
* the lock of the respective IRQ structure.
*
* This function assumes interrupts are already disabled.
*
* @param inr INR being looked up.
* @param devno Devno being looked up.
*
* @return Locked IRQ structure on success or NULL on failure.
*/
irq_t *irq_find_and_lock(inr_t inr, devno_t devno)
{
link_t *lnk;
unative_t keys[] = {
(unative_t) inr,
(unative_t) devno
};
spinlock_lock(&irq_hash_table_lock);
 
lnk = hash_table_find(&irq_hash_table, keys);
if (lnk) {
irq_t *irq;
irq = hash_table_get_instance(lnk, irq_t, link);
 
spinlock_unlock(&irq_hash_table_lock);
return irq;
}
spinlock_unlock(&irq_hash_table_lock);
 
return NULL;
}
 
/** Compute hash index for the key.
*
* This function computes hash index into
* the IRQ hash table for which there
* can be collisions between different
* INRs.
*
* The devno is not used to compute the hash.
*
* @param key The first of the keys is inr and the second is devno or -1.
*
* @return Index into the hash table.
*/
index_t irq_ht_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr % irq_hash_table.entries;
}
 
/** Compare hash table element with a key.
*
* There are two things to note about this function.
* First, it is used for the more complex architecture setup
* in which there are way too many interrupt numbers (i.e. inr's)
* to arrange the hash table so that collisions occur only
* among same inrs of different devnos. So the explicit check
* for inr match must be done.
* Second, if devno is -1, the second key (i.e. devno) is not
* used for the match and the result of the claim() function
* is used instead.
*
* This function assumes interrupts are already disabled.
*
* @param key Keys (i.e. inr and devno).
* @param keys This is 2.
* @param item The item to compare the key with.
*
* @return True on match or false otherwise.
*/
bool irq_ht_compare(unative_t key[], count_t keys, link_t *item)
{
irq_t *irq = hash_table_get_instance(item, irq_t, link);
inr_t inr = (inr_t) key[KEY_INR];
devno_t devno = (devno_t) key[KEY_DEVNO];
 
bool rv;
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock(). */
rv = ((irq->inr == inr) && (irq->claim() == IRQ_ACCEPT));
} else {
/* Invoked by irq_find_and_lock(). */
rv = ((irq->inr == inr) && (irq->devno == devno));
}
/* unlock only on non-match */
if (!rv)
spinlock_unlock(&irq->lock);
 
return rv;
}
 
/** Compute hash index for the key.
*
* This function computes hash index into
* the IRQ hash table for which there
* are no collisions between different
* INRs.
*
* @param key The first of the keys is inr and the second is devno or -1.
*
* @return Index into the hash table.
*/
index_t irq_lin_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr;
}
 
/** Compare hash table element with a key.
*
* There are two things to note about this function.
* First, it is used for the less complex architecture setup
* in which there are not too many interrupt numbers (i.e. inr's)
* to arrange the hash table so that collisions occur only
* among same inrs of different devnos. So the explicit check
* for inr match is not done.
* Second, if devno is -1, the second key (i.e. devno) is not
* used for the match and the result of the claim() function
* is used instead.
*
* This function assumes interrupts are already disabled.
*
* @param key Keys (i.e. inr and devno).
* @param keys This is 2.
* @param item The item to compare the key with.
*
* @return True on match or false otherwise.
*/
bool irq_lin_compare(unative_t key[], count_t keys, link_t *item)
{
irq_t *irq = list_get_instance(item, irq_t, link);
devno_t devno = (devno_t) key[KEY_DEVNO];
bool rv;
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock() */
rv = (irq->claim() == IRQ_ACCEPT);
} else {
/* Invoked by irq_find_and_lock() */
rv = (irq->devno == devno);
}
/* unlock only on non-match */
if (!rv)
spinlock_unlock(&irq->lock);
return rv;
}
 
/** @}
*/
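For context, a minimal sketch of how the two operation sets above would typically be selected when the IRQ hash table is created. The irq_ht_ops/irq_lin_ops names, the irq_init() arguments and the inrs == chains heuristic are illustrative assumptions, as is the generic hash_table_create()/hash_table_operations_t interface; only the four hash and compare functions come from the code above.

static hash_table_operations_t irq_ht_ops = {
	.hash = irq_ht_hash,
	.compare = irq_ht_compare,
	.remove_callback = NULL
};

static hash_table_operations_t irq_lin_ops = {
	.hash = irq_lin_hash,
	.compare = irq_lin_compare,
	.remove_callback = NULL
};

void irq_init(count_t inrs, count_t chains)
{
	/*
	 * If every inr can have a chain of its own, collisions can only
	 * happen among entries with the same inr, so the cheaper linear
	 * operations suffice; otherwise use the hashing operations, which
	 * also check the inr explicitly.
	 */
	if (inrs == chains)
		hash_table_create(&irq_hash_table, chains, 2, &irq_lin_ops);
	else
		hash_table_create(&irq_hash_table, chains, 2, &irq_ht_ops);
}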
/branches/rcu/kernel/generic/src/console/klog.c
90,8 → 90,6
ddi_parea_register(&klog_parea);
 
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
sysinfo_set_item_val("klog.fcolor", NULL, (unative_t)
PAGE_COLOR((uintptr_t) klog));
sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER);
sysinfo_set_item_val("klog.devno", NULL, devno);
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR);
/branches/rcu/kernel/generic/src/proc/scheduler.c
61,7 → 61,6
#include <cpu.h>
#include <print.h>
#include <debug.h>
#include <proc/tasklet.h>
 
static void before_task_runs(void);
static void before_thread_runs(void);
227,11 → 226,7
* Take the first thread from the queue.
*/
t = list_get_instance(r->rq_head.next, thread_t, rq_link);
if (verbose)
printf("cpu%d removing, rq_head %x, t: %x, next: %x, link: %x \n",CPU->id, r->rq_head, t, r->rq_head.next, t->rq_link);
list_remove(&t->rq_link);
if (verbose)
printf("cpu%d removed, rq_head %x, t: %x, next: %x, link: %x \n",CPU->id, r->rq_head, t, r->rq_head.next, t->rq_link);
 
spinlock_unlock(&r->lock);
 
443,8 → 438,7
THREAD->call_me = NULL;
THREAD->call_me_with = NULL;
}
if (verbose)
printf("cpu%d, Sleeping unlocking \n", CPU->id);
 
spinlock_unlock(&THREAD->lock);
 
break;
460,17 → 454,12
 
THREAD = NULL;
}
if (verbose)
printf("cpu%d looking for next thread\n", CPU->id);
 
THREAD = find_best_thread();
if (verbose)
printf("cpu%d t locking THREAD:%x \n", CPU->id, THREAD);
spinlock_lock(&THREAD->lock);
priority = THREAD->priority;
spinlock_unlock(&THREAD->lock);
if (verbose)
printf("cpu%d t unlocked after priority THREAD:%x \n", CPU->id, THREAD);
 
relink_rq(priority);
 
/branches/rcu/kernel/generic/src/proc/thread.c
238,7 → 238,6
cpu = CPU;
if (t->flags & THREAD_FLAG_WIRED) {
ASSERT(t->cpu != NULL);
cpu = t->cpu;
}
t->state = Ready;
/branches/rcu/kernel/generic/src/lib/rd.c
90,8 → 90,6
sysinfo_set_item_val("rd.size", NULL, dsize);
sysinfo_set_item_val("rd.address.physical", NULL, (unative_t)
KA2PA((void *) header + hsize));
sysinfo_set_item_val("rd.address.color", NULL, (unative_t)
PAGE_COLOR((uintptr_t) header + hsize));
 
return RE_OK;
}
/branches/rcu/kernel/generic/src/mm/tlb.c
78,7 → 78,8
* @param page Virtual page address, if required by type.
* @param count Number of pages, if required by type.
*/
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, uintptr_t page, count_t count)
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
uintptr_t page, count_t count)
{
int i;
 
107,11 → 108,11
/*
* Enqueue the message.
*/
cpu->tlb_messages[cpu->tlb_messages_count].type = type;
cpu->tlb_messages[cpu->tlb_messages_count].asid = asid;
cpu->tlb_messages[cpu->tlb_messages_count].page = page;
cpu->tlb_messages[cpu->tlb_messages_count].count = count;
cpu->tlb_messages_count++;
index_t idx = cpu->tlb_messages_count++;
cpu->tlb_messages[idx].type = type;
cpu->tlb_messages[idx].asid = asid;
cpu->tlb_messages[idx].page = page;
cpu->tlb_messages[idx].count = count;
}
spinlock_unlock(&cpu->lock);
}
/branches/rcu/kernel/generic/src/mm/backend_anon.c
72,11 → 72,13
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
*/
int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
uintptr_t frame;
bool dirty = false;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
86,13 → 88,14
/*
* The area is shared, chances are that the mapping can be found
* in the pagemap of the address space area share info structure.
* in the pagemap of the address space area share info
* structure.
* In the case that the pagemap does not contain the respective
* mapping, a new frame is allocated and the mapping is created.
*/
mutex_lock(&area->sh_info->lock);
frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
if (!frame) {
bool allocate = true;
int i;
102,7 → 105,8
* Just a small workaround.
*/
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
if (leaf->key[i] ==
ALIGN_DOWN(addr, PAGE_SIZE)) {
allocate = false;
break;
}
110,11 → 114,15
if (allocate) {
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
/*
* Insert the address of the newly allocated frame to the pagemap.
* Insert the address of the newly allocated
* frame to the pagemap.
*/
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
}
frame_reference_add(ADDR2PFN(frame));
137,12 → 145,13
*/
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
}
/*
* Map 'page' to 'frame'.
* Note that TLB shootdown is not attempted as only new information is being
* inserted into page tables.
* Note that TLB shootdown is not attempted as only new information is
* being inserted into page tables.
*/
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
162,9 → 171,6
void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
 
/** Share the anonymous address space area.
184,7 → 190,8
* Copy used portions of the area to sh_info's page map.
*/
mutex_lock(&area->sh_info->lock);
for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
for (cur = area->used_space.leaf_head.next;
cur != &area->used_space.leaf_head; cur = cur->next) {
btree_node_t *node;
int i;
198,14 → 205,19
pte_t *pte;
page_table_lock(area->as, false);
pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
pte = page_mapping_find(area->as,
base + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap,
(base + j * PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
page_table_unlock(area->as, false);
frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
 
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
frame_reference_add(pfn);
}
 
}
}
mutex_unlock(&area->sh_info->lock);
/branches/rcu/kernel/generic/src/mm/as.c
95,10 → 95,13
#endif
 
/**
* This lock protects inactive_as_with_asid_head list. It must be acquired
* before as_t mutex.
* This lock serializes access to the ASID subsystem.
* It protects:
* - inactive_as_with_asid_head list
* - as->asid for each as of the as_t type
* - asids_allocated counter
*/
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
SPINLOCK_INITIALIZE(asidlock);
 
/**
* This list contains address spaces that are not active on any
205,14 → 208,15
* Since there is no reference to this area,
* it is safe not to lock its mutex.
*/
 
ipl = interrupts_disable();
spinlock_lock(&inactive_as_with_asid_lock);
spinlock_lock(&asidlock);
if (as->asid != ASID_INVALID && as != AS_KERNEL) {
if (as != AS && as->cpu_refcount == 0)
list_remove(&as->inactive_as_with_asid_link);
asid_put(as->asid);
}
spinlock_unlock(&inactive_as_with_asid_lock);
spinlock_unlock(&asidlock);
 
/*
* Destroy address space areas of the address space.
411,7 → 415,7
int i = 0;
if (overlaps(b, c * PAGE_SIZE, area->base,
pages*PAGE_SIZE)) {
pages * PAGE_SIZE)) {
if (b + c * PAGE_SIZE <= start_free) {
/*
553,7 → 557,7
if (area->backend &&
area->backend->frame_free) {
area->backend->frame_free(area, b +
j * PAGE_SIZE, PTE_GET_FRAME(pte));
j * PAGE_SIZE, PTE_GET_FRAME(pte));
}
page_mapping_remove(as, b + j * PAGE_SIZE);
page_table_unlock(as, false);
613,8 → 617,7
* such address space area, EPERM if there was a problem in accepting the area
* or ENOMEM if there was a problem in allocating destination address space
* area. ENOTSUP is returned if the address space area backend does not support
* sharing or if the kernel detects an attempt to create an illegal address
* alias.
* sharing.
*/
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
667,20 → 670,6
return EPERM;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (!(dst_flags_mask & AS_AREA_EXEC)) {
if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
/*
* Refuse to create an illegal address alias.
*/
mutex_unlock(&src_area->lock);
mutex_unlock(&src_as->lock);
interrupts_restore(ipl);
return ENOTSUP;
}
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/*
* Now we are committed to sharing the area.
* First, prepare the area for sharing.
875,7 → 864,8
/** Switch address spaces.
*
* Note that this function cannot sleep as it is essentially a part of
* scheduling. Sleeping here would lead to deadlock on wakeup.
* scheduling. Sleeping here would lead to deadlock on wakeup. Another
* thing which is forbidden in this context is locking the address space.
*
* @param old Old address space or NULL.
* @param new New address space.
882,17 → 872,12
*/
void as_switch(as_t *old_as, as_t *new_as)
{
ipl_t ipl;
bool needs_asid = false;
ipl = interrupts_disable();
spinlock_lock(&inactive_as_with_asid_lock);
spinlock_lock(&asidlock);
 
/*
* First, take care of the old address space.
*/
if (old_as) {
mutex_lock_active(&old_as->lock);
ASSERT(old_as->cpu_refcount);
if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
/*
901,11 → 886,10
* list of inactive address spaces with assigned
* ASID.
*/
ASSERT(old_as->asid != ASID_INVALID);
list_append(&old_as->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
ASSERT(old_as->asid != ASID_INVALID);
list_append(&old_as->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
}
mutex_unlock(&old_as->lock);
 
/*
* Perform architecture-specific tasks when the address space
917,36 → 901,15
/*
* Second, prepare the new address space.
*/
mutex_lock_active(&new_as->lock);
if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
if (new_as->asid != ASID_INVALID) {
if (new_as->asid != ASID_INVALID)
list_remove(&new_as->inactive_as_with_asid_link);
} else {
/*
* Defer call to asid_get() until new_as->lock is released.
*/
needs_asid = true;
}
else
new_as->asid = asid_get();
}
#ifdef AS_PAGE_TABLE
SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif
mutex_unlock(&new_as->lock);
 
if (needs_asid) {
/*
* Allocation of new ASID was deferred
* until now in order to avoid deadlock.
*/
asid_t asid;
asid = asid_get();
mutex_lock_active(&new_as->lock);
new_as->asid = asid;
mutex_unlock(&new_as->lock);
}
spinlock_unlock(&inactive_as_with_asid_lock);
interrupts_restore(ipl);
/*
* Perform architecture-specific steps.
953,6 → 916,8
* (e.g. write ASID to hardware register etc.)
*/
as_install_arch(new_as);
 
spinlock_unlock(&asidlock);
AS = new_as;
}
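To summarize the reworked synchronization above, here is a condensed, illustrative sketch of the as_switch() flow under the single asidlock (architecture-specific steps such as as_deinstall_arch() and SET_PTL0_ADDRESS() are omitted; this paraphrases the diff rather than quoting the final source):

void as_switch(as_t *old_as, as_t *new_as)
{
	/* One lock now covers the inactive list, as->asid and the counter. */
	spinlock_lock(&asidlock);

	/* Retire the old address space; keep its ASID on the inactive list. */
	if (old_as && (--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
		ASSERT(old_as->asid != ASID_INVALID);
		list_append(&old_as->inactive_as_with_asid_link,
		    &inactive_as_with_asid_head);
	}

	/* Activate the new address space; grab an ASID if it has none. */
	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
		if (new_as->asid != ASID_INVALID)
			list_remove(&new_as->inactive_as_with_asid_link);
		else
			new_as->asid = asid_get();
	}

	/* as_install_arch() may read new_as->asid while asidlock is held. */
	as_install_arch(new_as);
	spinlock_unlock(&asidlock);
	AS = new_as;
}

Because no as_t mutex is taken any more, as_switch() can no longer sleep, which is exactly the constraint stated in the comment above.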
/branches/rcu/kernel/generic/src/mm/backend_phys.c
32,7 → 32,8
 
/**
* @file
* @brief Backend for address space areas backed by continuous physical memory.
* @brief Backend for address space areas backed by continuous physical
* memory.
*/
 
#include <debug.h>
62,7 → 63,8
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
*/
int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
72,7 → 74,8
return AS_PF_FAULT;
 
ASSERT(addr - area->base < area->backend_data.frames * FRAME_SIZE);
page_mapping_insert(AS, addr, base + (addr - area->base), as_area_get_flags(area));
page_mapping_insert(AS, addr, base + (addr - area->base),
as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
 
/branches/rcu/kernel/generic/src/mm/frame.c
70,16 → 70,19
typedef struct {
count_t refcount; /**< tracking of shared frames */
uint8_t buddy_order; /**< buddy system block order */
link_t buddy_link; /**< link to the next free block inside one order */
link_t buddy_link; /**< link to the next free block inside one
order */
void *parent; /**< If allocated by slab, this points there */
} frame_t;
 
typedef struct {
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */
pfn_t base; /**< frame_no of the first frame in the frames array */
pfn_t base; /**< frame_no of the first frame in the frames
array */
count_t count; /**< Size of zone */
 
frame_t *frames; /**< array of frame_t structures in this zone */
frame_t *frames; /**< array of frame_t structures in this
zone */
count_t free_count; /**< number of free frame_t structures */
count_t busy_count; /**< number of busy frame_t structures */
157,8 → 160,8
for (i = 0; i < zones.count; i++) {
/* Check for overflow */
z = zones.info[i];
if (overlaps(newzone->base,newzone->count,
z->base, z->count)) {
if (overlaps(newzone->base,newzone->count, z->base,
z->count)) {
printf("Zones overlap!\n");
return -1;
}
166,7 → 169,7
break;
}
/* Move other zones up */
for (j = i;j < zones.count; j++)
for (j = i; j < zones.count; j++)
zones.info[j + 1] = zones.info[j];
zones.info[i] = newzone;
zones.count++;
202,7 → 205,8
z = zones.info[i];
spinlock_lock(&z->lock);
if (z->base <= frame && z->base + z->count > frame) {
spinlock_unlock(&zones.lock); /* Unlock the global lock */
/* Unlock the global lock */
spinlock_unlock(&zones.lock);
if (pzone)
*pzone = i;
return z;
229,7 → 233,8
* Assume interrupts are disabled.
*
* @param order Size (2^order) of free space we are trying to find
* @param pzone Pointer to preferred zone or NULL, on return contains zone number
* @param pzone Pointer to preferred zone or NULL, on return contains zone
* number
*/
static zone_t * find_free_zone_and_lock(uint8_t order, unsigned int *pzone)
{
273,10 → 278,10
 * @param order - Order of parent must be different from this parameter.
*/
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child,
uint8_t order)
uint8_t order)
{
frame_t * frame;
zone_t * zone;
frame_t *frame;
zone_t *zone;
index_t index;
frame = list_get_instance(child, frame_t, buddy_link);
293,8 → 298,8
 
static void zone_buddy_print_id(buddy_system_t *b, link_t *block)
{
frame_t * frame;
zone_t * zone;
frame_t *frame;
zone_t *zone;
index_t index;
 
frame = list_get_instance(block, frame_t, buddy_link);
310,16 → 315,17
*
* @return Buddy for given block if found
*/
static link_t * zone_buddy_find_buddy(buddy_system_t *b, link_t * block)
static link_t *zone_buddy_find_buddy(buddy_system_t *b, link_t *block)
{
frame_t * frame;
zone_t * zone;
frame_t *frame;
zone_t *zone;
index_t index;
bool is_left, is_right;
 
frame = list_get_instance(block, frame_t, buddy_link);
zone = (zone_t *) b->data;
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order));
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),
frame->buddy_order));
is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
is_right = IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame);
348,8 → 354,8
*
* @return right block
*/
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t * block) {
frame_t * frame_l, * frame_r;
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t *block) {
frame_t *frame_l, *frame_r;
 
frame_l = list_get_instance(block, frame_t, buddy_link);
frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));
365,8 → 371,8
*
* @return Coalesced block (actually block that represents lower address)
*/
static link_t * zone_buddy_coalesce(buddy_system_t *b, link_t * block_1,
link_t * block_2)
static link_t *zone_buddy_coalesce(buddy_system_t *b, link_t *block_1,
link_t *block_2)
{
frame_t *frame1, *frame2;
382,8 → 388,9
* @param block Buddy system block
* @param order Order to set
*/
static void zone_buddy_set_order(buddy_system_t *b, link_t * block, uint8_t order) {
frame_t * frame;
static void zone_buddy_set_order(buddy_system_t *b, link_t *block,
uint8_t order) {
frame_t *frame;
frame = list_get_instance(block, frame_t, buddy_link);
frame->buddy_order = order;
}
395,8 → 402,8
*
* @return Order of block
*/
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t * block) {
frame_t * frame;
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t *block) {
frame_t *frame;
frame = list_get_instance(block, frame_t, buddy_link);
return frame->buddy_order;
}
420,8 → 427,8
* @param block Buddy system block
*
*/
static void zone_buddy_mark_available(buddy_system_t *b, link_t * block) {
frame_t * frame;
static void zone_buddy_mark_available(buddy_system_t *b, link_t *block) {
frame_t *frame;
frame = list_get_instance(block, frame_t, buddy_link);
frame->refcount = 0;
}
520,8 → 527,8
frame = zone_get_frame(zone, frame_idx);
if (frame->refcount)
return;
link = buddy_system_alloc_block(zone->buddy_system,
&frame->buddy_link);
link = buddy_system_alloc_block(zone->buddy_system,
&frame->buddy_link);
ASSERT(link);
zone->free_count--;
}
545,12 → 552,12
pfn_t frame_idx;
frame_t *frame;
 
ASSERT(!overlaps(z1->base,z1->count,z2->base,z2->count));
ASSERT(!overlaps(z1->base, z1->count, z2->base, z2->count));
ASSERT(z1->base < z2->base);
 
spinlock_initialize(&z->lock, "zone_lock");
z->base = z1->base;
z->count = z2->base+z2->count - z1->base;
z->count = z2->base + z2->count - z1->base;
z->flags = z1->flags & z2->flags;
 
z->free_count = z1->free_count + z2->free_count;
558,12 → 565,12
max_order = fnzb(z->count);
 
z->buddy_system = (buddy_system_t *)&z[1];
buddy_system_create(z->buddy_system, max_order,
&zone_buddy_system_operations,
(void *) z);
z->buddy_system = (buddy_system_t *) &z[1];
buddy_system_create(z->buddy_system, max_order,
&zone_buddy_system_operations, (void *) z);
 
z->frames = (frame_t *)((uint8_t *) z->buddy_system + buddy_conf_size(max_order));
z->frames = (frame_t *)((uint8_t *) z->buddy_system +
buddy_conf_size(max_order));
for (i = 0; i < z->count; i++) {
/* This marks all frames busy */
frame_initialize(&z->frames[i]);
603,7 → 610,7
}
while (zone_can_alloc(z2, 0)) {
frame_idx = zone_frame_alloc(z2, 0);
frame = &z->frames[frame_idx + (z2->base-z1->base)];
frame = &z->frames[frame_idx + (z2->base - z1->base)];
frame->refcount = 0;
buddy_system_free(z->buddy_system, &frame->buddy_link);
}
668,7 → 675,7
for (i = 0; i < (count_t) (1 << order); i++) {
frame = &zone->frames[i + frame_idx];
frame->buddy_order = 0;
if (! frame->refcount)
if (!frame->refcount)
frame->refcount = 1;
ASSERT(frame->refcount == 1);
}
710,7 → 717,8
spinlock_lock(&zone1->lock);
spinlock_lock(&zone2->lock);
 
cframes = SIZE2FRAMES(zone_conf_size(zone2->base+zone2->count-zone1->base));
cframes = SIZE2FRAMES(zone_conf_size(zone2->base + zone2->count -
zone1->base));
if (cframes == 1)
order = 0;
else
803,7 → 811,8
/* Allocate frames _after_ the conframe */
/* Check sizes */
z->frames = (frame_t *)((uint8_t *) z->buddy_system + buddy_conf_size(max_order));
z->frames = (frame_t *)((uint8_t *) z->buddy_system +
buddy_conf_size(max_order));
for (i = 0; i < count; i++) {
frame_initialize(&z->frames[i]);
}
865,16 → 874,20
if (confframe >= start && confframe < start+count) {
for (;confframe < start + count; confframe++) {
addr = PFN2ADDR(confframe);
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.base), config.kernel_size))
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(config.base), config.kernel_size))
continue;
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.stack_base), config.stack_size))
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(config.stack_base), config.stack_size))
continue;
bool overlap = false;
count_t i;
for (i = 0; i < init.cnt; i++)
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(init.tasks[i].addr), init.tasks[i].size)) {
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(init.tasks[i].addr),
init.tasks[i].size)) {
overlap = true;
break;
}
915,7 → 928,7
spinlock_unlock(&zone->lock);
}
 
void * frame_get_parent(pfn_t pfn, unsigned int hint)
void *frame_get_parent(pfn_t pfn, unsigned int hint)
{
zone_t *zone = find_zone_and_lock(pfn, &hint);
void *res;
1073,15 → 1086,21
/* Tell the architecture to create some memory */
frame_arch_init();
if (config.cpu_active == 1) {
frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), SIZE2FRAMES(config.kernel_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)), SIZE2FRAMES(config.stack_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
SIZE2FRAMES(config.kernel_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
SIZE2FRAMES(config.stack_size));
count_t i;
for (i = 0; i < init.cnt; i++)
frame_mark_unavailable(ADDR2PFN(KA2PA(init.tasks[i].addr)), SIZE2FRAMES(init.tasks[i].size));
for (i = 0; i < init.cnt; i++) {
pfn_t pfn = ADDR2PFN(KA2PA(init.tasks[i].addr));
frame_mark_unavailable(pfn,
SIZE2FRAMES(init.tasks[i].size));
}
 
if (ballocs.size)
frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)), SIZE2FRAMES(ballocs.size));
frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
SIZE2FRAMES(ballocs.size));
 
/* Black list first frame, as allocating NULL would
* fail in some places */
1106,7 → 1125,8
for (i = 0; i < zones.count; i++) {
zone = zones.info[i];
spinlock_lock(&zone->lock);
printf("%-2d %12p %12zd %12zd\n", i, PFN2ADDR(zone->base), zone->free_count, zone->busy_count);
printf("%-2d %12p %12zd %12zd\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
spinlock_unlock(&zone->lock);
}
spinlock_unlock(&zones.lock);
1138,10 → 1158,14
spinlock_lock(&zone->lock);
printf("Memory zone information\n");
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2, PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
printf("Available space: %zd frames (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2,
PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zdK)\n", zone->count,
((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count,
(zone->busy_count * FRAME_SIZE) >> 10);
printf("Available space: %zd frames (%zdK)\n", zone->free_count,
(zone->free_count * FRAME_SIZE) >> 10);
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
spinlock_unlock(&zone->lock);
1152,3 → 1176,4
 
/** @}
*/
 
/branches/rcu/kernel/generic/src/mm/backend_elf.c
71,7 → 71,8
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
*/
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
80,11 → 81,13
btree_node_t *leaf;
uintptr_t base, frame;
index_t i;
bool dirty = false;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
 
ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
ASSERT((addr >= entry->p_vaddr) &&
(addr < entry->p_vaddr + entry->p_memsz));
i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
base = (uintptr_t) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
107,7 → 110,8
*/
 
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
if (leaf->key[i] ==
ALIGN_DOWN(addr, PAGE_SIZE)) {
found = true;
break;
}
115,8 → 119,10
}
if (frame || found) {
frame_reference_add(ADDR2PFN(frame));
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
page_mapping_insert(AS, addr, frame,
as_area_get_flags(area));
if (!used_space_insert(area,
ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
mutex_unlock(&area->sh_info->lock);
return AS_PF_OK;
124,10 → 130,12
}
/*
* The area is either not shared or the pagemap does not contain the mapping.
* The area is either not shared or the pagemap does not contain the
* mapping.
*/
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE <
entry->p_vaddr + entry->p_filesz) {
/*
* Initialized portion of the segment. The memory is backed
* directly by the content of the ELF image. Pages are
138,18 → 146,22
*/
if (entry->p_flags & PF_W) {
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
memcpy((void *) PA2KA(frame),
(void *) (base + i * FRAME_SIZE), FRAME_SIZE);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
} else {
frame = KA2PA(base + i*FRAME_SIZE);
}
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >=
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
/*
* This is the uninitialized portion of the segment.
* It is not physically present in the ELF image.
158,11 → 170,13
*/
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
} else {
175,12 → 189,15
size = entry->p_filesz - (i<<PAGE_WIDTH);
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE),
size);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
}
211,31 → 228,28
uintptr_t base;
index_t i;
ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
ASSERT((page >= entry->p_vaddr) &&
(page < entry->p_vaddr + entry->p_memsz));
i = (page - entry->p_vaddr) >> PAGE_WIDTH;
base = (uintptr_t) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (page + PAGE_SIZE <
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (entry->p_flags & PF_W) {
/*
* Free the frame with the copy of writable segment data.
* Free the frame with the copy of writable segment
* data.
*/
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
} else {
/*
* The frame is either anonymous memory or the mixed case (i.e. lower
* part is backed by the ELF image and the upper is anonymous).
* In any case, a frame needs to be freed.
*/
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
* The frame is either anonymous memory or the mixed case (i.e.
* lower part is backed by the ELF image and the upper is
* anonymous). In any case, a frame needs to be freed.
*/
frame_free(frame);
}
}
 
260,10 → 274,12
* Find the node in which to start linear search.
*/
if (area->flags & AS_AREA_WRITE) {
node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
node = list_get_instance(area->used_space.leaf_head.next,
btree_node_t, leaf_link);
} else {
(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap, leaf);
node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
leaf);
if (!node)
node = leaf;
}
272,7 → 288,8
* Copy used anonymous portions of the area to sh_info's page map.
*/
mutex_lock(&area->sh_info->lock);
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) {
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
cur = cur->next) {
int i;
node = list_get_instance(cur, btree_node_t, leaf_link);
294,19 → 311,26
pte_t *pte;
/*
* Skip read-only pages that are backed by the ELF image.
* Skip read-only pages that are backed by the
* ELF image.
*/
if (!(area->flags & AS_AREA_WRITE))
if (base + (j + 1)*PAGE_SIZE <= start_anon)
if (base + (j + 1) * PAGE_SIZE <=
start_anon)
continue;
page_table_lock(area->as, false);
pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
pte = page_mapping_find(area->as,
base + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap,
(base + j * PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
page_table_unlock(area->as, false);
frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
 
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
frame_reference_add(pfn);
}
}
/branches/rcu/kernel/Makefile
95,10 → 95,6
DEFS += -DCONFIG_VIRT_IDX_DCACHE
endif
 
ifeq ($(CONFIG_POWEROFF),y)
DEFS += -DCONFIG_POWEROFF
endif
 
ifeq ($(CONFIG_FB),y)
ifeq ($(ARCH),ia32)
DEFS += -DCONFIG_VESA_WIDTH=$(CONFIG_VESA_WIDTH)
/branches/rcu/kernel/arch/sparc64/include/stack.h
43,7 → 43,7
/**
* 16-extended-word save area for %i[0-7] and %l[0-7] registers.
*/
#define STACK_WINDOW_SAVE_AREA_SIZE (16*STACK_ITEM_SIZE)
#define STACK_WINDOW_SAVE_AREA_SIZE (16 * STACK_ITEM_SIZE)
 
/**
* By convention, the actual top of the stack is %sp + STACK_BIAS.
/branches/rcu/kernel/arch/sparc64/include/mm/frame.h
35,7 → 35,20
#ifndef KERN_sparc64_FRAME_H_
#define KERN_sparc64_FRAME_H_
 
#define FRAME_WIDTH 13 /* 8K */
/*
* Page size supported by the MMU.
 * With 8K pages, there is the nasty problem of illegal virtual aliasing.
* Therefore, the kernel uses 8K only internally on the TLB and TSB levels.
*/
#define MMU_FRAME_WIDTH 13 /* 8K */
#define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH)
 
/*
* Page size exported to the generic memory management subsystems.
* This page size is not directly supported by the MMU, but we can emulate
* each 16K page with a pair of adjacent 8K pages.
*/
#define FRAME_WIDTH 14 /* 16K */
#define FRAME_SIZE (1 << FRAME_WIDTH)
 
#ifdef KERNEL
/branches/rcu/kernel/arch/sparc64/include/mm/page.h
37,11 → 37,27
 
#include <arch/mm/frame.h>
 
/*
* On the TLB and TSB level, we still use 8K pages, which are supported by the
* MMU.
*/
#define MMU_PAGE_WIDTH MMU_FRAME_WIDTH
#define MMU_PAGE_SIZE MMU_FRAME_SIZE
 
/*
* On the page table level, we use 16K pages. 16K pages are not supported by
* the MMU but we emulate them with pairs of 8K pages.
*/
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 1 /**< 14 - 13; 2^14 == 16K == alias boundary. */
#define MMU_PAGES_PER_PAGE (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH))
 
/*
* With 16K pages, there is only one page color.
*/
#define PAGE_COLOR_BITS 0 /**< 14 - 14; 2^14 == 16K == alias boundary. */
 
#ifdef KERNEL
 
#ifndef __ASM__
/branches/rcu/kernel/arch/sparc64/include/mm/as.h
81,10 → 81,11
#include <genarch/mm/as_ht.h>
 
#ifdef CONFIG_TSB
# include <arch/mm/tsb.h>
# define as_invalidate_translation_cache(as, page, cnt) tsb_invalidate(as, page, cnt)
#include <arch/mm/tsb.h>
#define as_invalidate_translation_cache(as, page, cnt) \
tsb_invalidate((as), (page), (cnt))
#else
# define as_invalidate_translation_cache(as, page, cnt)
#define as_invalidate_translation_cache(as, page, cnt)
#endif
 
extern void as_arch_init(void);
/branches/rcu/kernel/arch/sparc64/include/mm/tsb.h
112,8 → 112,8
struct pte;
 
extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);
extern void itsb_pte_copy(struct pte *t);
extern void dtsb_pte_copy(struct pte *t, bool ro);
extern void itsb_pte_copy(struct pte *t, index_t index);
extern void dtsb_pte_copy(struct pte *t, index_t index, bool ro);
 
#endif /* !def __ASM__ */
 
/branches/rcu/kernel/arch/sparc64/include/cpu.h
39,6 → 39,10
#include <arch/register.h>
#include <arch/asm.h>
 
#ifdef CONFIG_SMP
#include <arch/mm/cache.h>
#endif
 
#define MANUF_FUJITSU 0x04
#define MANUF_ULTRASPARC 0x17 /**< UltraSPARC I, UltraSPARC II */
#define MANUF_SUN 0x3e
53,12 → 57,13
#define IMPL_SPARC64V 0x5
 
typedef struct {
uint32_t mid; /**< Processor ID as read from UPA_CONFIG. */
uint32_t mid; /**< Processor ID as read from
UPA_CONFIG. */
ver_reg_t ver;
uint32_t clock_frequency; /**< Processor frequency in Hz. */
uint64_t next_tick_cmpr; /**< Next clock interrupt should be
generated when the TICK register
matches this value. */
generated when the TICK register
matches this value. */
} cpu_arch_t;
#endif
/branches/rcu/kernel/arch/sparc64/Makefile.inc
83,8 → 83,7
arch/$(ARCH)/src/fpu_context.c \
arch/$(ARCH)/src/dummy.s \
arch/$(ARCH)/src/mm/as.c \
arch/$(ARCH)/src/mm/cache.c \
arch/$(ARCH)/src/mm/cache_asm.S \
arch/$(ARCH)/src/mm/cache.S \
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c \
/branches/rcu/kernel/arch/sparc64/src/smp/ipi.c
78,9 → 78,9
func);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_1, 0);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_2, 0);
asi_u64_write(ASI_UDB_INTR_W, (mid <<
INTR_VEC_DISPATCH_MID_SHIFT) | ASI_UDB_INTR_W_DISPATCH,
0);
asi_u64_write(ASI_UDB_INTR_W,
(mid << INTR_VEC_DISPATCH_MID_SHIFT) |
ASI_UDB_INTR_W_DISPATCH, 0);
membar();
/branches/rcu/kernel/arch/sparc64/src/trap/interrupt.c
83,9 → 83,9
} else if (data0 > config.base) {
/*
* This is a cross-call.
* data0 contains address of kernel function.
* data0 contains address of the kernel function.
* We call the function only after we verify
* it is on of the supported ones.
* it is one of the supported ones.
*/
#ifdef CONFIG_SMP
if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
/branches/rcu/kernel/arch/sparc64/src/mm/tlb.c
54,14 → 54,14
#include <arch/mm/tsb.h>
#endif
 
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const
char *str);
static void dtlb_pte_copy(pte_t *t, index_t index, bool ro);
static void itlb_pte_copy(pte_t *t, index_t index);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
tlb_tag_access_reg_t tag, const char *str);
 
char *context_encoding[] = {
"Primary",
92,8 → 92,8
* @param locked True for permanent mappings, false otherwise.
* @param cacheable True if the mapping is cacheable, false otherwise.
*/
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool
locked, bool cacheable)
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
bool locked, bool cacheable)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
126,11 → 126,12
 
/** Copy PTE to TLB.
*
* @param t Page Table Entry to be copied.
* @param ro If true, the entry will be created read-only, regardless of its w
* field.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the entry will be created read-only, regardless of its
* w field.
*/
void dtlb_pte_copy(pte_t *t, bool ro)
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
137,15 → 138,15
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
pg.address = t->page + (index << MMU_PAGE_WIDTH);
fr.address = t->frame + (index << MMU_PAGE_WIDTH);
 
tag.value = 0;
tag.context = t->as->asid;
tag.vpn = pg.vpn;
 
dtlb_tag_access_write(tag.value);
 
data.value = 0;
data.v = true;
data.size = PAGESIZE_8K;
158,15 → 159,16
data.p = t->k; /* p like privileged */
data.w = ro ? false : t->w;
data.g = t->g;
 
dtlb_data_in_write(data.value);
}
 
/** Copy PTE to ITLB.
*
* @param t Page Table Entry to be copied.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itlb_pte_copy(pte_t *t)
void itlb_pte_copy(pte_t *t, index_t index)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
173,8 → 175,8
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
pg.address = t->page + (index << MMU_PAGE_WIDTH);
fr.address = t->frame + (index << MMU_PAGE_WIDTH);
 
tag.value = 0;
tag.context = t->as->asid;
199,6 → 201,7
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
{
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
pte_t *t;
 
page_table_lock(AS, true);
209,9 → 212,9
* Insert it into ITLB.
*/
t->a = true;
itlb_pte_copy(t);
itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
itsb_pte_copy(t);
itsb_pte_copy(t, index);
#endif
page_table_unlock(AS, true);
} else {
222,7 → 225,7
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
do_fast_instruction_access_mmu_miss_fault(istate,
__FUNCTION__);
__FUNCTION__);
}
}
}
236,19 → 239,21
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
va = tag.vpn << PAGE_WIDTH;
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE;
 
if (tag.context == ASID_KERNEL) {
if (!tag.vpn) {
/* NULL access in kernel */
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
"kernel page fault.");
"kernel page fault.");
}
 
page_table_lock(AS, true);
259,19 → 264,20
* Insert it into DTLB.
*/
t->a = true;
dtlb_pte_copy(t, true);
dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, true);
dtsb_pte_copy(t, index, true);
#endif
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to the address space page fault handler.
* Forward the page fault to the address space page fault
* handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
}
}
281,10 → 287,12
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
va = tag.vpn << PAGE_WIDTH;
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
296,10 → 304,11
*/
t->a = true;
t->d = true;
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
dtlb_pte_copy(t, false);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
va + index * MMU_PAGE_SIZE);
dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, false);
dtsb_pte_copy(t, index, false);
#endif
page_table_unlock(AS, true);
} else {
310,7 → 319,7
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
do_fast_data_access_protection_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
}
}
328,10 → 337,10
t.value = itlb_tag_read_read(i);
 
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
printf("D-TLB contents:\n");
340,16 → 349,16
t.value = dtlb_tag_read_read(i);
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
}
 
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char
*str)
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str)
{
fault_if_from_uspace(istate, "%s\n", str);
dump_istate(istate);
356,29 → 365,29
panic("%s\n", str);
}
 
void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
va = tag.vpn << MMU_PAGE_WIDTH;
 
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
tag.context);
dump_istate(istate);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
panic("%s\n", str);
}
 
void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
va = tag.vpn << MMU_PAGE_WIDTH;
 
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
tag.context);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
dump_istate(istate);
panic("%s\n", str);
393,8 → 402,8
sfar = dtlb_sfar_read();
printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
printf("DTLB SFAR: address=%p\n", sfar);
dtlb_sfsr_write(0);
481,11 → 490,11
ctx.context = asid;
mmu_primary_context_write(ctx.v);
for (i = 0; i < cnt; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
PAGE_SIZE);
for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * MMU_PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * MMU_PAGE_SIZE);
}
mmu_primary_context_write(pc_save.v);
/branches/rcu/kernel/arch/sparc64/src/mm/as.c
42,7 → 42,6
#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#include <arch/memstr.h>
#include <synch/mutex.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <bitops.h>
61,8 → 60,12
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
/*
* The order must be calculated with respect to the emulated
* 16K page size.
*/
int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH);
sizeof(tsb_entry_t)) >> FRAME_WIDTH);
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
 
if (!tsb)
70,9 → 73,9
 
as->arch.itsb = (tsb_entry_t *) tsb;
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
sizeof(tsb_entry_t));
memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
* sizeof(tsb_entry_t), 0);
sizeof(tsb_entry_t));
memsetb((uintptr_t) as->arch.itsb,
(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
return 0;
}
80,8 → 83,12
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
/*
 * The count must be calculated with respect to the emulated 16K page
* size.
*/
count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH;
sizeof(tsb_entry_t)) >> FRAME_WIDTH;
frame_free(KA2PA((uintptr_t) as->arch.itsb));
return cnt;
#else
92,13 → 99,7
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
ipl_t ipl;
 
ipl = interrupts_disable();
mutex_lock_active(&as->lock); /* completely unnecessary, but polite */
tsb_invalidate(as, 0, (count_t) -1);
mutex_unlock(&as->lock);
interrupts_restore(ipl);
#endif
return 0;
}
115,18 → 116,17
tlb_context_reg_t ctx;
/*
* Note that we don't lock the address space.
* That's correct - we can afford it here
* because we only read members that are
* currently read-only.
* Note that we don't and may not lock the address space. That's ok
* since we only read members that are currently read-only.
*
* Moreover, the as->asid is protected by asidlock, which is being held.
*/
/*
* Write ASID to secondary context register.
* The primary context register has to be set
* from TL>0 so it will be filled from the
* secondary context register from the TL=1
* code just before switch to userspace.
* Write ASID to secondary context register. The primary context
* register has to be set from TL>0 so it will be filled from the
* secondary context register from the TL=1 code just before switch to
* userspace.
*/
ctx.v = 0;
ctx.context = as->asid;
139,7 → 139,7
 
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
158,9 → 158,9
tsb_base.size = TSB_SIZE;
tsb_base.split = 0;
 
tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
itsb_base_write(tsb_base.value);
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
dtsb_base_write(tsb_base.value);
#endif
}
176,10 → 176,10
{
 
/*
* Note that we don't lock the address space.
* That's correct - we can afford it here
* because we only read members that are
* currently read-only.
* Note that we don't and may not lock the address space. That's ok
* since we only read members that are currently read-only.
*
* Moreover, the as->asid is protected by asidlock, which is being held.
*/
 
#ifdef CONFIG_TSB
189,7 → 189,7
 
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
/branches/rcu/kernel/arch/sparc64/src/mm/tsb.c
34,6 → 34,7
 
#include <arch/mm/tsb.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <mm/as.h>
#include <arch/types.h>
40,7 → 41,7
#include <macros.h>
#include <debug.h>
 
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
 
/** Invalidate portion of TSB.
*
59,28 → 60,34
ASSERT(as->arch.itsb && as->arch.dtsb);
i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK;
cnt = min(pages, ITSB_ENTRY_COUNT);
i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
if (pages == (count_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
cnt = ITSB_ENTRY_COUNT;
else
cnt = pages * 2;
for (i = 0; i < cnt; i++) {
as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
true;
true;
as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
true;
true;
}
}
 
/** Copy software PTE to ITSB.
*
* @param t Software PTE.
* @param t Software PTE.
 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itsb_pte_copy(pte_t *t)
void itsb_pte_copy(pte_t *t, index_t index)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
as = t->as;
tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
tsb = &as->arch.itsb[entry];
 
/*
* We use write barriers to make sure that the TSB load
95,10 → 102,11
write_barrier();
 
tsb->tag.context = as->asid;
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
tsb->data.p = t->k; /* p as privileged */
tsb->data.v = t->p;
110,16 → 118,19
 
/** Copy software PTE to DTSB.
*
* @param t Software PTE.
* @param ro If true, the mapping is copied read-only.
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the mapping is copied read-only.
*/
void dtsb_pte_copy(pte_t *t, bool ro)
void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
as = t->as;
tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
tsb = &as->arch.dtsb[entry];
 
/*
* We use write barriers to make sure that the TSB load
134,10 → 145,11
write_barrier();
 
tsb->tag.context = as->asid;
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
tsb->data.cv = t->c;
148,7 → 160,7
write_barrier();
tsb->tag.invalid = true; /* mark the entry as valid */
tsb->tag.invalid = false; /* mark the entry as valid */
}
 
/** @}
/branches/rcu/kernel/arch/sparc64/src/mm/page.c
73,9 → 73,9
*/
for (i = 0; i < bsp_locked_dtlb_entries; i++) {
dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
}
#endif
 
107,26 → 107,26
size_t increment;
count_t count;
} sizemap[] = {
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * PAGE_SIZE, 2 } /* 8M */
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 } /* 8M */
};
ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
ASSERT(size <= 8 * 1024 * 1024);
if (size <= FRAME_SIZE)
if (size <= MMU_FRAME_SIZE)
order = 0;
else
order = (fnzb64(size - 1) + 1) - FRAME_WIDTH;
order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;
 
/*
* Use virtual addresses that are beyond the limit of physical memory.
134,8 → 134,10
* by frame_alloc().
*/
ASSERT(PA2KA(last_frame));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame), 1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size, 1 << (order + FRAME_WIDTH));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
1 << (order + FRAME_WIDTH));
for (i = 0; i < sizemap[order].count; i++) {
/*
142,8 → 144,8
* First, insert the mapping into DTLB.
*/
dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
#ifdef CONFIG_SMP
/*
150,11 → 152,11
* Second, save the information about the mapping for APs.
*/
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
virtaddr + i * sizemap[order].increment;
virtaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
physaddr + i * sizemap[order].increment;
physaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
sizemap[order].pagesize_code;
sizemap[order].pagesize_code;
bsp_locked_dtlb_entries++;
#endif
}
/branches/rcu/kernel/arch/ia64/src/mm/as.c
36,11 → 36,10
#include <arch/mm/asid.h>
#include <arch/mm/page.h>
#include <genarch/mm/as_ht.h>
#include <genarch/mm/page_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <mm/asid.h>
#include <arch.h>
#include <arch/barrier.h>
#include <synch/spinlock.h>
 
/** Architecture dependent address space init. */
void as_arch_init(void)
55,13 → 54,9
*/
void as_install_arch(as_t *as)
{
ipl_t ipl;
region_register rr;
int i;
ipl = interrupts_disable();
spinlock_lock(&as->lock);
ASSERT(as->asid != ASID_INVALID);
/*
80,9 → 75,6
}
srlz_d();
srlz_i();
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
 
/** @}
/branches/rcu/kernel/arch/ppc32/src/mm/as.c
54,12 → 54,8
void as_install_arch(as_t *as)
{
asid_t asid;
ipl_t ipl;
uint32_t sr;
 
ipl = interrupts_disable();
spinlock_lock(&as->lock);
asid = as->asid;
/* Lower 2 GB, user and supervisor access */
79,9 → 75,6
: "r" ((0x4000 << 16) + (asid << 4) + sr), "r" (sr << 28)
);
}
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
 
/** @}
/branches/rcu/kernel/arch/ppc32/src/interrupt.c
1,110 → 1,100
/*
* Copyright (c) 2006 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ppc32interrupt
* @{
*/
/** @file
*/
 
#include <ddi/irq.h>
#include <interrupt.h>
#include <arch/interrupt.h>
#include <arch/types.h>
#include <arch.h>
#include <time/clock.h>
#include <ipc/sysipc.h>
#include <arch/drivers/pic.h>
#include <arch/mm/tlb.h>
#include <print.h>
 
 
void start_decrementer(void)
{
asm volatile (
"mtdec %0\n"
:
: "r" (1000)
);
}
 
 
/** Handler of external interrupts */
static void exception_external(int n, istate_t *istate)
{
int inum;
while ((inum = pic_get_pending()) != -1) {
bool ack = false;
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Acknowledge the interrupt before processing */
pic_ack_interrupt(inum);
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
if (!ack)
pic_ack_interrupt(inum);
}
}
 
 
static void exception_decrementer(int n, istate_t *istate)
{
clock();
start_decrementer();
}
 
 
/* Initialize basic tables for exception dispatching */
void interrupt_init(void)
{
exc_register(VECTOR_DATA_STORAGE, "data_storage", pht_refill);
exc_register(VECTOR_INSTRUCTION_STORAGE, "instruction_storage", pht_refill);
exc_register(VECTOR_EXTERNAL, "external", exception_external);
exc_register(VECTOR_DECREMENTER, "timer", exception_decrementer);
}
 
/** @}
*/
/*
* Copyright (c) 2006 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ppc32interrupt
* @{
*/
/** @file
*/
 
#include <ddi/irq.h>
#include <interrupt.h>
#include <arch/interrupt.h>
#include <arch/types.h>
#include <arch.h>
#include <time/clock.h>
#include <ipc/sysipc.h>
#include <arch/drivers/pic.h>
#include <arch/mm/tlb.h>
#include <print.h>
 
 
void start_decrementer(void)
{
asm volatile (
"mtdec %0\n"
:
: "r" (1000)
);
}
 
 
/** Handler of external interrupts */
static void exception_external(int n, istate_t *istate)
{
int inum;
while ((inum = pic_get_pending()) != -1) {
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
pic_ack_interrupt(inum);
}
}
 
 
static void exception_decrementer(int n, istate_t *istate)
{
clock();
start_decrementer();
}
 
 
/* Initialize basic tables for exception dispatching */
void interrupt_init(void)
{
exc_register(VECTOR_DATA_STORAGE, "data_storage", pht_refill);
exc_register(VECTOR_INSTRUCTION_STORAGE, "instruction_storage", pht_refill);
exc_register(VECTOR_EXTERNAL, "external", exception_external);
exc_register(VECTOR_DECREMENTER, "timer", exception_decrementer);
}
 
/** @}
*/
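The preack support dropped by this revision let a device ask for the interrupt controller to be acknowledged before its handler ran. A minimal sketch of how a driver would have opted in, reusing the irq_t fields and helpers that appear in the ia32 apic.c listing further down this diff (irq_initialize, irq_register, device_assign_devno, IRQ_ACCEPT); the fake_dev_* names and the interrupt number parameter are purely illustrative:

/* Hypothetical driver opting into pre-acknowledgement. */
static irq_t fake_dev_irq;

static irq_ownership_t fake_dev_claim(void)
{
	return IRQ_ACCEPT;
}

static void fake_dev_handler(irq_t *irq, void *arg, ...)
{
	/* With preack set, the PIC was acknowledged before this runs. */
}

static void fake_dev_attach(int inr)
{
	irq_initialize(&fake_dev_irq);
	fake_dev_irq.preack = true;	/* acknowledge before the handler */
	fake_dev_irq.devno = device_assign_devno();
	fake_dev_irq.inr = inr;
	fake_dev_irq.claim = fake_dev_claim;
	fake_dev_irq.handler = fake_dev_handler;
	irq_register(&fake_dev_irq);
}

With preack removed, dispatchers such as exception_external() above simply acknowledge the controller once, after the handler (or the spurious-interrupt branch) has run.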
/branches/rcu/kernel/arch/ia32xen/src/interrupt.c
1,249 → 1,239
/*
* Copyright (c) 2006 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32xen_interrupt
* @{
*/
/** @file
*/
 
#include <arch/interrupt.h>
#include <syscall/syscall.h>
#include <print.h>
#include <debug.h>
#include <panic.h>
#include <func.h>
#include <cpu.h>
#include <arch/asm.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch.h>
#include <symtab.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <synch/spinlock.h>
#include <arch/ddi/ddi.h>
#include <ipc/sysipc.h>
#include <interrupt.h>
#include <ddi/irq.h>
 
/*
* Interrupt and exception dispatching.
*/
 
void (* disable_irqs_function)(uint16_t irqmask) = NULL;
void (* enable_irqs_function)(uint16_t irqmask) = NULL;
void (* eoi_function)(void) = NULL;
 
void decode_istate(istate_t *istate)
{
char *symbol = get_symtab_entry(istate->eip);
 
if (!symbol)
symbol = "";
 
if (CPU)
printf("----------------EXCEPTION OCCURED (cpu%d)----------------\n", CPU->id);
else
printf("----------------EXCEPTION OCCURED----------------\n");
printf("%%eip: %#x (%s)\n",istate->eip,symbol);
printf("ERROR_WORD=%#x\n", istate->error_word);
printf("%%cs=%#x,flags=%#x\n", istate->cs, istate->eflags);
printf("%%eax=%#x, %%ecx=%#x, %%edx=%#x, %%esp=%#x\n", istate->eax,istate->ecx,istate->edx,&istate->stack[0]);
#ifdef CONFIG_DEBUG_ALLREGS
printf("%%esi=%#x, %%edi=%#x, %%ebp=%#x, %%ebx=%#x\n", istate->esi,istate->edi,istate->ebp,istate->ebx);
#endif
printf("stack: %#x, %#x, %#x, %#x\n", istate->stack[0], istate->stack[1], istate->stack[2], istate->stack[3]);
printf(" %#x, %#x, %#x, %#x\n", istate->stack[4], istate->stack[5], istate->stack[6], istate->stack[7]);
}
 
static void trap_virtual_eoi(void)
{
if (eoi_function)
eoi_function();
else
panic("no eoi_function\n");
 
}
 
static void null_interrupt(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "unserviced interrupt: %d", n);
 
decode_istate(istate);
panic("unserviced interrupt: %d\n", n);
}
 
/** General Protection Fault. */
static void gp_fault(int n, istate_t *istate)
{
if (TASK) {
count_t ver;
spinlock_lock(&TASK->lock);
ver = TASK->arch.iomapver;
spinlock_unlock(&TASK->lock);
if (CPU->arch.iomapver_copy != ver) {
/*
* This fault can be caused by an early access
* to I/O port because of an out-dated
* I/O Permission bitmap installed on CPU.
* Install the fresh copy and restart
* the instruction.
*/
io_perm_bitmap_install();
return;
}
fault_if_from_uspace(istate, "general protection fault");
}
 
decode_istate(istate);
panic("general protection fault\n");
}
 
static void ss_fault(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "stack fault");
 
decode_istate(istate);
panic("stack fault\n");
}
 
static void simd_fp_exception(int n, istate_t *istate)
{
uint32_t mxcsr;
asm
(
"stmxcsr %0;\n"
:"=m"(mxcsr)
);
fault_if_from_uspace(istate, "SIMD FP exception(19), MXCSR: %#zx",
(unative_t)mxcsr);
 
decode_istate(istate);
printf("MXCSR: %#zx\n",(unative_t)(mxcsr));
panic("SIMD FP exception(19)\n");
}
 
static void nm_fault(int n, istate_t *istate)
{
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
fault_if_from_uspace(istate, "fpu fault");
panic("fpu fault");
#endif
}
 
#ifdef CONFIG_SMP
static void tlb_shootdown_ipi(int n, istate_t *istate)
{
trap_virtual_eoi();
tlb_shootdown_ipi_recv();
}
#endif
 
/** Handler of IRQ exceptions */
static void irq_interrupt(int n, istate_t *istate)
{
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
bool ack = false;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
trap_virtual_eoi();
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
if (!ack)
trap_virtual_eoi();
}
 
void interrupt_init(void)
{
int i;
for (i = 0; i < IVT_ITEMS; i++)
exc_register(i, "null", (iroutine) null_interrupt);
for (i = 0; i < IRQ_COUNT; i++) {
if ((i != IRQ_PIC_SPUR) && (i != IRQ_PIC1))
exc_register(IVT_IRQBASE + i, "irq", (iroutine) irq_interrupt);
}
exc_register(7, "nm_fault", (iroutine) nm_fault);
exc_register(12, "ss_fault", (iroutine) ss_fault);
exc_register(13, "gp_fault", (iroutine) gp_fault);
exc_register(19, "simd_fp", (iroutine) simd_fp_exception);
#ifdef CONFIG_SMP
exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", (iroutine) tlb_shootdown_ipi);
#endif
}
 
void trap_virtual_enable_irqs(uint16_t irqmask)
{
if (enable_irqs_function)
enable_irqs_function(irqmask);
else
panic("no enable_irqs_function\n");
}
 
void trap_virtual_disable_irqs(uint16_t irqmask)
{
if (disable_irqs_function)
disable_irqs_function(irqmask);
else
panic("no disable_irqs_function\n");
}
 
/** @}
*/
 
/*
* Copyright (c) 2006 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32xen_interrupt
* @{
*/
/** @file
*/
 
#include <arch/interrupt.h>
#include <syscall/syscall.h>
#include <print.h>
#include <debug.h>
#include <panic.h>
#include <func.h>
#include <cpu.h>
#include <arch/asm.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch.h>
#include <symtab.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <synch/spinlock.h>
#include <arch/ddi/ddi.h>
#include <ipc/sysipc.h>
#include <interrupt.h>
#include <ddi/irq.h>
 
/*
* Interrupt and exception dispatching.
*/
 
void (* disable_irqs_function)(uint16_t irqmask) = NULL;
void (* enable_irqs_function)(uint16_t irqmask) = NULL;
void (* eoi_function)(void) = NULL;
 
void decode_istate(istate_t *istate)
{
char *symbol = get_symtab_entry(istate->eip);
 
if (!symbol)
symbol = "";
 
if (CPU)
printf("----------------EXCEPTION OCCURED (cpu%d)----------------\n", CPU->id);
else
printf("----------------EXCEPTION OCCURED----------------\n");
printf("%%eip: %#x (%s)\n",istate->eip,symbol);
printf("ERROR_WORD=%#x\n", istate->error_word);
printf("%%cs=%#x,flags=%#x\n", istate->cs, istate->eflags);
printf("%%eax=%#x, %%ecx=%#x, %%edx=%#x, %%esp=%#x\n", istate->eax,istate->ecx,istate->edx,&istate->stack[0]);
#ifdef CONFIG_DEBUG_ALLREGS
printf("%%esi=%#x, %%edi=%#x, %%ebp=%#x, %%ebx=%#x\n", istate->esi,istate->edi,istate->ebp,istate->ebx);
#endif
printf("stack: %#x, %#x, %#x, %#x\n", istate->stack[0], istate->stack[1], istate->stack[2], istate->stack[3]);
printf(" %#x, %#x, %#x, %#x\n", istate->stack[4], istate->stack[5], istate->stack[6], istate->stack[7]);
}
 
static void trap_virtual_eoi(void)
{
if (eoi_function)
eoi_function();
else
panic("no eoi_function\n");
 
}
 
static void null_interrupt(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "unserviced interrupt: %d", n);
 
decode_istate(istate);
panic("unserviced interrupt: %d\n", n);
}
 
/** General Protection Fault. */
static void gp_fault(int n, istate_t *istate)
{
if (TASK) {
count_t ver;
spinlock_lock(&TASK->lock);
ver = TASK->arch.iomapver;
spinlock_unlock(&TASK->lock);
if (CPU->arch.iomapver_copy != ver) {
/*
* This fault can be caused by an early access
* to I/O port because of an out-dated
* I/O Permission bitmap installed on CPU.
* Install the fresh copy and restart
* the instruction.
*/
io_perm_bitmap_install();
return;
}
fault_if_from_uspace(istate, "general protection fault");
}
 
decode_istate(istate);
panic("general protection fault\n");
}
 
static void ss_fault(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "stack fault");
 
decode_istate(istate);
panic("stack fault\n");
}
 
static void simd_fp_exception(int n, istate_t *istate)
{
uint32_t mxcsr;
asm
(
"stmxcsr %0;\n"
:"=m"(mxcsr)
);
fault_if_from_uspace(istate, "SIMD FP exception(19), MXCSR: %#zx",
(unative_t)mxcsr);
 
decode_istate(istate);
printf("MXCSR: %#zx\n",(unative_t)(mxcsr));
panic("SIMD FP exception(19)\n");
}
 
static void nm_fault(int n, istate_t *istate)
{
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
fault_if_from_uspace(istate, "fpu fault");
panic("fpu fault");
#endif
}
 
#ifdef CONFIG_SMP
static void tlb_shootdown_ipi(int n, istate_t *istate)
{
trap_virtual_eoi();
tlb_shootdown_ipi_recv();
}
#endif
 
/** Handler of IRQ exceptions */
static void irq_interrupt(int n, istate_t *istate)
{
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
 
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
trap_virtual_eoi();
}
 
void interrupt_init(void)
{
int i;
for (i = 0; i < IVT_ITEMS; i++)
exc_register(i, "null", (iroutine) null_interrupt);
for (i = 0; i < IRQ_COUNT; i++) {
if ((i != IRQ_PIC_SPUR) && (i != IRQ_PIC1))
exc_register(IVT_IRQBASE + i, "irq", (iroutine) irq_interrupt);
}
exc_register(7, "nm_fault", (iroutine) nm_fault);
exc_register(12, "ss_fault", (iroutine) ss_fault);
exc_register(13, "gp_fault", (iroutine) gp_fault);
exc_register(19, "simd_fp", (iroutine) simd_fp_exception);
#ifdef CONFIG_SMP
exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", (iroutine) tlb_shootdown_ipi);
#endif
}
 
void trap_virtual_enable_irqs(uint16_t irqmask)
{
if (enable_irqs_function)
enable_irqs_function(irqmask);
else
panic("no enable_irqs_function\n");
}
 
void trap_virtual_disable_irqs(uint16_t irqmask)
{
if (disable_irqs_function)
disable_irqs_function(irqmask);
else
panic("no disable_irqs_function\n");
}
 
/** @}
*/
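Both the ia32xen and amd64 trap code in this diff reach the interrupt controller only through the enable_irqs_function, disable_irqs_function and eoi_function pointers. A short sketch of that indirection, assuming the APIC backend is in use; the io_apic_* and l_apic_eoi assignments mirror apic_init() in the ia32 apic.c listing later in this diff, while install_apic_backend() and example_unmask_irq() are invented names:

/* Invented wrapper around the assignments apic_init() performs. */
static void install_apic_backend(void)
{
	enable_irqs_function = io_apic_enable_irqs;
	disable_irqs_function = io_apic_disable_irqs;
	eoi_function = l_apic_eoi;
}

/* Generic code stays controller-agnostic; irq must be an ISA IRQ below 16. */
static void example_unmask_irq(unsigned int irq)
{
	trap_virtual_enable_irqs(1 << irq);	/* panics if no backend is installed */
}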
/branches/rcu/kernel/arch/amd64/src/interrupt.c
1,228 → 1,219
/*
* Copyright (c) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64interrupt
* @{
*/
/** @file
*/
 
#include <arch/interrupt.h>
#include <print.h>
#include <debug.h>
#include <panic.h>
#include <arch/drivers/i8259.h>
#include <func.h>
#include <cpu.h>
#include <arch/asm.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch.h>
#include <symtab.h>
#include <arch/asm.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <synch/spinlock.h>
#include <arch/ddi/ddi.h>
#include <interrupt.h>
#include <ddi/irq.h>
 
/*
* Interrupt and exception dispatching.
*/
 
void (* disable_irqs_function)(uint16_t irqmask) = NULL;
void (* enable_irqs_function)(uint16_t irqmask) = NULL;
void (* eoi_function)(void) = NULL;
 
void decode_istate(int n, istate_t *istate)
{
char *symbol;
/* uint64_t *x = &istate->stack[0]; */
 
if (!(symbol=get_symtab_entry(istate->rip)))
symbol = "";
 
printf("-----EXCEPTION(%d) OCCURED----- ( %s )\n",n, __FUNCTION__);
printf("%%rip: %#llx (%s)\n",istate->rip, symbol);
printf("ERROR_WORD=%#llx\n", istate->error_word);
printf("%%rcs=%#llx, flags=%#llx, %%cr0=%#llx\n", istate->cs, istate->rflags, read_cr0());
printf("%%rax=%#llx, %%rcx=%#llx, %%rdx=%#llx\n", istate->rax, istate->rcx, istate->rdx);
printf("%%rsi=%#llx, %%rdi=%#llx, %%r8 =%#llx\n", istate->rsi, istate->rdi, istate->r8);
printf("%%r9 =%#llx, %%r10 =%#llx, %%r11=%#llx\n", istate->r9, istate->r10, istate->r11);
#ifdef CONFIG_DEBUG_ALLREGS
printf("%%r12=%#llx, %%r13=%#llx, %%r14=%#llx\n", istate->r12, istate->r13, istate->r14);
printf("%%r15=%#llx, %%rbx=%#llx, %%rbp=%#llx\n", istate->r15, istate->rbx, &istate->rbp);
#endif
printf("%%rsp=%#llx\n", &istate->stack[0]);
}
 
static void trap_virtual_eoi(void)
{
if (eoi_function)
eoi_function();
else
panic("no eoi_function\n");
 
}
 
static void null_interrupt(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "unserviced interrupt: %d", n);
decode_istate(n, istate);
panic("unserviced interrupt\n");
}
 
/** General Protection Fault. */
static void gp_fault(int n, istate_t *istate)
{
if (TASK) {
count_t ver;
 
spinlock_lock(&TASK->lock);
ver = TASK->arch.iomapver;
spinlock_unlock(&TASK->lock);
 
if (CPU->arch.iomapver_copy != ver) {
/*
* This fault can be caused by an early access
* to I/O port because of an out-dated
* I/O Permission bitmap installed on CPU.
* Install the fresh copy and restart
* the instruction.
*/
io_perm_bitmap_install();
return;
}
fault_if_from_uspace(istate, "general protection fault");
}
 
decode_istate(n, istate);
panic("general protection fault\n");
}
 
static void ss_fault(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "stack fault");
decode_istate(n, istate);
panic("stack fault\n");
}
 
static void nm_fault(int n, istate_t *istate)
{
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
fault_if_from_uspace(istate, "fpu fault");
panic("fpu fault");
#endif
}
 
static void tlb_shootdown_ipi(int n, istate_t *istate)
{
trap_virtual_eoi();
tlb_shootdown_ipi_recv();
}
 
/** Handler of IRQ exceptions */
static void irq_interrupt(int n, istate_t *istate)
{
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
bool ack = false;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
trap_virtual_eoi();
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
if (!ack)
trap_virtual_eoi();
}
 
void interrupt_init(void)
{
int i;
for (i = 0; i < IVT_ITEMS; i++)
exc_register(i, "null", (iroutine) null_interrupt);
for (i = 0; i < IRQ_COUNT; i++) {
if ((i != IRQ_PIC_SPUR) && (i != IRQ_PIC1))
exc_register(IVT_IRQBASE + i, "irq", (iroutine) irq_interrupt);
}
exc_register(7, "nm_fault", (iroutine) nm_fault);
exc_register(12, "ss_fault", (iroutine) ss_fault);
exc_register(13, "gp_fault", (iroutine) gp_fault);
exc_register(14, "ident_mapper", (iroutine) ident_page_fault);
#ifdef CONFIG_SMP
exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", (iroutine) tlb_shootdown_ipi);
#endif
}
 
void trap_virtual_enable_irqs(uint16_t irqmask)
{
if (enable_irqs_function)
enable_irqs_function(irqmask);
else
panic("no enable_irqs_function\n");
}
 
void trap_virtual_disable_irqs(uint16_t irqmask)
{
if (disable_irqs_function)
disable_irqs_function(irqmask);
else
panic("no disable_irqs_function\n");
}
 
/** @}
*/
/*
* Copyright (c) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64interrupt
* @{
*/
/** @file
*/
 
#include <arch/interrupt.h>
#include <print.h>
#include <debug.h>
#include <panic.h>
#include <arch/drivers/i8259.h>
#include <func.h>
#include <cpu.h>
#include <arch/asm.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch.h>
#include <symtab.h>
#include <arch/asm.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <synch/spinlock.h>
#include <arch/ddi/ddi.h>
#include <interrupt.h>
#include <ddi/irq.h>
 
/*
* Interrupt and exception dispatching.
*/
 
void (* disable_irqs_function)(uint16_t irqmask) = NULL;
void (* enable_irqs_function)(uint16_t irqmask) = NULL;
void (* eoi_function)(void) = NULL;
 
void decode_istate(int n, istate_t *istate)
{
char *symbol;
/* uint64_t *x = &istate->stack[0]; */
 
if (!(symbol=get_symtab_entry(istate->rip)))
symbol = "";
 
printf("-----EXCEPTION(%d) OCCURED----- ( %s )\n",n, __FUNCTION__);
printf("%%rip: %#llx (%s)\n",istate->rip, symbol);
printf("ERROR_WORD=%#llx\n", istate->error_word);
printf("%%rcs=%#llx, flags=%#llx, %%cr0=%#llx\n", istate->cs, istate->rflags, read_cr0());
printf("%%rax=%#llx, %%rcx=%#llx, %%rdx=%#llx\n", istate->rax, istate->rcx, istate->rdx);
printf("%%rsi=%#llx, %%rdi=%#llx, %%r8 =%#llx\n", istate->rsi, istate->rdi, istate->r8);
printf("%%r9 =%#llx, %%r10 =%#llx, %%r11=%#llx\n", istate->r9, istate->r10, istate->r11);
#ifdef CONFIG_DEBUG_ALLREGS
printf("%%r12=%#llx, %%r13=%#llx, %%r14=%#llx\n", istate->r12, istate->r13, istate->r14);
printf("%%r15=%#llx, %%rbx=%#llx, %%rbp=%#llx\n", istate->r15, istate->rbx, &istate->rbp);
#endif
printf("%%rsp=%#llx\n", &istate->stack[0]);
}
 
static void trap_virtual_eoi(void)
{
if (eoi_function)
eoi_function();
else
panic("no eoi_function\n");
 
}
 
static void null_interrupt(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "unserviced interrupt: %d", n);
decode_istate(n, istate);
panic("unserviced interrupt\n");
}
 
/** General Protection Fault. */
static void gp_fault(int n, istate_t *istate)
{
if (TASK) {
count_t ver;
 
spinlock_lock(&TASK->lock);
ver = TASK->arch.iomapver;
spinlock_unlock(&TASK->lock);
 
if (CPU->arch.iomapver_copy != ver) {
/*
* This fault can be caused by an early access
* to I/O port because of an out-dated
* I/O Permission bitmap installed on CPU.
* Install the fresh copy and restart
* the instruction.
*/
io_perm_bitmap_install();
return;
}
fault_if_from_uspace(istate, "general protection fault");
}
 
decode_istate(n, istate);
panic("general protection fault\n");
}
 
static void ss_fault(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "stack fault");
decode_istate(n, istate);
panic("stack fault\n");
}
 
static void nm_fault(int n, istate_t *istate)
{
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
fault_if_from_uspace(istate, "fpu fault");
panic("fpu fault");
#endif
}
 
static void tlb_shootdown_ipi(int n, istate_t *istate)
{
trap_virtual_eoi();
tlb_shootdown_ipi_recv();
}
 
/** Handler of IRQ exceptions */
static void irq_interrupt(int n, istate_t *istate)
{
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
 
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
trap_virtual_eoi();
}
 
void interrupt_init(void)
{
int i;
for (i = 0; i < IVT_ITEMS; i++)
exc_register(i, "null", (iroutine) null_interrupt);
for (i = 0; i < IRQ_COUNT; i++) {
if ((i != IRQ_PIC_SPUR) && (i != IRQ_PIC1))
exc_register(IVT_IRQBASE + i, "irq", (iroutine) irq_interrupt);
}
exc_register(7, "nm_fault", (iroutine) nm_fault);
exc_register(12, "ss_fault", (iroutine) ss_fault);
exc_register(13, "gp_fault", (iroutine) gp_fault);
exc_register(14, "ident_mapper", (iroutine) ident_page_fault);
#ifdef CONFIG_SMP
exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", (iroutine) tlb_shootdown_ipi);
#endif
}
 
void trap_virtual_enable_irqs(uint16_t irqmask)
{
if (enable_irqs_function)
enable_irqs_function(irqmask);
else
panic("no enable_irqs_function\n");
}
 
void trap_virtual_disable_irqs(uint16_t irqmask)
{
if (disable_irqs_function)
disable_irqs_function(irqmask);
else
panic("no disable_irqs_function\n");
}
 
/** @}
*/
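The gp_fault() handlers above refresh a stale I/O permission bitmap lazily by comparing CPU->arch.iomapver_copy against TASK->arch.iomapver. A hedged sketch of the other half of that protocol, i.e. what code granting new I/O ranges would do; only the iomapver fields, the task lock and io_perm_bitmap_install() come from the listings, the helper name and the task_t parameter are assumptions:

/* Invented helper illustrating the versioning side of the protocol. */
void grant_io_range_to_task(task_t *task)
{
	spinlock_lock(&task->lock);
	/* ... extend the task's I/O permission bitmap here ... */
	task->arch.iomapver++;		/* invalidate every CPU's cached copy */
	spinlock_unlock(&task->lock);

	/*
	 * No IPI is needed: a CPU still holding the old bitmap takes a general
	 * protection fault on the next denied I/O instruction, gp_fault() sees
	 * the version mismatch, calls io_perm_bitmap_install() and restarts
	 * the faulting instruction.
	 */
}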
/branches/rcu/kernel/arch/ppc64/src/mm/as.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ppc64mm
/** @addtogroup ppc64mm
* @{
*/
/** @file
41,6 → 41,6
as_operations = &as_pt_operations;
}
 
/** @}
/** @}
*/
 
/branches/rcu/kernel/arch/ppc64/src/interrupt.c
1,108 → 1,98
/*
* Copyright (c) 2006 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ppc64interrupt
* @{
*/
/** @file
*/
 
#include <ddi/irq.h>
#include <interrupt.h>
#include <arch/interrupt.h>
#include <arch/types.h>
#include <arch.h>
#include <time/clock.h>
#include <ipc/sysipc.h>
#include <arch/drivers/pic.h>
#include <arch/mm/tlb.h>
#include <print.h>
 
 
void start_decrementer(void)
{
asm volatile (
"mtdec %0\n"
:
: "r" (1000)
);
}
 
 
/** Handler of external interrupts */
static void exception_external(int n, istate_t *istate)
{
int inum;
while ((inum = pic_get_pending()) != -1) {
bool ack = false;
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Acknowledge the interrupt before processing */
pic_ack_interrupt(inum);
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
if (!ack)
pic_ack_interrupt(inum);
}
}
 
 
static void exception_decrementer(int n, istate_t *istate)
{
clock();
start_decrementer();
}
 
 
/* Initialize basic tables for exception dispatching */
void interrupt_init(void)
{
exc_register(VECTOR_EXTERNAL, "external", exception_external);
exc_register(VECTOR_DECREMENTER, "timer", exception_decrementer);
}
 
/** @}
*/
/*
* Copyright (c) 2006 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ppc64interrupt
* @{
*/
/** @file
*/
 
#include <ddi/irq.h>
#include <interrupt.h>
#include <arch/interrupt.h>
#include <arch/types.h>
#include <arch.h>
#include <time/clock.h>
#include <ipc/sysipc.h>
#include <arch/drivers/pic.h>
#include <arch/mm/tlb.h>
#include <print.h>
 
 
void start_decrementer(void)
{
asm volatile (
"mtdec %0\n"
:
: "r" (1000)
);
}
 
 
/** Handler of external interrupts */
static void exception_external(int n, istate_t *istate)
{
int inum;
while ((inum = pic_get_pending()) != -1) {
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
pic_ack_interrupt(inum);
}
}
 
 
static void exception_decrementer(int n, istate_t *istate)
{
clock();
start_decrementer();
}
 
 
/* Initialize basic tables for exception dispatching */
void interrupt_init(void)
{
exc_register(VECTOR_EXTERNAL, "external", exception_external);
exc_register(VECTOR_DECREMENTER, "timer", exception_decrementer);
}
 
/** @}
*/
/branches/rcu/kernel/arch/mips32/src/mm/as.c
34,12 → 34,12
 
#include <arch/mm/as.h>
#include <genarch/mm/as_pt.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/asid_fifo.h>
#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch/cp0.h>
#include <arch.h>
 
/** Architecture dependent address space init. */
void as_arch_init(void)
57,7 → 57,6
void as_install_arch(as_t *as)
{
entry_hi_t hi;
ipl_t ipl;
 
/*
* Install ASID.
64,12 → 63,8
*/
hi.value = cp0_entry_hi_read();
 
ipl = interrupts_disable();
spinlock_lock(&as->lock);
hi.asid = as->asid;
cp0_entry_hi_write(hi.value);
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
 
/** @}
/branches/rcu/kernel/arch/ia32/src/smp/apic.c
1,595 → 1,587
/*
* Copyright (c) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32
* @{
*/
/** @file
*/
 
#include <arch/types.h>
#include <arch/smp/apic.h>
#include <arch/smp/ap.h>
#include <arch/smp/mps.h>
#include <arch/boot/boot.h>
#include <mm/page.h>
#include <time/delay.h>
#include <interrupt.h>
#include <arch/interrupt.h>
#include <print.h>
#include <arch/asm.h>
#include <arch.h>
#include <ddi/irq.h>
#include <ddi/device.h>
 
#ifdef CONFIG_SMP
 
/*
* Advanced Programmable Interrupt Controller for SMP systems.
* Tested on:
* Bochs 2.0.2 - Bochs 2.2.6 with 2-8 CPUs
* Simics 2.0.28 - Simics 2.2.19 2-15 CPUs
* VMware Workstation 5.5 with 2 CPUs
* QEMU 0.8.0 with 2-15 CPUs
 * ASUS P/I-P65UP5 + ASUS C-P55T2D REV. 1.41 with 2x 200 MHz Pentium CPUs
 * ASUS PCH-DL with 2x 3000 MHz Pentium 4 Xeon (HT) CPUs
 * MSI K7D Master-L with 2x 2100 MHz Athlon MP CPUs
*/
 
/*
 * These variables either keep their initial values, or are changed by
 * the MP configuration code.
 *
 * Pay special attention to the volatile keyword. Without it, gcc -O2 would
 * optimize the code too much and accesses to l_apic and io_apic, which must
 * always be 32-bit wide, would use byte-oriented instructions.
*/
volatile uint32_t *l_apic = (uint32_t *) 0xfee00000;
volatile uint32_t *io_apic = (uint32_t *) 0xfec00000;
 
uint32_t apic_id_mask = 0;
static irq_t l_apic_timer_irq;
 
static int apic_poll_errors(void);
 
#ifdef LAPIC_VERBOSE
static char *delmod_str[] = {
"Fixed",
"Lowest Priority",
"SMI",
"Reserved",
"NMI",
"INIT",
"STARTUP",
"ExtInt"
};
 
static char *destmod_str[] = {
"Physical",
"Logical"
};
 
static char *trigmod_str[] = {
"Edge",
"Level"
};
 
static char *mask_str[] = {
"Unmasked",
"Masked"
};
 
static char *delivs_str[] = {
"Idle",
"Send Pending"
};
 
static char *tm_mode_str[] = {
"One-shot",
"Periodic"
};
 
static char *intpol_str[] = {
"Polarity High",
"Polarity Low"
};
#endif /* LAPIC_VERBOSE */
 
/** APIC spurious interrupt handler.
*
* @param n Interrupt vector.
* @param istate Interrupted state.
*/
static void apic_spurious(int n, istate_t *istate)
{
#ifdef CONFIG_DEBUG
printf("cpu%d: APIC spurious interrupt\n", CPU->id);
#endif
}
 
static irq_ownership_t l_apic_timer_claim(void)
{
return IRQ_ACCEPT;
}
 
static void l_apic_timer_irq_handler(irq_t *irq, void *arg, ...)
{
/*
* Holding a spinlock could prevent clock() from preempting
* the current thread. In this case, we don't need to hold the
* irq->lock so we just unlock it and then lock it again.
*/
spinlock_unlock(&irq->lock);
clock();
spinlock_lock(&irq->lock);
}
 
/** Initialize APIC on BSP. */
void apic_init(void)
{
io_apic_id_t idreg;
unsigned int i;
 
exc_register(VECTOR_APIC_SPUR, "apic_spurious", (iroutine) apic_spurious);
 
enable_irqs_function = io_apic_enable_irqs;
disable_irqs_function = io_apic_disable_irqs;
eoi_function = l_apic_eoi;
/*
* Configure interrupt routing.
 * IRQ 0 remains masked because the timer signal is generated by the local APICs themselves.
* Other interrupts will be forwarded to the lowest priority CPU.
*/
io_apic_disable_irqs(0xffff);
irq_initialize(&l_apic_timer_irq);
l_apic_timer_irq.preack = true;
l_apic_timer_irq.devno = device_assign_devno();
l_apic_timer_irq.inr = IRQ_CLK;
l_apic_timer_irq.claim = l_apic_timer_claim;
l_apic_timer_irq.handler = l_apic_timer_irq_handler;
irq_register(&l_apic_timer_irq);
for (i = 0; i < IRQ_COUNT; i++) {
int pin;
if ((pin = smp_irq_to_pin(i)) != -1)
io_apic_change_ioredtbl(pin, DEST_ALL, IVT_IRQBASE + i, LOPRI);
}
/*
* Ensure that io_apic has unique ID.
*/
idreg.value = io_apic_read(IOAPICID);
if ((1 << idreg.apic_id) & apic_id_mask) { /* see if IO APIC ID is used already */
for (i = 0; i < APIC_ID_COUNT; i++) {
if (!((1 << i) & apic_id_mask)) {
idreg.apic_id = i;
io_apic_write(IOAPICID, idreg.value);
break;
}
}
}
 
/*
* Configure the BSP's lapic.
*/
l_apic_init();
 
l_apic_debug();
}
 
/** Poll for APIC errors.
*
* Examine Error Status Register and report all errors found.
*
* @return 0 on error, 1 on success.
*/
int apic_poll_errors(void)
{
esr_t esr;
esr.value = l_apic[ESR];
if (esr.send_checksum_error)
printf("Send Checksum Error\n");
if (esr.receive_checksum_error)
printf("Receive Checksum Error\n");
if (esr.send_accept_error)
printf("Send Accept Error\n");
if (esr.receive_accept_error)
printf("Receive Accept Error\n");
if (esr.send_illegal_vector)
printf("Send Illegal Vector\n");
if (esr.received_illegal_vector)
printf("Received Illegal Vector\n");
if (esr.illegal_register_address)
printf("Illegal Register Address\n");
 
return !esr.err_bitmap;
}
 
/** Send an IPI vector to all CPUs excluding the current CPU.
*
* @param vector Interrupt vector to be sent.
*
* @return 0 on failure, 1 on success.
*/
int l_apic_broadcast_custom_ipi(uint8_t vector)
{
icr_t icr;
 
icr.lo = l_apic[ICRlo];
icr.delmod = DELMOD_FIXED;
icr.destmod = DESTMOD_LOGIC;
icr.level = LEVEL_ASSERT;
icr.shorthand = SHORTHAND_ALL_EXCL;
icr.trigger_mode = TRIGMOD_LEVEL;
icr.vector = vector;
 
l_apic[ICRlo] = icr.lo;
 
icr.lo = l_apic[ICRlo];
if (icr.delivs == DELIVS_PENDING) {
#ifdef CONFIG_DEBUG
printf("IPI is pending.\n");
#endif
}
 
return apic_poll_errors();
}
 
/** Universal Start-up Algorithm for bringing up the AP processors.
*
* @param apicid APIC ID of the processor to be brought up.
*
* @return 0 on failure, 1 on success.
*/
int l_apic_send_init_ipi(uint8_t apicid)
{
icr_t icr;
int i;
 
/*
* Read the ICR register in and zero all non-reserved fields.
*/
icr.lo = l_apic[ICRlo];
icr.hi = l_apic[ICRhi];
icr.delmod = DELMOD_INIT;
icr.destmod = DESTMOD_PHYS;
icr.level = LEVEL_ASSERT;
icr.trigger_mode = TRIGMOD_LEVEL;
icr.shorthand = SHORTHAND_NONE;
icr.vector = 0;
icr.dest = apicid;
l_apic[ICRhi] = icr.hi;
l_apic[ICRlo] = icr.lo;
 
/*
* According to MP Specification, 20us should be enough to
* deliver the IPI.
*/
delay(20);
 
if (!apic_poll_errors())
return 0;
 
icr.lo = l_apic[ICRlo];
if (icr.delivs == DELIVS_PENDING) {
#ifdef CONFIG_DEBUG
printf("IPI is pending.\n");
#endif
}
 
icr.delmod = DELMOD_INIT;
icr.destmod = DESTMOD_PHYS;
icr.level = LEVEL_DEASSERT;
icr.shorthand = SHORTHAND_NONE;
icr.trigger_mode = TRIGMOD_LEVEL;
icr.vector = 0;
l_apic[ICRlo] = icr.lo;
 
/*
* Wait 10ms as MP Specification specifies.
*/
delay(10000);
 
if (!is_82489DX_apic(l_apic[LAVR])) {
/*
	 * If this is not an 82489DX-based l_apic, we must send two STARTUP IPIs.
*/
for (i = 0; i<2; i++) {
icr.lo = l_apic[ICRlo];
icr.vector = ((uintptr_t) ap_boot) / 4096; /* calculate the reset vector */
icr.delmod = DELMOD_STARTUP;
icr.destmod = DESTMOD_PHYS;
icr.level = LEVEL_ASSERT;
icr.shorthand = SHORTHAND_NONE;
icr.trigger_mode = TRIGMOD_LEVEL;
l_apic[ICRlo] = icr.lo;
delay(200);
}
}
return apic_poll_errors();
}
 
/** Initialize Local APIC. */
void l_apic_init(void)
{
lvt_error_t error;
lvt_lint_t lint;
tpr_t tpr;
svr_t svr;
icr_t icr;
tdcr_t tdcr;
lvt_tm_t tm;
ldr_t ldr;
dfr_t dfr;
uint32_t t1, t2;
 
/* Initialize LVT Error register. */
error.value = l_apic[LVT_Err];
error.masked = true;
l_apic[LVT_Err] = error.value;
 
/* Initialize LVT LINT0 register. */
lint.value = l_apic[LVT_LINT0];
lint.masked = true;
l_apic[LVT_LINT0] = lint.value;
 
/* Initialize LVT LINT1 register. */
lint.value = l_apic[LVT_LINT1];
lint.masked = true;
l_apic[LVT_LINT1] = lint.value;
 
/* Task Priority Register initialization. */
tpr.value = l_apic[TPR];
tpr.pri_sc = 0;
tpr.pri = 0;
l_apic[TPR] = tpr.value;
/* Spurious-Interrupt Vector Register initialization. */
svr.value = l_apic[SVR];
svr.vector = VECTOR_APIC_SPUR;
svr.lapic_enabled = true;
svr.focus_checking = true;
l_apic[SVR] = svr.value;
 
if (CPU->arch.family >= 6)
enable_l_apic_in_msr();
/* Interrupt Command Register initialization. */
icr.lo = l_apic[ICRlo];
icr.delmod = DELMOD_INIT;
icr.destmod = DESTMOD_PHYS;
icr.level = LEVEL_DEASSERT;
icr.shorthand = SHORTHAND_ALL_INCL;
icr.trigger_mode = TRIGMOD_LEVEL;
l_apic[ICRlo] = icr.lo;
/* Timer Divide Configuration Register initialization. */
tdcr.value = l_apic[TDCR];
tdcr.div_value = DIVIDE_1;
l_apic[TDCR] = tdcr.value;
 
/* Program local timer. */
tm.value = l_apic[LVT_Tm];
tm.vector = VECTOR_CLK;
tm.mode = TIMER_PERIODIC;
tm.masked = false;
l_apic[LVT_Tm] = tm.value;
 
/*
* Measure and configure the timer to generate timer
* interrupt with period 1s/HZ seconds.
*/
t1 = l_apic[CCRT];
l_apic[ICRT] = 0xffffffff;
 
while (l_apic[CCRT] == t1)
;
t1 = l_apic[CCRT];
delay(1000000/HZ);
t2 = l_apic[CCRT];
l_apic[ICRT] = t1-t2;
/* Program Logical Destination Register. */
ldr.value = l_apic[LDR];
if (CPU->id < sizeof(CPU->id)*8) /* size in bits */
ldr.id = (1<<CPU->id);
l_apic[LDR] = ldr.value;
/* Program Destination Format Register for Flat mode. */
dfr.value = l_apic[DFR];
dfr.model = MODEL_FLAT;
l_apic[DFR] = dfr.value;
}
 
/** Local APIC End of Interrupt. */
void l_apic_eoi(void)
{
l_apic[EOI] = 0;
}
 
/** Dump content of Local APIC registers. */
void l_apic_debug(void)
{
#ifdef LAPIC_VERBOSE
lvt_tm_t tm;
lvt_lint_t lint;
lvt_error_t error;
printf("LVT on cpu%d, LAPIC ID: %d\n", CPU->id, l_apic_id());
 
tm.value = l_apic[LVT_Tm];
printf("LVT Tm: vector=%hhd, %s, %s, %s\n", tm.vector, delivs_str[tm.delivs], mask_str[tm.masked], tm_mode_str[tm.mode]);
lint.value = l_apic[LVT_LINT0];
printf("LVT LINT0: vector=%hhd, %s, %s, %s, irr=%d, %s, %s\n", tm.vector, delmod_str[lint.delmod], delivs_str[lint.delivs], intpol_str[lint.intpol], lint.irr, trigmod_str[lint.trigger_mode], mask_str[lint.masked]);
lint.value = l_apic[LVT_LINT1];
printf("LVT LINT1: vector=%hhd, %s, %s, %s, irr=%d, %s, %s\n", tm.vector, delmod_str[lint.delmod], delivs_str[lint.delivs], intpol_str[lint.intpol], lint.irr, trigmod_str[lint.trigger_mode], mask_str[lint.masked]);
error.value = l_apic[LVT_Err];
printf("LVT Err: vector=%hhd, %s, %s\n", error.vector, delivs_str[error.delivs], mask_str[error.masked]);
#endif
}
 
/** Get Local APIC ID.
*
* @return Local APIC ID.
*/
uint8_t l_apic_id(void)
{
l_apic_id_t idreg;
idreg.value = l_apic[L_APIC_ID];
return idreg.apic_id;
}
 
/** Read from IO APIC register.
*
* @param address IO APIC register address.
*
* @return Content of the addressed IO APIC register.
*/
uint32_t io_apic_read(uint8_t address)
{
io_regsel_t regsel;
regsel.value = io_apic[IOREGSEL];
regsel.reg_addr = address;
io_apic[IOREGSEL] = regsel.value;
return io_apic[IOWIN];
}
 
/** Write to IO APIC register.
*
* @param address IO APIC register address.
* @param x Content to be written to the addressed IO APIC register.
*/
void io_apic_write(uint8_t address, uint32_t x)
{
io_regsel_t regsel;
regsel.value = io_apic[IOREGSEL];
regsel.reg_addr = address;
io_apic[IOREGSEL] = regsel.value;
io_apic[IOWIN] = x;
}
 
/** Change some attributes of one item in I/O Redirection Table.
*
* @param pin IO APIC pin number.
* @param dest Interrupt destination address.
* @param v Interrupt vector to trigger.
* @param flags Flags.
*/
void io_apic_change_ioredtbl(int pin, int dest, uint8_t v, int flags)
{
io_redirection_reg_t reg;
int dlvr = DELMOD_FIXED;
if (flags & LOPRI)
dlvr = DELMOD_LOWPRI;
 
reg.lo = io_apic_read(IOREDTBL + pin*2);
reg.hi = io_apic_read(IOREDTBL + pin*2 + 1);
reg.dest = dest;
reg.destmod = DESTMOD_LOGIC;
reg.trigger_mode = TRIGMOD_EDGE;
reg.intpol = POLARITY_HIGH;
reg.delmod = dlvr;
reg.intvec = v;
 
io_apic_write(IOREDTBL + pin*2, reg.lo);
io_apic_write(IOREDTBL + pin*2 + 1, reg.hi);
}
 
/** Mask IRQs in IO APIC.
*
* @param irqmask Bitmask of IRQs to be masked (0 = do not mask, 1 = mask).
*/
void io_apic_disable_irqs(uint16_t irqmask)
{
io_redirection_reg_t reg;
unsigned int i;
int pin;
for (i = 0; i < 16; i++) {
if (irqmask & (1 << i)) {
/*
* Mask the signal input in IO APIC if there is a
* mapping for the respective IRQ number.
*/
pin = smp_irq_to_pin(i);
if (pin != -1) {
reg.lo = io_apic_read(IOREDTBL + pin * 2);
reg.masked = true;
io_apic_write(IOREDTBL + pin * 2, reg.lo);
}
}
}
}
 
/** Unmask IRQs in IO APIC.
*
* @param irqmask Bitmask of IRQs to be unmasked (0 = do not unmask, 1 = unmask).
*/
void io_apic_enable_irqs(uint16_t irqmask)
{
unsigned int i;
int pin;
io_redirection_reg_t reg;
for (i = 0;i < 16; i++) {
if (irqmask & (1 << i)) {
/*
* Unmask the signal input in IO APIC if there is a
* mapping for the respective IRQ number.
*/
pin = smp_irq_to_pin(i);
if (pin != -1) {
reg.lo = io_apic_read(IOREDTBL + pin * 2);
reg.masked = false;
io_apic_write(IOREDTBL + pin * 2, reg.lo);
}
}
}
}
 
#endif /* CONFIG_SMP */
 
/** @}
*/
/*
* Copyright (c) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32
* @{
*/
/** @file
*/
 
#include <arch/types.h>
#include <arch/smp/apic.h>
#include <arch/smp/ap.h>
#include <arch/smp/mps.h>
#include <arch/boot/boot.h>
#include <mm/page.h>
#include <time/delay.h>
#include <interrupt.h>
#include <arch/interrupt.h>
#include <print.h>
#include <arch/asm.h>
#include <arch.h>
#include <ddi/irq.h>
#include <ddi/device.h>
 
#ifdef CONFIG_SMP
 
/*
* Advanced Programmable Interrupt Controller for SMP systems.
* Tested on:
* Bochs 2.0.2 - Bochs 2.2.6 with 2-8 CPUs
* Simics 2.0.28 - Simics 2.2.19 2-15 CPUs
* VMware Workstation 5.5 with 2 CPUs
* QEMU 0.8.0 with 2-15 CPUs
 * ASUS P/I-P65UP5 + ASUS C-P55T2D REV. 1.41 with 2x 200 MHz Pentium CPUs
 * ASUS PCH-DL with 2x 3000 MHz Pentium 4 Xeon (HT) CPUs
 * MSI K7D Master-L with 2x 2100 MHz Athlon MP CPUs
*/
 
/*
 * These variables either keep their initial values, or are changed by
 * the MP configuration code.
 *
 * Pay special attention to the volatile keyword. Without it, gcc -O2 would
 * optimize the code too much and accesses to l_apic and io_apic, which must
 * always be 32-bit wide, would use byte-oriented instructions.
*/
volatile uint32_t *l_apic = (uint32_t *) 0xfee00000;
volatile uint32_t *io_apic = (uint32_t *) 0xfec00000;
 
uint32_t apic_id_mask = 0;
static irq_t l_apic_timer_irq;
 
static int apic_poll_errors(void);
 
#ifdef LAPIC_VERBOSE
static char *delmod_str[] = {
"Fixed",
"Lowest Priority",
"SMI",
"Reserved",
"NMI",
"INIT",
"STARTUP",
"ExtInt"
};
 
static char *destmod_str[] = {
"Physical",
"Logical"
};
 
static char *trigmod_str[] = {
"Edge",
"Level"
};
 
static char *mask_str[] = {
"Unmasked",
"Masked"
};
 
static char *delivs_str[] = {
"Idle",
"Send Pending"
};
 
static char *tm_mode_str[] = {
"One-shot",
"Periodic"
};
 
static char *intpol_str[] = {
"Polarity High",
"Polarity Low"
};
#endif /* LAPIC_VERBOSE */
 
/** APIC spurious interrupt handler.
*
* @param n Interrupt vector.
* @param istate Interrupted state.
*/
static void apic_spurious(int n, istate_t *istate)
{
#ifdef CONFIG_DEBUG
printf("cpu%d: APIC spurious interrupt\n", CPU->id);
#endif
}
 
static irq_ownership_t l_apic_timer_claim(void)
{
return IRQ_ACCEPT;
}
 
static void l_apic_timer_irq_handler(irq_t *irq, void *arg, ...)
{
clock();
}
 
/** Initialize APIC on BSP. */
void apic_init(void)
{
io_apic_id_t idreg;
unsigned int i;
 
exc_register(VECTOR_APIC_SPUR, "apic_spurious", (iroutine) apic_spurious);
 
enable_irqs_function = io_apic_enable_irqs;
disable_irqs_function = io_apic_disable_irqs;
eoi_function = l_apic_eoi;
/*
* Configure interrupt routing.
 * IRQ 0 remains masked because the timer signal is generated by the local APICs themselves.
* Other interrupts will be forwarded to the lowest priority CPU.
*/
io_apic_disable_irqs(0xffff);
irq_initialize(&l_apic_timer_irq);
l_apic_timer_irq.devno = device_assign_devno();
l_apic_timer_irq.inr = IRQ_CLK;
l_apic_timer_irq.claim = l_apic_timer_claim;
l_apic_timer_irq.handler = l_apic_timer_irq_handler;
irq_register(&l_apic_timer_irq);
for (i = 0; i < IRQ_COUNT; i++) {
int pin;
if ((pin = smp_irq_to_pin(i)) != -1)
io_apic_change_ioredtbl(pin, DEST_ALL, IVT_IRQBASE + i, LOPRI);
}
/*
* Ensure that io_apic has unique ID.
*/
idreg.value = io_apic_read(IOAPICID);
if ((1 << idreg.apic_id) & apic_id_mask) { /* see if IO APIC ID is used already */
for (i = 0; i < APIC_ID_COUNT; i++) {
if (!((1 << i) & apic_id_mask)) {
idreg.apic_id = i;
io_apic_write(IOAPICID, idreg.value);
break;
}
}
}
 
/*
* Configure the BSP's lapic.
*/
l_apic_init();
 
l_apic_debug();
}
 
/** Poll for APIC errors.
*
* Examine Error Status Register and report all errors found.
*
* @return 0 on error, 1 on success.
*/
int apic_poll_errors(void)
{
esr_t esr;
esr.value = l_apic[ESR];
if (esr.send_checksum_error)
printf("Send Checksum Error\n");
if (esr.receive_checksum_error)
printf("Receive Checksum Error\n");
if (esr.send_accept_error)
printf("Send Accept Error\n");
if (esr.receive_accept_error)
printf("Receive Accept Error\n");
if (esr.send_illegal_vector)
printf("Send Illegal Vector\n");
if (esr.received_illegal_vector)
printf("Received Illegal Vector\n");
if (esr.illegal_register_address)
printf("Illegal Register Address\n");
 
return !esr.err_bitmap;
}
 
/** Send an IPI vector to all CPUs excluding the current CPU.
*
* @param vector Interrupt vector to be sent.
*
* @return 0 on failure, 1 on success.
*/
int l_apic_broadcast_custom_ipi(uint8_t vector)
{
icr_t icr;
 
icr.lo = l_apic[ICRlo];
icr.delmod = DELMOD_FIXED;
icr.destmod = DESTMOD_LOGIC;
icr.level = LEVEL_ASSERT;
icr.shorthand = SHORTHAND_ALL_EXCL;
icr.trigger_mode = TRIGMOD_LEVEL;
icr.vector = vector;
 
l_apic[ICRlo] = icr.lo;
 
icr.lo = l_apic[ICRlo];
if (icr.delivs == DELIVS_PENDING) {
#ifdef CONFIG_DEBUG
printf("IPI is pending.\n");
#endif
}
 
return apic_poll_errors();
}
 
/** Universal Start-up Algorithm for bringing up the AP processors.
*
* @param apicid APIC ID of the processor to be brought up.
*
* @return 0 on failure, 1 on success.
*/
int l_apic_send_init_ipi(uint8_t apicid)
{
icr_t icr;
int i;
 
/*
* Read the ICR register in and zero all non-reserved fields.
*/
icr.lo = l_apic[ICRlo];
icr.hi = l_apic[ICRhi];
icr.delmod = DELMOD_INIT;
icr.destmod = DESTMOD_PHYS;
icr.level = LEVEL_ASSERT;
icr.trigger_mode = TRIGMOD_LEVEL;
icr.shorthand = SHORTHAND_NONE;
icr.vector = 0;
icr.dest = apicid;
l_apic[ICRhi] = icr.hi;
l_apic[ICRlo] = icr.lo;
 
/*
* According to MP Specification, 20us should be enough to
* deliver the IPI.
*/
delay(20);
 
if (!apic_poll_errors())
return 0;
 
icr.lo = l_apic[ICRlo];
if (icr.delivs == DELIVS_PENDING) {
#ifdef CONFIG_DEBUG
printf("IPI is pending.\n");
#endif
}
 
icr.delmod = DELMOD_INIT;
icr.destmod = DESTMOD_PHYS;
icr.level = LEVEL_DEASSERT;
icr.shorthand = SHORTHAND_NONE;
icr.trigger_mode = TRIGMOD_LEVEL;
icr.vector = 0;
l_apic[ICRlo] = icr.lo;
 
/*
* Wait 10ms as MP Specification specifies.
*/
delay(10000);
 
if (!is_82489DX_apic(l_apic[LAVR])) {
/*
	 * If this is not an 82489DX-based l_apic, we must send two STARTUP IPIs.
*/
for (i = 0; i<2; i++) {
icr.lo = l_apic[ICRlo];
icr.vector = ((uintptr_t) ap_boot) / 4096; /* calculate the reset vector */
icr.delmod = DELMOD_STARTUP;
icr.destmod = DESTMOD_PHYS;
icr.level = LEVEL_ASSERT;
icr.shorthand = SHORTHAND_NONE;
icr.trigger_mode = TRIGMOD_LEVEL;
l_apic[ICRlo] = icr.lo;
delay(200);
}
}
return apic_poll_errors();
}
 
/** Initialize Local APIC. */
void l_apic_init(void)
{
lvt_error_t error;
lvt_lint_t lint;
tpr_t tpr;
svr_t svr;
icr_t icr;
tdcr_t tdcr;
lvt_tm_t tm;
ldr_t ldr;
dfr_t dfr;
uint32_t t1, t2;
 
/* Initialize LVT Error register. */
error.value = l_apic[LVT_Err];
error.masked = true;
l_apic[LVT_Err] = error.value;
 
/* Initialize LVT LINT0 register. */
lint.value = l_apic[LVT_LINT0];
lint.masked = true;
l_apic[LVT_LINT0] = lint.value;
 
/* Initialize LVT LINT1 register. */
lint.value = l_apic[LVT_LINT1];
lint.masked = true;
l_apic[LVT_LINT1] = lint.value;
 
/* Task Priority Register initialization. */
tpr.value = l_apic[TPR];
tpr.pri_sc = 0;
tpr.pri = 0;
l_apic[TPR] = tpr.value;
/* Spurious-Interrupt Vector Register initialization. */
svr.value = l_apic[SVR];
svr.vector = VECTOR_APIC_SPUR;
svr.lapic_enabled = true;
svr.focus_checking = true;
l_apic[SVR] = svr.value;
 
if (CPU->arch.family >= 6)
enable_l_apic_in_msr();
/* Interrupt Command Register initialization. */
icr.lo = l_apic[ICRlo];
icr.delmod = DELMOD_INIT;
icr.destmod = DESTMOD_PHYS;
icr.level = LEVEL_DEASSERT;
icr.shorthand = SHORTHAND_ALL_INCL;
icr.trigger_mode = TRIGMOD_LEVEL;
l_apic[ICRlo] = icr.lo;
/* Timer Divide Configuration Register initialization. */
tdcr.value = l_apic[TDCR];
tdcr.div_value = DIVIDE_1;
l_apic[TDCR] = tdcr.value;
 
/* Program local timer. */
tm.value = l_apic[LVT_Tm];
tm.vector = VECTOR_CLK;
tm.mode = TIMER_PERIODIC;
tm.masked = false;
l_apic[LVT_Tm] = tm.value;
 
/*
* Measure and configure the timer to generate timer
* interrupt with period 1s/HZ seconds.
*/
t1 = l_apic[CCRT];
l_apic[ICRT] = 0xffffffff;
 
while (l_apic[CCRT] == t1)
;
t1 = l_apic[CCRT];
delay(1000000/HZ);
t2 = l_apic[CCRT];
l_apic[ICRT] = t1-t2;
/* Program Logical Destination Register. */
ldr.value = l_apic[LDR];
if (CPU->id < sizeof(CPU->id)*8) /* size in bits */
ldr.id = (1<<CPU->id);
l_apic[LDR] = ldr.value;
/* Program Destination Format Register for Flat mode. */
dfr.value = l_apic[DFR];
dfr.model = MODEL_FLAT;
l_apic[DFR] = dfr.value;
}
 
/** Local APIC End of Interrupt. */
void l_apic_eoi(void)
{
l_apic[EOI] = 0;
}
 
/** Dump content of Local APIC registers. */
void l_apic_debug(void)
{
#ifdef LAPIC_VERBOSE
lvt_tm_t tm;
lvt_lint_t lint;
lvt_error_t error;
printf("LVT on cpu%d, LAPIC ID: %d\n", CPU->id, l_apic_id());
 
tm.value = l_apic[LVT_Tm];
printf("LVT Tm: vector=%hhd, %s, %s, %s\n", tm.vector, delivs_str[tm.delivs], mask_str[tm.masked], tm_mode_str[tm.mode]);
lint.value = l_apic[LVT_LINT0];
printf("LVT LINT0: vector=%hhd, %s, %s, %s, irr=%d, %s, %s\n", tm.vector, delmod_str[lint.delmod], delivs_str[lint.delivs], intpol_str[lint.intpol], lint.irr, trigmod_str[lint.trigger_mode], mask_str[lint.masked]);
lint.value = l_apic[LVT_LINT1];
printf("LVT LINT1: vector=%hhd, %s, %s, %s, irr=%d, %s, %s\n", tm.vector, delmod_str[lint.delmod], delivs_str[lint.delivs], intpol_str[lint.intpol], lint.irr, trigmod_str[lint.trigger_mode], mask_str[lint.masked]);
error.value = l_apic[LVT_Err];
printf("LVT Err: vector=%hhd, %s, %s\n", error.vector, delivs_str[error.delivs], mask_str[error.masked]);
#endif
}
 
/** Get Local APIC ID.
*
* @return Local APIC ID.
*/
uint8_t l_apic_id(void)
{
l_apic_id_t idreg;
idreg.value = l_apic[L_APIC_ID];
return idreg.apic_id;
}
 
/** Read from IO APIC register.
*
* @param address IO APIC register address.
*
* @return Content of the addressed IO APIC register.
*/
uint32_t io_apic_read(uint8_t address)
{
io_regsel_t regsel;
regsel.value = io_apic[IOREGSEL];
regsel.reg_addr = address;
io_apic[IOREGSEL] = regsel.value;
return io_apic[IOWIN];
}
 
/** Write to IO APIC register.
*
* @param address IO APIC register address.
* @param x Content to be written to the addressed IO APIC register.
*/
void io_apic_write(uint8_t address, uint32_t x)
{
io_regsel_t regsel;
regsel.value = io_apic[IOREGSEL];
regsel.reg_addr = address;
io_apic[IOREGSEL] = regsel.value;
io_apic[IOWIN] = x;
}
 
/** Change some attributes of one item in I/O Redirection Table.
*
* @param pin IO APIC pin number.
* @param dest Interrupt destination address.
* @param v Interrupt vector to trigger.
* @param flags Flags.
*/
void io_apic_change_ioredtbl(int pin, int dest, uint8_t v, int flags)
{
io_redirection_reg_t reg;
int dlvr = DELMOD_FIXED;
if (flags & LOPRI)
dlvr = DELMOD_LOWPRI;
 
reg.lo = io_apic_read(IOREDTBL + pin*2);
reg.hi = io_apic_read(IOREDTBL + pin*2 + 1);
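/*
 * Each redirection table entry is 64 bits wide and is accessed as two
 * consecutive 32-bit indirect registers, hence the pin * 2 indexing.
 */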
reg.dest = dest;
reg.destmod = DESTMOD_LOGIC;
reg.trigger_mode = TRIGMOD_EDGE;
reg.intpol = POLARITY_HIGH;
reg.delmod = dlvr;
reg.intvec = v;
 
io_apic_write(IOREDTBL + pin*2, reg.lo);
io_apic_write(IOREDTBL + pin*2 + 1, reg.hi);
}
 
/** Mask IRQs in IO APIC.
*
* @param irqmask Bitmask of IRQs to be masked (0 = do not mask, 1 = mask).
*/
void io_apic_disable_irqs(uint16_t irqmask)
{
io_redirection_reg_t reg;
unsigned int i;
int pin;
for (i = 0; i < 16; i++) {
if (irqmask & (1 << i)) {
/*
* Mask the signal input in IO APIC if there is a
* mapping for the respective IRQ number.
*/
pin = smp_irq_to_pin(i);
if (pin != -1) {
reg.lo = io_apic_read(IOREDTBL + pin * 2);
reg.masked = true;
io_apic_write(IOREDTBL + pin * 2, reg.lo);
}
}
}
}
 
/** Unmask IRQs in IO APIC.
*
* @param irqmask Bitmask of IRQs to be unmasked (0 = do not unmask, 1 = unmask).
*/
void io_apic_enable_irqs(uint16_t irqmask)
{
unsigned int i;
int pin;
io_redirection_reg_t reg;
for (i = 0; i < 16; i++) {
if (irqmask & (1 << i)) {
/*
* Unmask the signal input in IO APIC if there is a
* mapping for the respective IRQ number.
*/
pin = smp_irq_to_pin(i);
if (pin != -1) {
reg.lo = io_apic_read(IOREDTBL + pin * 2);
reg.masked = false;
io_apic_write(IOREDTBL + pin * 2, reg.lo);
}
}
}
}
 
#endif /* CONFIG_SMP */
 
/** @}
*/
/branches/rcu/kernel/arch/ia32/src/interrupt.c
1,248 → 1,239
/*
* Copyright (c) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32interrupt
* @{
*/
/** @file
*/
 
#include <arch/interrupt.h>
#include <syscall/syscall.h>
#include <print.h>
#include <debug.h>
#include <panic.h>
#include <arch/drivers/i8259.h>
#include <func.h>
#include <cpu.h>
#include <arch/asm.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch.h>
#include <symtab.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <synch/spinlock.h>
#include <arch/ddi/ddi.h>
#include <ipc/sysipc.h>
#include <interrupt.h>
#include <ddi/irq.h>
 
/*
* Interrupt and exception dispatching.
*/
 
void (* disable_irqs_function)(uint16_t irqmask) = NULL;
void (* enable_irqs_function)(uint16_t irqmask) = NULL;
void (* eoi_function)(void) = NULL;
 
void decode_istate(istate_t *istate)
{
char *symbol = get_symtab_entry(istate->eip);
 
if (!symbol)
symbol = "";
 
if (CPU)
printf("----------------EXCEPTION OCCURRED (cpu%d)----------------\n", CPU->id);
else
printf("----------------EXCEPTION OCCURRED----------------\n");
printf("%%eip: %#x (%s)\n",istate->eip,symbol);
printf("ERROR_WORD=%#x\n", istate->error_word);
printf("%%cs=%#x,flags=%#x\n", istate->cs, istate->eflags);
printf("%%eax=%#x, %%ecx=%#x, %%edx=%#x, %%esp=%#x\n", istate->eax,istate->ecx,istate->edx,&istate->stack[0]);
#ifdef CONFIG_DEBUG_ALLREGS
printf("%%esi=%#x, %%edi=%#x, %%ebp=%#x, %%ebx=%#x\n", istate->esi,istate->edi,istate->ebp,istate->ebx);
#endif
printf("stack: %#x, %#x, %#x, %#x\n", istate->stack[0], istate->stack[1], istate->stack[2], istate->stack[3]);
printf(" %#x, %#x, %#x, %#x\n", istate->stack[4], istate->stack[5], istate->stack[6], istate->stack[7]);
}
 
static void trap_virtual_eoi(void)
{
if (eoi_function)
eoi_function();
else
panic("no eoi_function\n");
 
}
 
static void null_interrupt(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "unserviced interrupt: %d", n);
 
decode_istate(istate);
panic("unserviced interrupt: %d\n", n);
}
 
/** General Protection Fault. */
static void gp_fault(int n, istate_t *istate)
{
if (TASK) {
count_t ver;
spinlock_lock(&TASK->lock);
ver = TASK->arch.iomapver;
spinlock_unlock(&TASK->lock);
if (CPU->arch.iomapver_copy != ver) {
/*
* This fault can be caused by an early access
* to an I/O port because of an outdated
* I/O Permission bitmap installed on the CPU.
* Install the fresh copy and restart
* the instruction.
*/
io_perm_bitmap_install();
return;
}
fault_if_from_uspace(istate, "general protection fault");
}
 
decode_istate(istate);
panic("general protection fault\n");
}
 
static void ss_fault(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "stack fault");
 
decode_istate(istate);
panic("stack fault\n");
}
 
static void simd_fp_exception(int n, istate_t *istate)
{
uint32_t mxcsr;
asm (
"stmxcsr %0;\n"
:"=m"(mxcsr)
);
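/*
 * stmxcsr stores the MXCSR control/status register; its exception flag
 * bits identify the cause of the SIMD floating-point fault.
 */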
fault_if_from_uspace(istate, "SIMD FP exception(19), MXCSR: %#zx",
(unative_t)mxcsr);
 
decode_istate(istate);
printf("MXCSR: %#zx\n",(unative_t)(mxcsr));
panic("SIMD FP exception(19)\n");
}
 
static void nm_fault(int n, istate_t *istate)
{
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
fault_if_from_uspace(istate, "fpu fault");
panic("fpu fault");
#endif
}
 
#ifdef CONFIG_SMP
static void tlb_shootdown_ipi(int n, istate_t *istate)
{
trap_virtual_eoi();
tlb_shootdown_ipi_recv();
}
#endif
 
/** Handler of IRQ exceptions */
static void irq_interrupt(int n, istate_t *istate)
{
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
bool ack = false;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
trap_virtual_eoi();
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
if (!ack)
trap_virtual_eoi();
}
 
void interrupt_init(void)
{
int i;
for (i = 0; i < IVT_ITEMS; i++)
exc_register(i, "null", (iroutine) null_interrupt);
for (i = 0; i < IRQ_COUNT; i++) {
if ((i != IRQ_PIC_SPUR) && (i != IRQ_PIC1))
exc_register(IVT_IRQBASE + i, "irq", (iroutine) irq_interrupt);
}
exc_register(7, "nm_fault", (iroutine) nm_fault);
exc_register(12, "ss_fault", (iroutine) ss_fault);
exc_register(13, "gp_fault", (iroutine) gp_fault);
exc_register(19, "simd_fp", (iroutine) simd_fp_exception);
#ifdef CONFIG_SMP
exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", (iroutine) tlb_shootdown_ipi);
#endif
}
 
void trap_virtual_enable_irqs(uint16_t irqmask)
{
if (enable_irqs_function)
enable_irqs_function(irqmask);
else
panic("no enable_irqs_function\n");
}
 
void trap_virtual_disable_irqs(uint16_t irqmask)
{
if (disable_irqs_function)
disable_irqs_function(irqmask);
else
panic("no disable_irqs_function\n");
}
 
/** @}
*/
/*
* Copyright (c) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32interrupt
* @{
*/
/** @file
*/
 
#include <arch/interrupt.h>
#include <syscall/syscall.h>
#include <print.h>
#include <debug.h>
#include <panic.h>
#include <arch/drivers/i8259.h>
#include <func.h>
#include <cpu.h>
#include <arch/asm.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch.h>
#include <symtab.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <synch/spinlock.h>
#include <arch/ddi/ddi.h>
#include <ipc/sysipc.h>
#include <interrupt.h>
#include <ddi/irq.h>
 
/*
* Interrupt and exception dispatching.
*/
 
void (* disable_irqs_function)(uint16_t irqmask) = NULL;
void (* enable_irqs_function)(uint16_t irqmask) = NULL;
void (* eoi_function)(void) = NULL;
 
void decode_istate(istate_t *istate)
{
char *symbol = get_symtab_entry(istate->eip);
 
if (!symbol)
symbol = "";
 
if (CPU)
printf("----------------EXCEPTION OCCURRED (cpu%d)----------------\n", CPU->id);
else
printf("----------------EXCEPTION OCCURRED----------------\n");
printf("%%eip: %#x (%s)\n",istate->eip,symbol);
printf("ERROR_WORD=%#x\n", istate->error_word);
printf("%%cs=%#x,flags=%#x\n", istate->cs, istate->eflags);
printf("%%eax=%#x, %%ecx=%#x, %%edx=%#x, %%esp=%#x\n", istate->eax,istate->ecx,istate->edx,&istate->stack[0]);
#ifdef CONFIG_DEBUG_ALLREGS
printf("%%esi=%#x, %%edi=%#x, %%ebp=%#x, %%ebx=%#x\n", istate->esi,istate->edi,istate->ebp,istate->ebx);
#endif
printf("stack: %#x, %#x, %#x, %#x\n", istate->stack[0], istate->stack[1], istate->stack[2], istate->stack[3]);
printf(" %#x, %#x, %#x, %#x\n", istate->stack[4], istate->stack[5], istate->stack[6], istate->stack[7]);
}
 
static void trap_virtual_eoi(void)
{
if (eoi_function)
eoi_function();
else
panic("no eoi_function\n");
 
}
 
static void null_interrupt(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "unserviced interrupt: %d", n);
 
decode_istate(istate);
panic("unserviced interrupt: %d\n", n);
}
 
/** General Protection Fault. */
static void gp_fault(int n, istate_t *istate)
{
if (TASK) {
count_t ver;
spinlock_lock(&TASK->lock);
ver = TASK->arch.iomapver;
spinlock_unlock(&TASK->lock);
if (CPU->arch.iomapver_copy != ver) {
/*
* This fault can be caused by an early access
* to an I/O port because of an outdated
* I/O Permission bitmap installed on the CPU.
* Install the fresh copy and restart
* the instruction.
*/
io_perm_bitmap_install();
return;
}
fault_if_from_uspace(istate, "general protection fault");
}
 
decode_istate(istate);
panic("general protection fault\n");
}
 
static void ss_fault(int n, istate_t *istate)
{
fault_if_from_uspace(istate, "stack fault");
 
decode_istate(istate);
panic("stack fault\n");
}
 
static void simd_fp_exception(int n, istate_t *istate)
{
uint32_t mxcsr;
asm (
"stmxcsr %0;\n"
:"=m"(mxcsr)
);
fault_if_from_uspace(istate, "SIMD FP exception(19), MXCSR: %#zx",
(unative_t)mxcsr);
 
decode_istate(istate);
printf("MXCSR: %#zx\n",(unative_t)(mxcsr));
panic("SIMD FP exception(19)\n");
}
 
static void nm_fault(int n, istate_t *istate)
{
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
fault_if_from_uspace(istate, "fpu fault");
panic("fpu fault");
#endif
}
 
#ifdef CONFIG_SMP
static void tlb_shootdown_ipi(int n, istate_t *istate)
{
trap_virtual_eoi();
tlb_shootdown_ipi_recv();
}
#endif
 
/** Handler of IRQ exceptions */
static void irq_interrupt(int n, istate_t *istate)
{
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
 
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
trap_virtual_eoi();
}
 
void interrupt_init(void)
{
int i;
for (i = 0; i < IVT_ITEMS; i++)
exc_register(i, "null", (iroutine) null_interrupt);
for (i = 0; i < IRQ_COUNT; i++) {
if ((i != IRQ_PIC_SPUR) && (i != IRQ_PIC1))
exc_register(IVT_IRQBASE + i, "irq", (iroutine) irq_interrupt);
}
exc_register(7, "nm_fault", (iroutine) nm_fault);
exc_register(12, "ss_fault", (iroutine) ss_fault);
exc_register(13, "gp_fault", (iroutine) gp_fault);
exc_register(19, "simd_fp", (iroutine) simd_fp_exception);
#ifdef CONFIG_SMP
exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", (iroutine) tlb_shootdown_ipi);
#endif
}
 
void trap_virtual_enable_irqs(uint16_t irqmask)
{
if (enable_irqs_function)
enable_irqs_function(irqmask);
else
panic("no enable_irqs_function\n");
}
 
void trap_virtual_disable_irqs(uint16_t irqmask)
{
if (disable_irqs_function)
disable_irqs_function(irqmask);
else
panic("no disable_irqs_function\n");
}
 
/** @}
*/
/branches/rcu/kernel/arch/ia32/src/drivers/i8254.c
1,162 → 1,161
/*
* Copyright (c) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32
* @{
*/
/**
* @file
* @brief i8254 chip driver.
*
* Low level time functions.
*/
 
#include <arch/types.h>
#include <time/clock.h>
#include <time/delay.h>
#include <arch/cycle.h>
#include <arch/interrupt.h>
#include <arch/drivers/i8259.h>
#include <arch/drivers/i8254.h>
#include <cpu.h>
#include <config.h>
#include <arch/pm.h>
#include <arch/asm.h>
#include <arch/cpuid.h>
#include <arch.h>
#include <time/delay.h>
#include <ddi/irq.h>
#include <ddi/device.h>
 
#define CLK_PORT1 0x40
#define CLK_PORT4 0x43
 
#define CLK_CONST 1193180
#define MAGIC_NUMBER 1194
 
static irq_t i8254_irq;
 
static irq_ownership_t i8254_claim(void)
{
return IRQ_ACCEPT;
}
 
static void i8254_irq_handler(irq_t *irq, void *arg, ...)
{
/*
* This IRQ is responsible for kernel preemption.
* Nevertheless, we are now holding a spinlock which prevents
* preemption. For this particular IRQ, we don't need the
* lock. We just release it, call clock() and then reacquire it.
*/
spinlock_unlock(&irq->lock);
clock();
spinlock_lock(&irq->lock);
}
 
void i8254_init(void)
{
irq_initialize(&i8254_irq);
i8254_irq.preack = true;
i8254_irq.devno = device_assign_devno();
i8254_irq.inr = IRQ_CLK;
i8254_irq.claim = i8254_claim;
i8254_irq.handler = i8254_irq_handler;
irq_register(&i8254_irq);
i8254_normal_operation();
}
 
void i8254_normal_operation(void)
{
outb(CLK_PORT4, 0x36);
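/*
 * Control word 0x36 selects channel 0, lobyte/hibyte access, mode 3
 * (square wave generator), binary counting; the divisor CLK_CONST / HZ
 * written below yields HZ clock interrupts per second.
 */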
pic_disable_irqs(1 << IRQ_CLK);
outb(CLK_PORT1, (CLK_CONST / HZ) & 0xff);
outb(CLK_PORT1, (CLK_CONST / HZ) >> 8);
pic_enable_irqs(1 << IRQ_CLK);
}
 
#define LOOPS 150000
#define SHIFT 11
void i8254_calibrate_delay_loop(void)
{
uint64_t clk1, clk2;
uint32_t t1, t2, o1, o2;
uint8_t not_ok;
 
 
/*
* One-shot timer. Count down from 0xffff at 1193180 Hz.
* MAGIC_NUMBER is the approximate number of PIT ticks in 1 ms.
*/
outb(CLK_PORT4, 0x30);
outb(CLK_PORT1, 0xff);
outb(CLK_PORT1, 0xff);
 
do {
/* will read both status and count */
outb(CLK_PORT4, 0xc2);
not_ok = (inb(CLK_PORT1)>>6)&1;
t1 = inb(CLK_PORT1);
t1 |= inb(CLK_PORT1) << 8;
} while (not_ok);
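/*
 * The read-back command 0xc2 above latches both the status and the count
 * of channel 0; bit 6 of the status byte (NULL COUNT) remains set until
 * the newly written initial count has been loaded into the counter.
 */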
 
asm_delay_loop(LOOPS);
 
outb(CLK_PORT4, 0xd2);
t2 = inb(CLK_PORT1);
t2 |= inb(CLK_PORT1) << 8;
 
/*
* We want to determine the overhead of the calibrating mechanism.
*/
outb(CLK_PORT4, 0xd2);
o1 = inb(CLK_PORT1);
o1 |= inb(CLK_PORT1) << 8;
 
asm_fake_loop(LOOPS);
 
outb(CLK_PORT4, 0xd2);
o2 = inb(CLK_PORT1);
o2 |= inb(CLK_PORT1) << 8;
 
CPU->delay_loop_const =
((MAGIC_NUMBER * LOOPS) / 1000) / ((t1 - t2) - (o1 - o2)) +
(((MAGIC_NUMBER * LOOPS) / 1000) % ((t1 - t2) - (o1 - o2)) ? 1 : 0);
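/*
 * (t1 - t2) - (o1 - o2) is the net number of PIT ticks consumed by LOOPS
 * delay-loop iterations; since MAGIC_NUMBER ticks make up roughly one
 * millisecond, the expression above yields the loop count per
 * microsecond, rounded up.
 */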
 
clk1 = get_cycle();
delay(1 << SHIFT);
clk2 = get_cycle();
CPU->frequency_mhz = (clk2 - clk1) >> SHIFT;
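/*
 * delay() above busy-waited for 2^SHIFT microseconds, so the cycle
 * difference shifted right by SHIFT is the number of cycles per
 * microsecond, i.e. the CPU frequency in MHz.
 */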
 
return;
}
 
/** @}
*/
/*
* Copyright (c) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32
* @{
*/
/**
* @file
* @brief i8254 chip driver.
*
* Low level time functions.
*/
 
#include <arch/types.h>
#include <time/clock.h>
#include <time/delay.h>
#include <arch/cycle.h>
#include <arch/interrupt.h>
#include <arch/drivers/i8259.h>
#include <arch/drivers/i8254.h>
#include <cpu.h>
#include <config.h>
#include <arch/pm.h>
#include <arch/asm.h>
#include <arch/cpuid.h>
#include <arch.h>
#include <time/delay.h>
#include <ddi/irq.h>
#include <ddi/device.h>
 
#define CLK_PORT1 0x40
#define CLK_PORT4 0x43
 
#define CLK_CONST 1193180
#define MAGIC_NUMBER 1194
 
static irq_t i8254_irq;
 
static irq_ownership_t i8254_claim(void)
{
return IRQ_ACCEPT;
}
 
static void i8254_irq_handler(irq_t *irq, void *arg, ...)
{
/*
* This IRQ is responsible for kernel preemption.
* Nevertheless, we are now holding a spinlock which prevents
* preemption. For this particular IRQ, we don't need the
* lock. We just release it, call clock() and then reacquire it.
*/
spinlock_unlock(&irq->lock);
clock();
spinlock_lock(&irq->lock);
}
 
void i8254_init(void)
{
irq_initialize(&i8254_irq);
i8254_irq.devno = device_assign_devno();
i8254_irq.inr = IRQ_CLK;
i8254_irq.claim = i8254_claim;
i8254_irq.handler = i8254_irq_handler;
irq_register(&i8254_irq);
i8254_normal_operation();
}
 
void i8254_normal_operation(void)
{
outb(CLK_PORT4, 0x36);
pic_disable_irqs(1 << IRQ_CLK);
outb(CLK_PORT1, (CLK_CONST / HZ) & 0xff);
outb(CLK_PORT1, (CLK_CONST / HZ) >> 8);
pic_enable_irqs(1 << IRQ_CLK);
}
 
#define LOOPS 150000
#define SHIFT 11
void i8254_calibrate_delay_loop(void)
{
uint64_t clk1, clk2;
uint32_t t1, t2, o1, o2;
uint8_t not_ok;
 
 
/*
* One-shot timer. Count down from 0xffff at 1193180 Hz.
* MAGIC_NUMBER is the approximate number of PIT ticks in 1 ms.
*/
outb(CLK_PORT4, 0x30);
outb(CLK_PORT1, 0xff);
outb(CLK_PORT1, 0xff);
 
do {
/* will read both status and count */
outb(CLK_PORT4, 0xc2);
not_ok = (inb(CLK_PORT1)>>6)&1;
t1 = inb(CLK_PORT1);
t1 |= inb(CLK_PORT1) << 8;
} while (not_ok);
 
asm_delay_loop(LOOPS);
 
outb(CLK_PORT4, 0xd2);
t2 = inb(CLK_PORT1);
t2 |= inb(CLK_PORT1) << 8;
 
/*
* We want to determine the overhead of the calibrating mechanism.
*/
outb(CLK_PORT4, 0xd2);
o1 = inb(CLK_PORT1);
o1 |= inb(CLK_PORT1) << 8;
 
asm_fake_loop(LOOPS);
 
outb(CLK_PORT4, 0xd2);
o2 = inb(CLK_PORT1);
o2 |= inb(CLK_PORT1) << 8;
 
CPU->delay_loop_const =
((MAGIC_NUMBER * LOOPS) / 1000) / ((t1 - t2) - (o1 - o2)) +
(((MAGIC_NUMBER * LOOPS) / 1000) % ((t1 - t2) - (o1 - o2)) ? 1 : 0);
 
clk1 = get_cycle();
delay(1 << SHIFT);
clk2 = get_cycle();
CPU->frequency_mhz = (clk2 - clk1) >> SHIFT;
 
return;
}
 
/** @}
*/
/branches/rcu/uspace/ns/ns.c
83,10 → 83,9
static void *clockaddr = NULL;
static void *klogaddr = NULL;
 
static void get_as_area(ipc_callid_t callid, ipc_call_t *call, char *name, char *colstr, void **addr)
static void get_as_area(ipc_callid_t callid, ipc_call_t *call, char *name, void **addr)
{
void *ph_addr;
int ph_color;
 
if (!*addr) {
ph_addr = (void *) sysinfo_value(name);
94,8 → 93,7
ipc_answer_fast(callid, ENOENT, 0, 0);
return;
}
ph_color = (int) sysinfo_value(colstr);
*addr = as_get_mappable_page(PAGE_SIZE, ph_color);
*addr = as_get_mappable_page(PAGE_SIZE);
physmem_map(ph_addr, *addr, 1, AS_AREA_READ | AS_AREA_CACHEABLE);
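/*
 * physmem_map() maps one page of the kernel area read-only (and
 * cacheable) into our address space; the answer below hands this
 * virtual address back to the client.
 */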
}
ipc_answer_fast(callid, 0, (ipcarg_t) *addr, AS_AREA_READ);
119,11 → 117,11
switch (IPC_GET_ARG3(call)) {
case SERVICE_MEM_REALTIME:
get_as_area(callid, &call, "clock.faddr",
"clock.fcolor", &clockaddr);
&clockaddr);
break;
case SERVICE_MEM_KLOG:
get_as_area(callid, &call, "klog.faddr",
"klog.fcolor", &klogaddr);
&klogaddr);
break;
default:
ipc_answer_fast(callid, ENOENT, 0, 0);
/branches/rcu/uspace/fb/main.c
43,8 → 43,7
{
void *dest;
 
dest = as_get_mappable_page(IPC_GET_ARG2(*call),
PAGE_COLOR(IPC_GET_ARG1(*call)));
dest = as_get_mappable_page(IPC_GET_ARG2(*call));
if (ipc_answer_fast(callid, 0, (sysarg_t) dest, 0) == 0) {
if (*area)
as_area_destroy(*area);
/branches/rcu/uspace/fb/fb.c
755,8 → 755,7
case IPC_M_AS_AREA_SEND:
/* We accept one area for data interchange */
if (IPC_GET_ARG1(*call) == shm_id) {
void *dest = as_get_mappable_page(IPC_GET_ARG2(*call),
PAGE_COLOR(IPC_GET_ARG1(*call)));
void *dest = as_get_mappable_page(IPC_GET_ARG2(*call));
shm_size = IPC_GET_ARG2(*call);
if (!ipc_answer_fast(callid, 0, (sysarg_t) dest, 0))
shm = dest;
1369,8 → 1368,7
fb_invert_colors = sysinfo_value("fb.invert-colors");
 
asz = fb_scanline * fb_height;
fb_addr = as_get_mappable_page(asz, (int)
sysinfo_value("fb.address.color"));
fb_addr = as_get_mappable_page(asz);
physmem_map(fb_ph_addr, fb_addr, ALIGN_UP(asz, PAGE_SIZE) >>
PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE);
/branches/rcu/uspace/fb/ega.c
315,8 → 315,7
iospace_enable(task_get_id(), (void *) EGA_IO_ADDRESS, 2);
 
sz = scr_width * scr_height * 2;
scr_addr = as_get_mappable_page(sz, (int)
sysinfo_value("fb.address.color"));
scr_addr = as_get_mappable_page(sz);
 
physmem_map(ega_ph_addr, scr_addr, ALIGN_UP(sz, PAGE_SIZE) >>
PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE);
/branches/rcu/uspace/klog/klog.c
62,7 → 62,7
 
printf("Kernel console output.\n");
mapping = as_get_mappable_page(PAGE_SIZE, sysinfo_value("klog.fcolor"));
mapping = as_get_mappable_page(PAGE_SIZE);
res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV,
(sysarg_t) mapping, PAGE_SIZE, SERVICE_MEM_KLOG,
NULL, NULL, NULL);
/branches/rcu/uspace/rd/rd.c
73,12 → 73,11
{
size_t rd_size = sysinfo_value("rd.size");
void * rd_ph_addr = (void *) sysinfo_value("rd.address.physical");
int rd_color = (int) sysinfo_value("rd.address.color");
if (rd_size == 0)
return false;
void * rd_addr = as_get_mappable_page(rd_size, rd_color);
void * rd_addr = as_get_mappable_page(rd_size);
physmem_map(rd_ph_addr, rd_addr, ALIGN_UP(rd_size, PAGE_SIZE) >> PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE);
/branches/rcu/uspace/libc/include/as.h
40,13 → 40,11
#include <kernel/mm/as.h>
#include <libarch/config.h>
 
#define PAGE_COLOR(va) (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1))
 
extern void *as_area_create(void *address, size_t size, int flags);
extern int as_area_resize(void *address, size_t size, int flags);
extern int as_area_destroy(void *address);
extern void *set_maxheapsize(size_t mhs);
extern void * as_get_mappable_page(size_t sz, int color);
extern void * as_get_mappable_page(size_t sz);
 
#endif
 
/branches/rcu/uspace/libc/generic/time.c
72,12 → 72,11
int res;
 
if (!ktime) {
mapping = as_get_mappable_page(PAGE_SIZE, (int)
sysinfo_value("clock.fcolor"));
mapping = as_get_mappable_page(PAGE_SIZE);
/* Get the mapping of kernel clock */
res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV, (sysarg_t)
mapping, PAGE_SIZE, SERVICE_MEM_REALTIME, NULL, &rights,
NULL);
res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV,
(sysarg_t) mapping, PAGE_SIZE, SERVICE_MEM_REALTIME, NULL,
&rights, NULL);
if (res) {
printf("Failed to initialize timeofday memarea\n");
_exit(1);
/branches/rcu/uspace/libc/generic/as.c
55,7 → 55,7
void *as_area_create(void *address, size_t size, int flags)
{
return (void *) __SYSCALL3(SYS_AS_AREA_CREATE, (sysarg_t ) address,
(sysarg_t) size, (sysarg_t) flags);
(sysarg_t) size, (sysarg_t) flags);
}
 
/** Resize address space area.
69,8 → 69,8
*/
int as_area_resize(void *address, size_t size, int flags)
{
return __SYSCALL3(SYS_AS_AREA_RESIZE, (sysarg_t ) address, (sysarg_t)
size, (sysarg_t) flags);
return __SYSCALL3(SYS_AS_AREA_RESIZE, (sysarg_t ) address,
(sysarg_t) size, (sysarg_t) flags);
}
 
/** Destroy address space area.
143,7 → 143,6
/** Return a pointer to an unmapped area where a new as_area fits
*
* @param sz Requested size of the allocation.
* @param color Requested virtual color of the allocation.
*
* @return Pointer to the beginning
*
150,7 → 149,7
* TODO: implement a first-fit (or similar) algorithm; for now we just
* keep incrementing a pointer past the last allocated area
*/
void *as_get_mappable_page(size_t sz, int color)
void *as_get_mappable_page(size_t sz)
{
void *res;
uint64_t asz;
166,21 → 165,16
set_maxheapsize(MAX_HEAP_SIZE);
/*
* Make sure we allocate from naturally aligned address and a page of
* appropriate color.
* Make sure we allocate from a naturally aligned address.
*/
i = 0;
do {
if (!last_allocated) {
last_allocated = (void *) ALIGN_UP((void *) &_heap +
maxheapsize, asz);
} else {
last_allocated = (void *) ALIGN_UP(((uintptr_t)
last_allocated) + (int) (i > 0), asz);
}
} while ((asz < (1 << (PAGE_COLOR_BITS + PAGE_WIDTH))) &&
(PAGE_COLOR((uintptr_t) last_allocated) != color) &&
(++i < (1 << PAGE_COLOR_BITS)));
if (!last_allocated) {
last_allocated = (void *) ALIGN_UP((void *) &_heap +
maxheapsize, asz);
} else {
last_allocated = (void *) ALIGN_UP(((uintptr_t)
last_allocated) + (int) (i > 0), asz);
}
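/*
 * Natural alignment here means rounding last_allocated up to asz, which
 * presumably holds the requested size rounded up to a power of two, so
 * the returned area starts on a boundary at least as large as itself.
 */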
 
res = last_allocated;
last_allocated += ALIGN_UP(sz, PAGE_SIZE);
/branches/rcu/uspace/libc/generic/mman.c
36,10 → 36,11
#include <as.h>
#include <unistd.h>
 
void *mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset)
void *mmap(void *start, size_t length, int prot, int flags, int fd,
off_t offset)
{
if (!start)
start = as_get_mappable_page(length, 0);
start = as_get_mappable_page(length);
// if (! ((flags & MAP_SHARED) ^ (flags & MAP_PRIVATE)))
// return MAP_FAILED;
/branches/rcu/uspace/libc/arch/sparc64/_link.ld.in
7,9 → 7,9
}
 
SECTIONS {
. = 0x2000;
. = 0x4000;
 
.init ALIGN(0x2000) : SUBALIGN(0x2000) {
.init ALIGN(0x4000) : SUBALIGN(0x4000) {
*(.init);
} :text
.text : {
17,11 → 17,11
*(.rodata*);
} :text
.got ALIGN(0x2000) : SUBALIGN(0x2000) {
.got ALIGN(0x4000) : SUBALIGN(0x4000) {
_gp = .;
*(.got*);
} :data
.data ALIGN(0x2000) : SUBALIGN(0x2000) {
.data ALIGN(0x4000) : SUBALIGN(0x4000) {
*(.data);
*(.sdata);
} :data
41,7 → 41,7
*(.bss);
} :data
 
. = ALIGN(0x2000);
. = ALIGN(0x4000);
_heap = .;
/DISCARD/ : {
/branches/rcu/uspace/libc/arch/sparc64/include/config.h
35,9 → 35,9
#ifndef LIBC_sparc64_CONFIG_H_
#define LIBC_sparc64_CONFIG_H_
 
#define PAGE_WIDTH 13
#define PAGE_SIZE (1<<PAGE_WIDTH)
#define PAGE_COLOR_BITS 1 /**< Bit 13 is the page color. */
#define PAGE_WIDTH 14
#define PAGE_SIZE (1 << PAGE_WIDTH)
#define PAGE_COLOR_BITS 0 /**< Only one page color. */
 
#endif
 
/branches/rcu/uspace/libc/arch/sparc64/include/stack.h
43,7 → 43,7
/**
* 16-extended-word save area for %i[0-7] and %l[0-7] registers.
*/
#define STACK_WINDOW_SAVE_AREA_SIZE (16*STACK_ITEM_SIZE)
#define STACK_WINDOW_SAVE_AREA_SIZE (16 * STACK_ITEM_SIZE)
 
/**
* By convention, the actual top of the stack is %sp + STACK_BIAS.
/branches/rcu/boot/arch/sparc64/loader/asm.h
33,8 → 33,8
#include "types.h"
#include "main.h"
 
#define PAGE_SIZE 8192
#define PAGE_WIDTH 13
#define PAGE_WIDTH 14
#define PAGE_SIZE (1 << PAGE_WIDTH)
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
/branches/rcu/boot/arch/sparc64/loader/Makefile
90,7 → 90,7
 
-include Makefile.depend
 
image.boot: depend _components.h _link.ld $(COMPONENT_OBJECTS) $(OBJECTS) kernel.o
image.boot: depend _components.h _link.ld $(COMPONENT_OBJECTS) $(OBJECTS)
$(LD) -no-check-sections -N -T _link.ld $(COMPONENT_OBJECTS) $(OBJECTS) -o $@
 
depend:
/branches/rcu/boot/arch/ppc32/loader/Makefile
87,7 → 87,7
 
-include Makefile.depend
 
image.boot: depend _components.h _link.ld $(COMPONENT_OBJECTS) $(OBJECTS) kernel.o
image.boot: depend _components.h _link.ld $(COMPONENT_OBJECTS) $(OBJECTS)
$(LD) -no-check-sections -N -T _link.ld $(COMPONENT_OBJECTS) $(OBJECTS) -o $@
 
depend:
/branches/rcu/boot/arch/ppc64/loader/Makefile
87,7 → 87,7
 
-include Makefile.depend
 
image.boot: depend _components.h _link.ld $(COMPONENT_OBJECTS) $(OBJECTS) kernel.o
image.boot: depend _components.h _link.ld $(COMPONENT_OBJECTS) $(OBJECTS)
$(LD) -no-check-sections -N -T _link.ld $(COMPONENT_OBJECTS) $(OBJECTS) -o $@
 
depend:
/branches/rcu/boot/arch/mips32/loader/Makefile
86,7 → 86,7
 
-include Makefile.depend
 
image.boot: depend _components.h _link.ld $(COMPONENT_OBJECTS) $(OBJECTS) kernel.o
image.boot: depend _components.h _link.ld $(COMPONENT_OBJECTS) $(OBJECTS)
$(LD) -no-check-sections -N -T _link.ld $(COMPONENT_OBJECTS) $(OBJECTS) -o $@
 
depend: