Subversion Repositories HelenOS

Compare Revisions

Rev 3673 → Rev 3674

/branches/dynload/kernel/test/debug/mips1.c
38,10 → 38,10
 
#include <arch.h>
 
char * test_mips1(bool quiet)
char *test_mips1(bool quiet)
{
if (!quiet)
printf("You should enter kconsole debug mode now.\n");
printf("If kconsole is compiled in, you should enter debug mode now.\n");
asm volatile (
"break\n"
/branches/dynload/kernel/kernel.config
76,6 → 76,11
@ "opteron" Opteron
! [ARCH=amd64] MACHINE (choice)
 
# CPU type
@ "us" UltraSPARC I-II subarchitecture
@ "us3" UltraSPARC III-IV subarchitecture
! [ARCH=sparc64] MACHINE (choice)
 
# Machine type
@ "msim" MSIM Simulator
@ "simics" Virtutech Simics simulator
138,10 → 143,21
# Support for Z8530 serial port
! [ARCH=sparc64] CONFIG_Z8530 (y/n)
 
# Support for NS16550 serial port
! [ARCH=sparc64|ARCH=ia64] CONFIG_NS16550 (y/n)
# Support for NS16550 serial port
! [ARCH=sparc64|(ARCH=ia64&MACHINE!=ski)] CONFIG_NS16550 (n/y)
 
# Support for Serengeti console
! [ARCH=sparc64] CONFIG_SGCN (y/n)
 
# IOSapic on default address support
! [ARCH=ia64&MACHINE!=ski] CONFIG_IOSAPIC (y/n)
 
# Interrupt-driven driver for Legacy Keyboard?
! [CONFIG_NS16550=n&CONFIG_IOSAPIC=y&MACHINE!=ski] CONFIG_I8042_INTERRUPT_DRIVEN (y/n)
 
# Interrupt-driven driver for NS16550?
! [CONFIG_NS16550=y&((ARCH!=ia64)|CONFIG_IOSAPIC=y)&MACHINE!=ski] CONFIG_NS16550_INTERRUPT_DRIVEN (y/n)
 
# Virtually indexed D-cache support
! [ARCH=sparc64] CONFIG_VIRT_IDX_DCACHE (y/n)
 
150,12 → 166,15
 
## Debugging configuration directives
 
# General debuging and assert checking
# General debugging and assert checking
! CONFIG_DEBUG (y/n)
 
# Extensive debugging output
! [CONFIG_DEBUG=y] CONFIG_EDEBUG (n/y)
# Kernel console support
! CONFIG_KCONSOLE (y/n)
 
# Detailed kernel logging
! CONFIG_LOG (n/y)
 
# Deadlock detection support for spinlocks
! [CONFIG_DEBUG=y&CONFIG_SMP=y] CONFIG_DEBUG_SPINLOCK (y/n)
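In this directive syntax the bracketed expression gates when the question is asked and the parenthesised pair lists the allowed answers; the reordering of CONFIG_NS16550 from (y/n) to (n/y) above suggests the first answer is the default. Each option accepted as "y" becomes a preprocessor define (see the Makefile hunk at the end of this revision), so drivers simply key off the symbol. A minimal, illustrative C sketch of the pattern the ns16550 changes below follow:

#ifdef CONFIG_NS16550_INTERRUPT_DRIVEN
	/* interrupt-driven input: unmask the UART and feed characters from the IRQ handler */
#else
	/* polled input: keep device interrupts masked and read characters from the polling path */
#endif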
 
/branches/dynload/kernel/genarch/include/kbd/z8530.h
39,17 → 39,18
 
#include <console/chardev.h>
#include <ipc/irq.h>
#include <ddi/irq.h>
 
extern bool z8530_belongs_to_kernel;
 
extern void z8530_init(devno_t devno, inr_t inr, uintptr_t vaddr);
extern void z8530_init(devno_t, uintptr_t, inr_t, cir_t, void *);
extern void z8530_poll(void);
extern void z8530_grab(void);
extern void z8530_release(void);
extern void z8530_interrupt(void);
extern char z8530_key_read(chardev_t *d);
extern char z8530_key_read(chardev_t *);
extern irq_ownership_t z8530_claim(void);
extern void z8530_irq_handler(irq_t *irq, void *arg, ...);
extern void z8530_irq_handler(irq_t *, void *, ...);
 
#endif
 
/branches/dynload/kernel/genarch/include/kbd/ns16550.h
38,15 → 38,16
#define KERN_NS16550_H_
 
#include <console/chardev.h>
#include <ddi/irq.h>
#include <ipc/irq.h>
 
extern void ns16550_init(devno_t devno, inr_t inr, uintptr_t vaddr);
extern void ns16550_init(devno_t, uintptr_t, inr_t, cir_t, void *);
extern void ns16550_poll(void);
extern void ns16550_grab(void);
extern void ns16550_release(void);
extern char ns16550_key_read(chardev_t *d);
extern char ns16550_key_read(chardev_t *);
extern irq_ownership_t ns16550_claim(void);
extern void ns16550_irq_handler(irq_t *irq, void *arg, ...);
extern void ns16550_irq_handler(irq_t *, void *, ...);
 
#include <arch/types.h>
#ifndef ia64
58,6 → 59,7
#define IIR_REG 2 /** Interrupt Ident Register (read). */
#define FCR_REG 2 /** FIFO control register (write). */
#define LCR_REG 3 /** Line Control register. */
#define MCR_REG 4 /** Modem Control Register. */
#define LSR_REG 5 /** Line Status Register. */
 
#define IER_ERBFI 0x01 /** Enable Receive Buffer Full Interrupt. */
64,57 → 66,68
 
#define LCR_DLAB 0x80 /** Divisor Latch Access bit. */
 
#define MCR_OUT2 0x08 /** OUT2. */
 
/** Structure representing the ns16550 device. */
typedef struct {
devno_t devno;
volatile ioport_t io_port; /** Memory mapped registers of the ns16550. */
/** Memory mapped registers of the ns16550. */
volatile ioport_t io_port;
} ns16550_t;
 
static inline uint8_t ns16550_rbr_read(ns16550_t *dev)
{
return inb(dev->io_port+RBR_REG);
return inb(dev->io_port + RBR_REG);
}
static inline void ns16550_rbr_write(ns16550_t *dev, uint8_t v)
{
outb(dev->io_port+RBR_REG,v);
outb(dev->io_port + RBR_REG, v);
}
 
static inline uint8_t ns16550_ier_read(ns16550_t *dev)
{
return inb(dev->io_port+IER_REG);
return inb(dev->io_port + IER_REG);
}
 
static inline void ns16550_ier_write(ns16550_t *dev, uint8_t v)
{
outb(dev->io_port+IER_REG,v);
outb(dev->io_port + IER_REG, v);
}
 
static inline uint8_t ns16550_iir_read(ns16550_t *dev)
{
return inb(dev->io_port+IIR_REG);
return inb(dev->io_port + IIR_REG);
}
 
static inline void ns16550_fcr_write(ns16550_t *dev, uint8_t v)
{
outb(dev->io_port+FCR_REG,v);
outb(dev->io_port + FCR_REG, v);
}
 
static inline uint8_t ns16550_lcr_read(ns16550_t *dev)
{
return inb(dev->io_port+LCR_REG);
return inb(dev->io_port + LCR_REG);
}
 
static inline void ns16550_lcr_write(ns16550_t *dev, uint8_t v)
{
outb(dev->io_port+LCR_REG,v);
outb(dev->io_port + LCR_REG, v);
}
 
static inline uint8_t ns16550_lsr_read(ns16550_t *dev)
{
return inb(dev->io_port+LSR_REG);
return inb(dev->io_port + LSR_REG);
}
 
static inline uint8_t ns16550_mcr_read(ns16550_t *dev)
{
return inb(dev->io_port + MCR_REG);
}
 
static inline void ns16550_mcr_write(ns16550_t *dev, uint8_t v)
{
outb(dev->io_port + MCR_REG, v);
}
 
#endif
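The new MCR accessors and MCR_OUT2 complete the register set needed for interrupt-driven operation: on PC-style UARTs the OUT2 bit gates the chip's interrupt output, so it has to be set together with IER_ERBFI. A small sketch of a read-modify-write variant (ns16550_enable_rx_irq is a hypothetical helper; the driver change below simply writes MCR_OUT2 outright):

static inline void ns16550_enable_rx_irq(ns16550_t *dev)
{
	/* allow receive-buffer-full interrupts and open the OUT2 interrupt gate */
	ns16550_ier_write(dev, IER_ERBFI);
	ns16550_mcr_write(dev, ns16550_mcr_read(dev) | MCR_OUT2);
}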
 
/branches/dynload/kernel/genarch/include/fb/visuals.h
44,6 → 44,7
#define VISUAL_RGB_0_8_8_8 5
 
#define VISUAL_BGR_0_8_8_8 6
#define VISUAL_SB1500_PALETTE 7
 
#endif
 
/branches/dynload/kernel/genarch/include/fb/fb.h
38,8 → 38,34
#include <arch/types.h>
#include <synch/spinlock.h>
 
/**
* Properties of the framebuffer device.
*/
typedef struct fb_properties {
/** Physical address of the framebuffer device. */
uintptr_t addr;
 
/**
* Address where the first (top left) pixel is mapped,
* relative to "addr".
*/
unsigned int offset;
 
/** Screen width in pixels. */
unsigned int x;
 
/** Screen height in pixels. */
unsigned int y;
 
/** Bytes per one scanline. */
unsigned int scan;
 
/** Color model. */
unsigned int visual;
} fb_properties_t;
 
SPINLOCK_EXTERN(fb_lock);
void fb_init(uintptr_t addr, unsigned int x, unsigned int y, unsigned int scan, unsigned int visual);
void fb_init(fb_properties_t *props);
 
#endif
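fb_init() now takes the whole property set as one structure instead of a positional argument list, and the new offset field describes framebuffers whose first visible pixel is not at the start of the mapping (used by the SB1500 palette mode added below). A hypothetical caller-side sketch, assuming the values were already obtained from firmware or boot info:

fb_properties_t props = {
	.addr = fb_phys_addr,		/* physical base of the device (assumed known) */
	.offset = 0,			/* first visible pixel, relative to addr */
	.x = 1024,			/* screen width in pixels */
	.y = 768,			/* screen height in pixels */
	.scan = 1024,			/* bytes per scanline for an 8-bit visual */
	.visual = VISUAL_INDIRECT_8
};

fb_init(&props);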
 
/branches/dynload/kernel/genarch/include/ofw/ofw_tree.h
30,6 → 30,7
#define KERN_OFW_TREE_H_
 
#include <arch/types.h>
#include <ddi/irq.h>
#include <typedefs.h>
 
#define OFW_TREE_PROPERTY_MAX_NAMELEN 32
43,11 → 44,11
ofw_tree_node_t *peer;
ofw_tree_node_t *child;
 
uint32_t node_handle; /**< Old OpenFirmware node handle. */
uint32_t node_handle; /**< Old OpenFirmware node handle. */
 
char *da_name; /**< Disambiguated name. */
char *da_name; /**< Disambiguated name. */
 
unsigned properties; /**< Number of properties. */
unsigned properties; /**< Number of properties. */
ofw_tree_property_t *property;
/**
105,7 → 106,7
uint32_t child_space;
uint32_t child_base;
uint32_t parent_space;
uint64_t parent_base; /* group phys.mid and phys.lo together */
uint64_t parent_base; /* group phys.mid and phys.lo together */
uint32_t size;
} __attribute__ ((packed));
typedef struct ofw_ebus_range ofw_ebus_range_t;
127,8 → 128,8
typedef struct ofw_ebus_intr_mask ofw_ebus_intr_mask_t;
 
struct ofw_pci_reg {
uint32_t space; /* needs to be masked to obtain pure space id */
uint64_t addr; /* group phys.mid and phys.lo together */
uint32_t space; /* needs to be masked to obtain pure space id */
uint64_t addr; /* group phys.mid and phys.lo together */
uint64_t size;
} __attribute__ ((packed));
typedef struct ofw_pci_reg ofw_pci_reg_t;
135,7 → 136,7
 
struct ofw_pci_range {
uint32_t space;
uint64_t child_base; /* group phys.mid and phys.lo together */
uint64_t child_base; /* group phys.mid and phys.lo together */
uint64_t parent_base;
uint64_t size;
} __attribute__ ((packed));
160,27 → 161,43
} __attribute__ ((packed));
typedef struct ofw_upa_reg ofw_upa_reg_t;
 
extern void ofw_tree_init(ofw_tree_node_t *root);
extern void ofw_tree_init(ofw_tree_node_t *);
extern void ofw_tree_print(void);
extern const char *ofw_tree_node_name(const ofw_tree_node_t *node);
extern ofw_tree_node_t *ofw_tree_lookup(const char *path);
extern ofw_tree_property_t *ofw_tree_getprop(const ofw_tree_node_t *node, const char *name);
extern ofw_tree_node_t *ofw_tree_find_child(ofw_tree_node_t *node, const char *name);
extern ofw_tree_node_t *ofw_tree_find_child_by_device_type(ofw_tree_node_t *node, const char *device_type);
extern ofw_tree_node_t *ofw_tree_find_peer_by_device_type(ofw_tree_node_t *node, const char *device_type);
extern ofw_tree_node_t *ofw_tree_find_node_by_handle(ofw_tree_node_t *root, uint32_t handle);
extern const char *ofw_tree_node_name(const ofw_tree_node_t *);
extern ofw_tree_node_t *ofw_tree_lookup(const char *);
extern ofw_tree_property_t *ofw_tree_getprop(const ofw_tree_node_t *,
const char *);
extern ofw_tree_node_t *ofw_tree_find_child(ofw_tree_node_t *, const char *);
extern ofw_tree_node_t *ofw_tree_find_child_by_device_type(ofw_tree_node_t *,
const char *);
extern ofw_tree_node_t *ofw_tree_find_peer_by_device_type(ofw_tree_node_t *,
const char *);
extern ofw_tree_node_t *ofw_tree_find_peer_by_name(ofw_tree_node_t *node,
const char *name);
extern ofw_tree_node_t *ofw_tree_find_node_by_handle(ofw_tree_node_t *,
uint32_t);
 
extern bool ofw_fhc_apply_ranges(ofw_tree_node_t *node, ofw_fhc_reg_t *reg, uintptr_t *pa);
extern bool ofw_central_apply_ranges(ofw_tree_node_t *node, ofw_central_reg_t *reg, uintptr_t *pa);
extern bool ofw_ebus_apply_ranges(ofw_tree_node_t *node, ofw_ebus_reg_t *reg, uintptr_t *pa);
extern bool ofw_pci_apply_ranges(ofw_tree_node_t *node, ofw_pci_reg_t *reg, uintptr_t *pa);
extern bool ofw_sbus_apply_ranges(ofw_tree_node_t *node, ofw_sbus_reg_t *reg, uintptr_t *pa);
extern bool ofw_upa_apply_ranges(ofw_tree_node_t *node, ofw_upa_reg_t *reg, uintptr_t *pa);
extern bool ofw_fhc_apply_ranges(ofw_tree_node_t *, ofw_fhc_reg_t *,
uintptr_t *);
extern bool ofw_central_apply_ranges(ofw_tree_node_t *, ofw_central_reg_t *,
uintptr_t *);
extern bool ofw_ebus_apply_ranges(ofw_tree_node_t *, ofw_ebus_reg_t *,
uintptr_t *);
extern bool ofw_pci_apply_ranges(ofw_tree_node_t *, ofw_pci_reg_t *,
uintptr_t *);
extern bool ofw_sbus_apply_ranges(ofw_tree_node_t *, ofw_sbus_reg_t *,
uintptr_t *);
extern bool ofw_upa_apply_ranges(ofw_tree_node_t *, ofw_upa_reg_t *,
uintptr_t *);
 
extern bool ofw_pci_reg_absolutize(ofw_tree_node_t *node, ofw_pci_reg_t *reg, ofw_pci_reg_t *out);
extern bool ofw_pci_reg_absolutize(ofw_tree_node_t *, ofw_pci_reg_t *,
ofw_pci_reg_t *);
 
extern bool ofw_fhc_map_interrupt(ofw_tree_node_t *node, ofw_fhc_reg_t *reg, uint32_t interrupt, int *inr);
extern bool ofw_ebus_map_interrupt(ofw_tree_node_t *node, ofw_ebus_reg_t *reg, uint32_t interrupt, int *inr);
extern bool ofw_pci_map_interrupt(ofw_tree_node_t *node, ofw_pci_reg_t *reg, int ino, int *inr);
extern bool ofw_fhc_map_interrupt(ofw_tree_node_t *, ofw_fhc_reg_t *,
uint32_t, int *, cir_t *, void **);
extern bool ofw_ebus_map_interrupt(ofw_tree_node_t *, ofw_ebus_reg_t *,
uint32_t, int *, cir_t *, void **);
extern bool ofw_pci_map_interrupt(ofw_tree_node_t *, ofw_pci_reg_t *,
int, int *, cir_t *, void **);
 
#endif
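The interrupt-mapping helpers now hand back not only the INR but also a clear-interrupt routine and its argument, which the caller is expected to pass straight on to the keyboard driver's new init signature. A hedged sketch of the intended call chain (node, reg, interrupt, devno and port stand for values the platform code already has; this is not code from the revision itself):

int inr;
cir_t cir;
void *cir_arg;

if (ofw_ebus_map_interrupt(node, reg, interrupt, &inr, &cir, &cir_arg))
	ns16550_init(devno, port, inr, cir, cir_arg);
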
/branches/dynload/kernel/genarch/src/kbd/ns16550.c
38,8 → 38,8
#include <genarch/kbd/key.h>
#include <genarch/kbd/scanc.h>
#include <genarch/kbd/scanc_sun.h>
#include <arch/drivers/kbd.h>
#ifndef ia64
#include <arch/drivers/kbd.h>
#include <arch/drivers/ns16550.h>
#endif
#include <ddi/irq.h>
107,11 → 107,14
 
/** Initialize ns16550.
*
* @param devno Device number.
* @param inr Interrupt number.
* @param vaddr Virtual address of device's registers.
* @param devno Device number.
* @param port Virtual/IO address of device's registers.
* @param inr Interrupt number.
* @param cir Clear interrupt function.
* @param cir_arg First argument to cir.
*/
void ns16550_init(devno_t devno, inr_t inr, ioport_t port)
void
ns16550_init(devno_t devno, ioport_t port, inr_t inr, cir_t cir, void *cir_arg)
{
chardev_initialize("ns16550_kbd", &kbrd, &ops);
stdin = &kbrd;
124,18 → 127,31
ns16550_irq.inr = inr;
ns16550_irq.claim = ns16550_claim;
ns16550_irq.handler = ns16550_irq_handler;
ns16550_irq.cir = cir;
ns16550_irq.cir_arg = cir_arg;
irq_register(&ns16550_irq);
 
 
while ((ns16550_lsr_read(&ns16550) & LSR_DATA_READY))
ns16550_rbr_read(&ns16550);
 
sysinfo_set_item_val("kbd", NULL, true);
#ifndef ia64
sysinfo_set_item_val("kbd.type", NULL, KBD_NS16550);
#endif
sysinfo_set_item_val("kbd.devno", NULL, devno);
sysinfo_set_item_val("kbd.inr", NULL, inr);
sysinfo_set_item_val("kbd.address.virtual", NULL, port);
sysinfo_set_item_val("kbd.port", NULL, port);
 
#ifdef CONFIG_NS16550_INTERRUPT_DRIVEN
/* Enable interrupts */
ns16550_ier_write(&ns16550, IER_ERBFI);
ns16550_mcr_write(&ns16550, MCR_OUT2);
#endif
 
#ifdef ia64
uint8_t c;
// Switch RBR & IER into the mode in which they accept the baud rate constant
c = ns16550_lcr_read(&ns16550);
ns16550_lcr_write(&ns16550, 0x80 | c);
ns16550_rbr_write(&ns16550, 0x0c);
149,10 → 165,7
/** Process ns16550 interrupt. */
void ns16550_interrupt(void)
{
/* TODO
*
* ns16550 works in the polled mode so far.
*/
ns16550_poll();
}
 
/* Called from getc(). */
201,6 → 214,7
*/
void ns16550_poll(void)
{
#ifndef CONFIG_NS16550_INTERRUPT_DRIVEN
ipl_t ipl;
 
ipl = interrupts_disable();
220,6 → 234,7
 
spinlock_unlock(&ns16550_irq.lock);
interrupts_restore(ipl);
#endif
 
while (ns16550_lsr_read(&ns16550) & LSR_DATA_READY) {
uint8_t x;
251,7 → 266,10
 
void ns16550_irq_handler(irq_t *irq, void *arg, ...)
{
panic("Not yet implemented, ns16550 works in polled mode.\n");
if (irq->notif_cfg.notify && irq->notif_cfg.answerbox)
ipc_irq_send_notif(irq);
else
ns16550_interrupt();
}
 
/** @}
/branches/dynload/kernel/genarch/src/kbd/i8042.c
37,6 → 37,9
*/
 
#include <genarch/kbd/i8042.h>
#ifdef ia64
#include <arch/drivers/kbd.h>
#endif
#include <genarch/kbd/key.h>
#include <genarch/kbd/scanc.h>
#include <genarch/kbd/scanc_pc.h>
184,7 → 187,9
sysinfo_set_item_val("kbd", NULL, true);
sysinfo_set_item_val("kbd.devno", NULL, kbd_devno);
sysinfo_set_item_val("kbd.inr", NULL, kbd_inr);
#ifdef KBD_LEGACY
sysinfo_set_item_val("kbd.type", NULL, KBD_LEGACY);
#endif
sysinfo_set_item_val("mouse", NULL, true);
sysinfo_set_item_val("mouse.devno", NULL, mouse_devno);
sysinfo_set_item_val("mouse.inr", NULL, mouse_inr);
/branches/dynload/kernel/genarch/src/kbd/z8530.c
43,7 → 43,6
#include <ipc/irq.h>
#include <arch/interrupt.h>
#include <arch/drivers/kbd.h>
#include <arch/drivers/fhc.h>
#include <cpu.h>
#include <arch/asm.h>
#include <arch.h>
83,12 → 82,14
*/
z8530_write_a(&z8530, WR0, WR0_TX_IP_RST);
 
z8530_write_a(&z8530, WR1, WR1_IARCSC); /* interrupt on all characters */
/* interrupt on all characters */
z8530_write_a(&z8530, WR1, WR1_IARCSC);
 
/* 8 bits per character and enable receiver */
z8530_write_a(&z8530, WR3, WR3_RX8BITSCH | WR3_RX_ENABLE);
z8530_write_a(&z8530, WR9, WR9_MIE); /* Master Interrupt Enable. */
/* Master Interrupt Enable. */
z8530_write_a(&z8530, WR9, WR9_MIE);
spinlock_lock(&z8530_irq.lock);
z8530_irq.notif_cfg.notify = false;
108,7 → 109,8
}
 
/** Initialize z8530. */
void z8530_init(devno_t devno, inr_t inr, uintptr_t vaddr)
void
z8530_init(devno_t devno, uintptr_t vaddr, inr_t inr, cir_t cir, void *cir_arg)
{
chardev_initialize("z8530_kbd", &kbrd, &ops);
stdin = &kbrd;
121,6 → 123,8
z8530_irq.inr = inr;
z8530_irq.claim = z8530_claim;
z8530_irq.handler = z8530_irq_handler;
z8530_irq.cir = cir;
z8530_irq.cir_arg = cir_arg;
irq_register(&z8530_irq);
 
sysinfo_set_item_val("kbd", NULL, true);
197,18 → 201,10
 
void z8530_irq_handler(irq_t *irq, void *arg, ...)
{
/*
* So far, we know we got this interrupt through the FHC.
* Since we don't have enough documentation about the FHC
* and because the interrupt looks like level sensitive,
* we cannot handle it by scheduling one of the level
* interrupt traps. Process the interrupt directly.
*/
if (irq->notif_cfg.notify && irq->notif_cfg.answerbox)
ipc_irq_send_notif(irq);
else
z8530_interrupt();
fhc_clear_interrupt(central_fhc, irq->inr);
}
 
/** @}
/branches/dynload/kernel/genarch/src/fb/fb.c
191,6 → 191,26
BLUE(rgb, 3);
}
 
static void sb1500rgb_byte8(void *dst, int rgb)
{
if (RED(rgb, 1) && GREEN(rgb, 1) && BLUE(rgb, 1))
*((uint8_t *) dst) = 255;
else if (RED(rgb, 1) && GREEN(rgb, 1))
*((uint8_t *) dst) = 150;
else if (GREEN(rgb, 1) && BLUE(rgb, 1))
*((uint8_t *) dst) = 47;
else if (RED(rgb, 1) && BLUE(rgb, 1))
*((uint8_t *) dst) = 48;
else if (RED(rgb, 1))
*((uint8_t *) dst) = 32;
else if (GREEN(rgb, 1))
*((uint8_t *) dst) = 47;
else if (BLUE(rgb, 1))
*((uint8_t *) dst) = 2;
else
*((uint8_t *) dst) = 1;
}
 
/** Return pixel color - 8-bit depth (color palette/3:2:3)
*
* See the comment for rgb_byte().
436,22 → 456,21
 
/** Initialize framebuffer as a chardev output device
*
* @param addr Physical address of the framebuffer
* @param x Screen width in pixels
* @param y Screen height in pixels
* @param scan Bytes per one scanline
* @param visual Color model
*
* @param props Properties of the framebuffer device.
*/
void fb_init(uintptr_t addr, unsigned int x, unsigned int y, unsigned int scan,
unsigned int visual)
void fb_init(fb_properties_t *props)
{
switch (visual) {
switch (props->visual) {
case VISUAL_INDIRECT_8:
rgb2scr = rgb_byte8;
scr2rgb = byte8_rgb;
pixelbytes = 1;
break;
case VISUAL_SB1500_PALETTE:
rgb2scr = sb1500rgb_byte8;
scr2rgb = byte8_rgb;
pixelbytes = 1;
break;
case VISUAL_RGB_5_5_5:
rgb2scr = rgb_byte555;
scr2rgb = byte555_rgb;
486,19 → 505,20
panic("Unsupported visual.\n");
}
unsigned int fbsize = scan * y;
unsigned int fbsize = props->scan * props->y + props->offset;
/* Map the framebuffer */
fbaddress = (uint8_t *) hw_map((uintptr_t) addr, fbsize);
fbaddress = (uint8_t *) hw_map((uintptr_t) props->addr, fbsize);
fbaddress += props->offset;
xres = x;
yres = y;
scanline = scan;
xres = props->x;
yres = props->y;
scanline = props->scan;
rows = y / FONT_SCANLINES;
columns = x / COL_WIDTH;
rows = props->y / FONT_SCANLINES;
columns = props->x / COL_WIDTH;
 
fb_parea.pbase = (uintptr_t) addr;
fb_parea.pbase = (uintptr_t) props->addr;
fb_parea.vbase = (uintptr_t) fbaddress;
fb_parea.frames = SIZE2FRAMES(fbsize);
fb_parea.cacheable = false;
508,9 → 528,9
sysinfo_set_item_val("fb.kind", NULL, 1);
sysinfo_set_item_val("fb.width", NULL, xres);
sysinfo_set_item_val("fb.height", NULL, yres);
sysinfo_set_item_val("fb.scanline", NULL, scan);
sysinfo_set_item_val("fb.visual", NULL, visual);
sysinfo_set_item_val("fb.address.physical", NULL, addr);
sysinfo_set_item_val("fb.scanline", NULL, props->scan);
sysinfo_set_item_val("fb.visual", NULL, props->visual);
sysinfo_set_item_val("fb.address.physical", NULL, props->addr);
sysinfo_set_item_val("fb.invert-colors", NULL, invert_colors);
 
/* Allocate double buffer */
524,6 → 544,7
blankline = (uint8_t *) malloc(ROW_BYTES, FRAME_ATOMIC);
if (!blankline)
panic("Failed to allocate blank line for framebuffer.");
unsigned int x, y;
for (y = 0; y < FONT_SCANLINES; y++)
for (x = 0; x < xres; x++)
(*rgb2scr)(&blankline[POINTPOS(x, y)], COLOR(BGCOLOR));
/branches/dynload/kernel/genarch/src/ofw/ebus.c
44,7 → 44,8
#include <macros.h>
 
/** Apply EBUS ranges to EBUS register. */
bool ofw_ebus_apply_ranges(ofw_tree_node_t *node, ofw_ebus_reg_t *reg, uintptr_t *pa)
bool
ofw_ebus_apply_ranges(ofw_tree_node_t *node, ofw_ebus_reg_t *reg, uintptr_t *pa)
{
ofw_tree_property_t *prop;
ofw_ebus_range_t *range;
62,11 → 63,13
for (i = 0; i < ranges; i++) {
if (reg->space != range[i].child_space)
continue;
if (overlaps(reg->addr, reg->size, range[i].child_base, range[i].size)) {
if (overlaps(reg->addr, reg->size, range[i].child_base,
range[i].size)) {
ofw_pci_reg_t pci_reg;
pci_reg.space = range[i].parent_space;
pci_reg.addr = range[i].parent_base + (reg->addr - range[i].child_base);
pci_reg.addr = range[i].parent_base +
(reg->addr - range[i].child_base);
pci_reg.size = reg->size;
return ofw_pci_apply_ranges(node->parent, &pci_reg, pa);
76,7 → 79,9
return false;
}
 
bool ofw_ebus_map_interrupt(ofw_tree_node_t *node, ofw_ebus_reg_t *reg, uint32_t interrupt, int *inr)
bool
ofw_ebus_map_interrupt(ofw_tree_node_t *node, ofw_ebus_reg_t *reg,
uint32_t interrupt, int *inr, cir_t *cir, void **cir_arg)
{
ofw_tree_property_t *prop;
ofw_tree_node_t *controller;
104,8 → 109,8
unsigned int i;
for (i = 0; i < count; i++) {
if ((intr_map[i].space == space) && (intr_map[i].addr == addr)
&& (intr_map[i].intr == intr))
if ((intr_map[i].space == space) &&
(intr_map[i].addr == addr) && (intr_map[i].intr == intr))
goto found;
}
return false;
113,10 → 118,12
found:
/*
* We found the device that functions as an interrupt controller
* for the interrupt. We also found partial mapping from interrupt to INO.
* for the interrupt. We also found partial mapping from interrupt to
* INO.
*/
 
controller = ofw_tree_find_node_by_handle(ofw_tree_lookup("/"), intr_map[i].controller_handle);
controller = ofw_tree_find_node_by_handle(ofw_tree_lookup("/"),
intr_map[i].controller_handle);
if (!controller)
return false;
130,7 → 137,8
/*
* Let the PCI do the next step in mapping the interrupt.
*/
if (!ofw_pci_map_interrupt(controller, NULL, intr_map[i].controller_ino, inr))
if (!ofw_pci_map_interrupt(controller, NULL, intr_map[i].controller_ino,
inr, cir, cir_arg))
return false;
 
return true;
/branches/dynload/kernel/genarch/src/ofw/fhc.c
109,7 → 109,9
return false;
}
 
bool ofw_fhc_map_interrupt(ofw_tree_node_t *node, ofw_fhc_reg_t *reg, uint32_t interrupt, int *inr)
bool
ofw_fhc_map_interrupt(ofw_tree_node_t *node, ofw_fhc_reg_t *reg,
uint32_t interrupt, int *inr, cir_t *cir, void **cir_arg)
{
fhc_t *fhc = NULL;
if (!node->device) {
126,6 → 128,8
fhc_enable_interrupt(fhc, interrupt);
*inr = interrupt;
*cir = fhc_clear_interrupt;
*cir_arg = fhc;
return true;
}
 
/branches/dynload/kernel/genarch/src/ofw/ofw_tree.c
54,12 → 54,14
 
/** Get OpenFirmware node property.
*
* @param node Node in which to lookup the property.
* @param name Name of the property.
* @param node Node in which to lookup the property.
* @param name Name of the property.
*
* @return Pointer to the property structure or NULL if no such property.
* @return Pointer to the property structure or NULL if no such
* property.
*/
ofw_tree_property_t *ofw_tree_getprop(const ofw_tree_node_t *node, const char *name)
ofw_tree_property_t *
ofw_tree_getprop(const ofw_tree_node_t *node, const char *name)
{
unsigned int i;
73,9 → 75,9
 
/** Return value of the 'name' property.
*
* @param node Node of interest.
* @param node Node of interest.
*
* @return Value of the 'name' property belonging to the node.
* @return Value of the 'name' property belonging to the node.
*/
const char *ofw_tree_node_name(const ofw_tree_node_t *node)
{
93,10 → 95,11
 
/** Lookup child of given name.
*
* @param node Node whose child is being looked up.
* @param name Name of the child being looked up.
* @param node Node whose child is being looked up.
* @param name Name of the child being looked up.
*
* @return NULL if there is no such child or pointer to the matching child node.
* @return NULL if there is no such child or pointer to the
* matching child node.
*/
ofw_tree_node_t *ofw_tree_find_child(ofw_tree_node_t *node, const char *name)
{
127,12 → 130,14
 
/** Lookup first child of given device type.
*
* @param node Node whose child is being looked up.
* @param name Device type of the child being looked up.
* @param node Node whose child is being looked up.
* @param name Device type of the child being looked up.
*
* @return NULL if there is no such child or pointer to the matching child node.
* @return NULL if there is no such child or pointer to the
* matching child node.
*/
ofw_tree_node_t *ofw_tree_find_child_by_device_type(ofw_tree_node_t *node, const char *name)
ofw_tree_node_t *
ofw_tree_find_child_by_device_type(ofw_tree_node_t *node, const char *name)
{
ofw_tree_node_t *cur;
ofw_tree_property_t *prop;
153,12 → 158,14
* Child nodes are looked up recursively contrary to peer nodes that
* are looked up iteratively to avoid stack overflow.
*
* @param root Root of the searched subtree.
* @param handle OpenFirmware handle.
* @param root Root of the searched subtree.
* @param handle OpenFirmware handle.
*
* @return NULL if there is no such node or pointer to the matching node.
* @return NULL if there is no such node or pointer to the matching
* node.
*/
ofw_tree_node_t *ofw_tree_find_node_by_handle(ofw_tree_node_t *root, uint32_t handle)
ofw_tree_node_t *
ofw_tree_find_node_by_handle(ofw_tree_node_t *root, uint32_t handle)
{
ofw_tree_node_t *cur;
 
180,12 → 187,14
 
/** Lookup first peer of given device type.
*
* @param node Node whose peer is being looked up.
* @param name Device type of the child being looked up.
* @param node Node whose peer is being looked up.
* @param name Device type of the child being looked up.
*
* @return NULL if there is no such child or pointer to the matching child node.
* @return NULL if there is no such child or pointer to the
* matching child node.
*/
ofw_tree_node_t *ofw_tree_find_peer_by_device_type(ofw_tree_node_t *node, const char *name)
ofw_tree_node_t *
ofw_tree_find_peer_by_device_type(ofw_tree_node_t *node, const char *name)
{
ofw_tree_node_t *cur;
ofw_tree_property_t *prop;
202,15 → 211,41
}
 
 
/** Lookup first peer of given name.
*
* @param node Node whose peer is being looked up.
* @param name Name of the child being looked up.
*
* @return NULL if there is no such peer or pointer to the matching
* peer node.
*/
ofw_tree_node_t *
ofw_tree_find_peer_by_name(ofw_tree_node_t *node, const char *name)
{
ofw_tree_node_t *cur;
ofw_tree_property_t *prop;
for (cur = node->peer; cur; cur = cur->peer) {
prop = ofw_tree_getprop(cur, "name");
if (!prop || !prop->value)
continue;
if (strcmp(prop->value, name) == 0)
return cur;
}
return NULL;
}
 
/** Lookup OpenFirmware node by its path.
*
* @param path Path to the node.
* @param path Path to the node.
*
* @return NULL if there is no such node or pointer to the leaf node.
* @return NULL if there is no such node or pointer to the leaf
* node.
*/
ofw_tree_node_t *ofw_tree_lookup(const char *path)
{
char buf[NAME_BUF_LEN+1];
char buf[NAME_BUF_LEN + 1];
ofw_tree_node_t *node = ofw_root;
index_t i, j;
236,8 → 271,8
* Child nodes are processed recursively and peer nodes are processed
* iteratively in order to avoid stack overflow.
*
* @param node Root of the subtree.
* @param path Current path, NULL for the very root of the entire tree.
* @param node Root of the subtree.
* @param path Current path, NULL for the very root of the entire tree.
*/
static void ofw_tree_node_print(const ofw_tree_node_t *node, const char *path)
{
/branches/dynload/kernel/genarch/src/ofw/pci.c
49,7 → 49,8
 
#define PCI_IGN 0x1f
 
bool ofw_pci_apply_ranges(ofw_tree_node_t *node, ofw_pci_reg_t *reg, uintptr_t *pa)
bool
ofw_pci_apply_ranges(ofw_tree_node_t *node, ofw_pci_reg_t *reg, uintptr_t *pa)
{
ofw_tree_property_t *prop;
ofw_pci_range_t *range;
68,10 → 69,13
unsigned int i;
for (i = 0; i < ranges; i++) {
if ((reg->space & PCI_SPACE_MASK) != (range[i].space & PCI_SPACE_MASK))
if ((reg->space & PCI_SPACE_MASK) !=
(range[i].space & PCI_SPACE_MASK))
continue;
if (overlaps(reg->addr, reg->size, range[i].child_base, range[i].size)) {
*pa = range[i].parent_base + (reg->addr - range[i].child_base);
if (overlaps(reg->addr, reg->size, range[i].child_base,
range[i].size)) {
*pa = range[i].parent_base +
(reg->addr - range[i].child_base);
return true;
}
}
79,7 → 83,9
return false;
}
 
bool ofw_pci_reg_absolutize(ofw_tree_node_t *node, ofw_pci_reg_t *reg, ofw_pci_reg_t *out)
bool
ofw_pci_reg_absolutize(ofw_tree_node_t *node, ofw_pci_reg_t *reg,
ofw_pci_reg_t *out)
{
if (reg->space & PCI_ABS_MASK) {
/* already absolute */
103,7 → 109,8
unsigned int i;
for (i = 0; i < assigned_addresses; i++) {
if ((assigned_address[i].space & PCI_REG_MASK) == (reg->space & PCI_REG_MASK)) {
if ((assigned_address[i].space & PCI_REG_MASK) ==
(reg->space & PCI_REG_MASK)) {
out->space = assigned_address[i].space;
out->addr = reg->addr + assigned_address[i].addr;
out->size = reg->size;
119,7 → 126,9
* So far, we only know how to map interrupts of non-PCI devices connected
* to a PCI bridge.
*/
bool ofw_pci_map_interrupt(ofw_tree_node_t *node, ofw_pci_reg_t *reg, int ino, int *inr)
bool
ofw_pci_map_interrupt(ofw_tree_node_t *node, ofw_pci_reg_t *reg, int ino,
int *inr, cir_t *cir, void **cir_arg)
{
pci_t *pci = node->device;
if (!pci) {
132,6 → 141,8
pci_enable_interrupt(pci, ino);
 
*inr = (PCI_IGN << IGN_SHIFT) | ino;
*cir = pci_clear_interrupt;
*cir_arg = pci;
 
return true;
}
/branches/dynload/kernel/generic/include/byteorder.h
51,6 → 51,14
#define uint32_t_be2host(n) (n)
#define uint64_t_be2host(n) (n)
 
#define host2uint16_t_le(n) uint16_t_byteorder_swap(n)
#define host2uint32_t_le(n) uint32_t_byteorder_swap(n)
#define host2uint64_t_le(n) uint64_t_byteorder_swap(n)
 
#define host2uint16_t_be(n) (n)
#define host2uint32_t_be(n) (n)
#define host2uint64_t_be(n) (n)
 
#else
 
#define uint16_t_le2host(n) (n)
61,6 → 69,14
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n)
 
#define host2uint16_t_le(n) (n)
#define host2uint32_t_le(n) (n)
#define host2uint64_t_le(n) (n)
 
#define host2uint16_t_be(n) uint16_t_byteorder_swap(n)
#define host2uint32_t_be(n) uint32_t_byteorder_swap(n)
#define host2uint64_t_be(n) uint64_t_byteorder_swap(n)
 
#endif
 
static inline uint64_t uint64_t_byteorder_swap(uint64_t n)
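The new host2*_le/_be macros are the outbound counterparts of the existing *_le2host/*_be2host ones: on a big-endian host the _le variants byte-swap and the _be variants are identities, and the other way around on a little-endian host. A small usage sketch for a little-endian on-disk field (the header variable is illustrative):

uint32_t len = 512;
header.length = host2uint32_t_le(len);		/* always stored little-endian, whatever the host order */
len = uint32_t_le2host(header.length);		/* and converted back when reading */
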
/branches/dynload/kernel/generic/include/proc/task.h
53,6 → 53,7
#include <mm/tlb.h>
#include <proc/scheduler.h>
#include <udebug/udebug.h>
#include <ipc/kbox.h>
 
#define TASK_NAME_BUFLEN 20
 
98,19 → 99,13
atomic_t active_calls;
 
#ifdef CONFIG_UDEBUG
/** Debugging stuff */
/** Debugging stuff. */
udebug_task_t udebug;
 
/** Kernel answerbox */
answerbox_t kernel_box;
/** Thread used to service kernel answerbox */
struct thread *kb_thread;
/** Kbox thread creation vs. begin of cleanup mutual exclusion */
mutex_t kb_cleanup_lock;
/** True if cleanup of kbox has already started */
bool kb_finished;
/** Kernel answerbox. */
kbox_t kb;
#endif
 
/** Architecture specific task data. */
task_arch_t arch;
/branches/dynload/kernel/generic/include/udebug/udebug.h
147,9 → 147,7
/** BEGIN operation in progress (waiting for threads to stop) */
UDEBUG_TS_BEGINNING,
/** Debugger fully connected */
UDEBUG_TS_ACTIVE,
/** Task is shutting down, no more debug activities allowed */
UDEBUG_TS_SHUTDOWN
UDEBUG_TS_ACTIVE
} udebug_task_state_t;
 
/** Debugging part of task_t structure.
169,25 → 167,19
/** Debugging part of thread_t structure.
*/
typedef struct {
/**
* Prevent deadlock with udebug_before_thread_runs() in interrupt
* handler, without actually disabling interrupts.
* ==0 means "unlocked", >0 means "locked"
*/
atomic_t int_lock;
 
/** Synchronize debug ops on this thread / access to this structure */
/** Synchronize debug ops on this thread / access to this structure. */
mutex_t lock;
 
waitq_t go_wq;
call_t *go_call;
unative_t syscall_args[6];
istate_t *uspace_state;
 
/** What type of event are we stopped in or 0 if none */
udebug_event_t cur_event;
bool stop;
bool stoppable;
bool debug_active; /**< In a debugging session */
/** What type of event are we stopped in or 0 if none. */
udebug_event_t cur_event;
bool go; /**< thread is GO */
bool stoppable; /**< thread is stoppable */
bool debug_active; /**< thread is in a debugging session */
} udebug_thread_t;
 
struct task;
200,7 → 192,7
unative_t a4, unative_t a5, unative_t a6, unative_t id, unative_t rc,
bool end_variant);
 
void udebug_thread_b_event(struct thread *t);
void udebug_thread_b_event_attach(struct thread *t, struct task *ta);
void udebug_thread_e_event(void);
 
void udebug_stoppable_begin(void);
/branches/dynload/kernel/generic/include/ddi/irq.h
83,6 → 83,9
struct irq;
typedef void (* irq_handler_t)(struct irq *irq, void *arg, ...);
 
/** Type for function used to clear the interrupt. */
typedef void (* cir_t)(void *arg, inr_t inr);
 
/** IPC notification config structure.
*
* Primarily, this structure is encapsulated in the irq_t structure.
144,6 → 147,11
/** Argument for the handler. */
void *arg;
 
/** Clear interrupt routine. */
cir_t cir;
/** First argument to the clear interrupt routine. */
void *cir_arg;
 
/** Notification configuration structure. */
ipc_notif_cfg_t notif_cfg;
} irq_t;
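The new cir/cir_arg pair lets a driver delegate the bus-specific interrupt acknowledge (for example the sparc64 FHC or PCI controller clear-interrupt register) to whichever code mapped the interrupt, instead of hard-wiring it as the old z8530 handler did. Drivers fill the two fields at registration time exactly as the hunks below show; presumably the generic or arch-level IRQ dispatch then invokes the routine after the handler runs, roughly as in this sketch (not code from this revision):

if (irq->cir)
	irq->cir(irq->cir_arg, irq->inr);	/* bus-level interrupt clear, if one was registered */
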
/branches/dynload/kernel/generic/include/adt/bitmap.h
49,6 → 49,14
extern void bitmap_clear_range(bitmap_t *bitmap, index_t start, count_t bits);
extern void bitmap_copy(bitmap_t *dst, bitmap_t *src, count_t bits);
 
static inline int bitmap_get(bitmap_t *bitmap,index_t bit)
{
if(bit >= bitmap->bits)
return 0;
return !! ((bitmap->map)[bit/8] & (1 << (bit & 7)));
}
 
 
#endif
 
/** @}
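bitmap_get() is the read-side companion to the range operations declared above it: out-of-range indices read as 0 and the double negation normalises the masked byte to 0 or 1. A minimal usage sketch built directly on the two fields the function touches (the storage layout here is illustrative):

uint8_t storage[8] = { 0 };
bitmap_t bm = { .map = storage, .bits = 64 };

storage[0] |= (1 << 3);			/* set bit 3 by hand */
ASSERT(bitmap_get(&bm, 3) == 1);
ASSERT(bitmap_get(&bm, 1000) == 0);	/* past the end reads as 0 */
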
/branches/dynload/kernel/generic/include/ipc/kbox.h
37,6 → 37,18
 
#include <typedefs.h>
 
/** Kernel answerbox structure. */
typedef struct kbox {
/** The answerbox itself. */
answerbox_t box;
/** Thread used to service the answerbox. */
struct thread *thread;
/** Kbox thread creation vs. begin of cleanup mutual exclusion. */
mutex_t cleanup_lock;
/** True if cleanup of kbox has already started. */
bool finished;
} kbox_t;
 
extern int ipc_connect_kbox(task_id_t);
extern void ipc_kbox_cleanup(void);
 
/branches/dynload/kernel/generic/src/synch/futex.c
115,6 → 115,7
uintptr_t paddr;
pte_t *t;
ipl_t ipl;
int rc;
ipl = interrupts_disable();
 
134,9 → 135,17
interrupts_restore(ipl);
 
futex = futex_find(paddr);
return (unative_t) waitq_sleep_timeout(&futex->wq, usec, flags |
 
#ifdef CONFIG_UDEBUG
udebug_stoppable_begin();
#endif
rc = waitq_sleep_timeout(&futex->wq, usec, flags |
SYNCH_FLAGS_INTERRUPTIBLE);
 
#ifdef CONFIG_UDEBUG
udebug_stoppable_end();
#endif
return (unative_t) rc;
}
 
/** Wakeup one thread waiting in futex wait queue.
/branches/dynload/kernel/generic/src/interrupt/interrupt.c
86,8 → 86,17
void exc_dispatch(int n, istate_t *istate)
{
ASSERT(n < IVT_ITEMS);
 
#ifdef CONFIG_UDEBUG
if (THREAD) THREAD->udebug.uspace_state = istate;
#endif
exc_table[n].f(n + IVT_FIRST, istate);
 
#ifdef CONFIG_UDEBUG
if (THREAD) THREAD->udebug.uspace_state = NULL;
#endif
 
/* This is a safe place to exit exiting thread */
if (THREAD && THREAD->interrupted && istate_from_uspace(istate))
thread_exit();
/branches/dynload/kernel/generic/src/time/clock.c
190,6 → 190,14
if (!ticks && !PREEMPTION_DISABLED) {
scheduler();
#ifdef CONFIG_UDEBUG
/*
* Give udebug chance to stop the thread
* before it begins executing.
*/
if (istate_from_uspace(THREAD->udebug.uspace_state))
udebug_before_thread_runs();
#endif
}
}
 
/branches/dynload/kernel/generic/src/ddi/irq.c
145,6 → 145,8
irq->claim = NULL;
irq->handler = NULL;
irq->arg = NULL;
irq->cir = NULL;
irq->cir_arg = NULL;
irq->notif_cfg.notify = false;
irq->notif_cfg.answerbox = NULL;
irq->notif_cfg.code = NULL;
/branches/dynload/kernel/generic/src/proc/task.c
164,10 → 164,10
udebug_task_init(&ta->udebug);
 
/* Init kbox stuff */
ipc_answerbox_init(&ta->kernel_box, ta);
ta->kb_thread = NULL;
mutex_initialize(&ta->kb_cleanup_lock, MUTEX_PASSIVE);
ta->kb_finished = false;
ipc_answerbox_init(&ta->kb.box, ta);
ta->kb.thread = NULL;
mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
ta->kb.finished = false;
#endif
 
ipc_answerbox_init(&ta->answerbox, ta);
/branches/dynload/kernel/generic/src/proc/thread.c
763,14 → 763,20
return (unative_t) rc;
}
}
#ifdef CONFIG_UDEBUG
/*
* Generate udebug THREAD_B event and attach the thread.
* This must be done atomically (with the debug locks held),
* otherwise we would either miss some thread or receive
* THREAD_B events for threads that already existed
* and could be detected with THREAD_READ before.
*/
udebug_thread_b_event_attach(t, TASK);
#else
thread_attach(t, TASK);
#endif
thread_ready(t);
 
#ifdef CONFIG_UDEBUG
/* Generate udebug THREAD_B event */
udebug_thread_b_event(t);
#endif
 
return 0;
} else
free(kernel_uarg);
/branches/dynload/kernel/generic/src/mm/tlb.c
134,9 → 134,7
 
void tlb_shootdown_ipi_send(void)
{
#ifndef ia64
ipi_broadcast(VECTOR_TLB_SHOOTDOWN_IPI);
#endif
}
 
/** Receive TLB shootdown message. */
/branches/dynload/kernel/generic/src/syscall/syscall.c
112,11 → 112,7
#ifdef CONFIG_UDEBUG
udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, 0, false);
#endif
 
if (id < SYSCALL_END) {
#ifdef CONFIG_UDEBUG
udebug_stoppable_begin();
#endif
rc = syscall_table[id](a1, a2, a3, a4, a5, a6);
} else {
printf("Task %" PRIu64": Unknown syscall %#" PRIxn, TASK->taskid, id);
129,9 → 125,14
 
#ifdef CONFIG_UDEBUG
udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, rc, true);
 
/*
* Stopping point needed for tasks that only invoke non-blocking
* system calls.
*/
udebug_stoppable_begin();
udebug_stoppable_end();
#endif
#endif
return rc;
}
 
/branches/dynload/kernel/generic/src/ipc/kbox.c
48,14 → 48,20
ipl_t ipl;
bool have_kb_thread;
 
/* Only hold kb_cleanup_lock while setting kb_finished - this is enough */
mutex_lock(&TASK->kb_cleanup_lock);
TASK->kb_finished = true;
mutex_unlock(&TASK->kb_cleanup_lock);
/*
* Only hold kb.cleanup_lock while setting kb.finished -
* this is enough.
*/
mutex_lock(&TASK->kb.cleanup_lock);
TASK->kb.finished = true;
mutex_unlock(&TASK->kb.cleanup_lock);
 
have_kb_thread = (TASK->kb_thread != NULL);
have_kb_thread = (TASK->kb.thread != NULL);
 
/* From now on nobody will try to connect phones or attach kbox threads */
/*
* From now on nobody will try to connect phones or attach
* kbox threads
*/
 
/*
* Disconnect all phones connected to our kbox. Passing true for
63,7 → 69,7
* disconnected phone. This ensures the kbox thread is going to
* wake up and terminate.
*/
ipc_answerbox_slam_phones(&TASK->kernel_box, have_kb_thread);
ipc_answerbox_slam_phones(&TASK->kb.box, have_kb_thread);
 
/*
* If the task was being debugged, clean up debugging session.
77,18 → 83,18
interrupts_restore(ipl);
if (have_kb_thread) {
LOG("join kb_thread..\n");
thread_join(TASK->kb_thread);
thread_detach(TASK->kb_thread);
LOG("join kb.thread..\n");
thread_join(TASK->kb.thread);
thread_detach(TASK->kb.thread);
LOG("join done\n");
TASK->kb_thread = NULL;
TASK->kb.thread = NULL;
}
 
/* Answer all messages in 'calls' and 'dispatched_calls' queues */
spinlock_lock(&TASK->kernel_box.lock);
ipc_cleanup_call_list(&TASK->kernel_box.dispatched_calls);
ipc_cleanup_call_list(&TASK->kernel_box.calls);
spinlock_unlock(&TASK->kernel_box.lock);
/* Answer all messages in 'calls' and 'dispatched_calls' queues. */
spinlock_lock(&TASK->kb.box.lock);
ipc_cleanup_call_list(&TASK->kb.box.dispatched_calls);
ipc_cleanup_call_list(&TASK->kb.box.calls);
spinlock_unlock(&TASK->kb.box.lock);
}
 
/** Handle hangup message in kbox.
105,7 → 111,7
 
/* Was it our debugger, who hung up? */
if (call->sender == TASK->udebug.debugger) {
/* Terminate debugging session (if any) */
/* Terminate debugging session (if any). */
LOG("kbox: terminate debug session\n");
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
118,7 → 124,7
 
LOG("kbox: continue with hangup message\n");
IPC_SET_RETVAL(call->data, 0);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
 
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
130,13 → 136,13
*/
 
/* Only detach kbox thread unless already terminating. */
mutex_lock(&TASK->kb_cleanup_lock);
if (&TASK->kb_finished == false) {
mutex_lock(&TASK->kb.cleanup_lock);
if (&TASK->kb.finished == false) {
/* Detach kbox thread so it gets freed from memory. */
thread_detach(TASK->kb_thread);
TASK->kb_thread = NULL;
thread_detach(TASK->kb.thread);
TASK->kb.thread = NULL;
}
mutex_unlock(&TASK->kb_cleanup_lock);
mutex_unlock(&TASK->kb.cleanup_lock);
 
LOG("phone list is empty\n");
*last = true;
166,7 → 172,7
done = false;
 
while (!done) {
call = ipc_wait_for_call(&TASK->kernel_box, SYNCH_NO_TIMEOUT,
call = ipc_wait_for_call(&TASK->kb.box, SYNCH_NO_TIMEOUT,
SYNCH_FLAGS_NONE);
 
if (call == NULL)
201,10 → 207,10
/**
* Connect phone to a task kernel-box specified by id.
*
* Note that this is not completely atomic. For optimisation reasons,
* The task might start cleaning up kbox after the phone has been connected
* and before a kbox thread has been created. This must be taken into account
* in the cleanup code.
* Note that this is not completely atomic. For optimisation reasons, the task
* might start cleaning up kbox after the phone has been connected and before
* a kbox thread has been created. This must be taken into account in the
* cleanup code.
*
* @return Phone id on success, or negative error code.
*/
230,44 → 236,45
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
 
mutex_lock(&ta->kb_cleanup_lock);
mutex_lock(&ta->kb.cleanup_lock);
 
if (atomic_predec(&ta->refcount) == 0) {
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
task_destroy(ta);
return ENOENT;
}
 
if (ta->kb_finished != false) {
mutex_unlock(&ta->kb_cleanup_lock);
if (ta->kb.finished != false) {
mutex_unlock(&ta->kb.cleanup_lock);
return EINVAL;
}
 
newphid = phone_alloc();
if (newphid < 0) {
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
return ELIMIT;
}
 
/* Connect the newly allocated phone to the kbox */
ipc_phone_connect(&TASK->phones[newphid], &ta->kernel_box);
ipc_phone_connect(&TASK->phones[newphid], &ta->kb.box);
 
if (ta->kb_thread != NULL) {
mutex_unlock(&ta->kb_cleanup_lock);
if (ta->kb.thread != NULL) {
mutex_unlock(&ta->kb.cleanup_lock);
return newphid;
}
 
/* Create a kbox thread */
kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0, "kbox", false);
kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0,
"kbox", false);
if (!kb_thread) {
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
return ENOMEM;
}
 
ta->kb_thread = kb_thread;
ta->kb.thread = kb_thread;
thread_ready(kb_thread);
 
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
 
return newphid;
}
/branches/dynload/kernel/generic/src/ipc/sysipc.c
455,10 → 455,17
IPC_SET_ARG5(call.data, 0);
 
if (!(res = request_preprocess(&call, phone))) {
#ifdef CONFIG_UDEBUG
udebug_stoppable_begin();
#endif
rc = ipc_call_sync(phone, &call);
#ifdef CONFIG_UDEBUG
udebug_stoppable_end();
#endif
if (rc != EOK)
return rc;
process_answer(&call);
 
} else {
IPC_SET_RETVAL(call.data, res);
}
495,7 → 502,13
GET_CHECK_PHONE(phone, phoneid, return ENOENT);
 
if (!(res = request_preprocess(&call, phone))) {
#ifdef CONFIG_UDEBUG
udebug_stoppable_begin();
#endif
rc = ipc_call_sync(phone, &call);
#ifdef CONFIG_UDEBUG
udebug_stoppable_end();
#endif
if (rc != EOK)
return rc;
process_answer(&call);
798,9 → 811,17
{
call_t *call;
 
restart:
restart:
 
#ifdef CONFIG_UDEBUG
udebug_stoppable_begin();
#endif
call = ipc_wait_for_call(&TASK->answerbox, usec,
flags | SYNCH_FLAGS_INTERRUPTIBLE);
 
#ifdef CONFIG_UDEBUG
udebug_stoppable_end();
#endif
if (!call)
return 0;
 
/branches/dynload/kernel/generic/src/ipc/irq.c
100,7 → 100,7
*((uint64_t *) code->cmds[i].addr) =
code->cmds[i].value;
break;
#if defined(ia32) || defined(amd64)
#if defined(ia32) || defined(amd64) || defined(ia64)
case CMD_PORT_READ_1:
dstval = inb((long) code->cmds[i].addr);
break;
/branches/dynload/kernel/generic/src/udebug/udebug_ipc.c
73,7 → 73,7
rc = udebug_begin(call);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
83,7 → 83,7
*/
if (rc != 0) {
IPC_SET_RETVAL(call->data, 0);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
}
 
99,7 → 99,7
rc = udebug_end();
 
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process a SET_EVMASK call.
116,7 → 116,7
rc = udebug_set_evmask(mask);
 
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
 
135,7 → 135,7
rc = udebug_go(t, call);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
}
154,7 → 154,7
 
rc = udebug_stop(t, call);
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process a THREAD_READ call.
182,7 → 182,7
rc = udebug_thread_read(&buffer, buf_size, &n);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
209,7 → 209,7
IPC_SET_ARG3(call->data, total_bytes);
call->buffer = buffer;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process an ARGS_READ call.
229,7 → 229,7
rc = udebug_args_read(t, &buffer);
if (rc != EOK) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
247,7 → 247,7
IPC_SET_ARG2(call->data, 6 * sizeof(unative_t));
call->buffer = buffer;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process an MEM_READ call.
270,7 → 270,7
rc = udebug_mem_read(uspace_src, size, &buffer);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
282,7 → 282,7
IPC_SET_ARG2(call->data, size);
call->buffer = buffer;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Handle a debug call received on the kernel answerbox.
306,7 → 306,7
*/
if (TASK->udebug.debugger != call->sender) {
IPC_SET_RETVAL(call->data, EINVAL);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
}
/branches/dynload/kernel/generic/src/udebug/udebug.c
35,18 → 35,6
* @brief Udebug hooks and data structure management.
*
* Udebug is an interface that makes userspace debuggers possible.
*
* Functions in this file are executed directly in each thread, which
* may or may not be the subject of debugging. The udebug_stoppable_begin/end()
* functions are also executed in the clock interrupt handler. To avoid
* deadlock, functions in this file are protected from the interrupt
* by locking the recursive lock THREAD->udebug.int_lock (just an atomic
* variable). This prevents udebug_stoppable_begin/end() from being
* executed in the interrupt handler (they are skipped).
*
* Functions in udebug_ops.c and udebug_ipc.c execute in different threads,
* so they needn't be protected from the (preemptible) interrupt-initiated
* code.
*/
#include <synch/waitq.h>
55,16 → 43,7
#include <errno.h>
#include <arch.h>
 
static inline void udebug_int_lock(void)
{
atomic_inc(&THREAD->udebug.int_lock);
}
 
static inline void udebug_int_unlock(void)
{
atomic_dec(&THREAD->udebug.int_lock);
}
 
/** Initialize udebug part of task structure.
*
* Called as part of task structure initialization.
89,13 → 68,9
mutex_initialize(&ut->lock, MUTEX_PASSIVE);
waitq_initialize(&ut->go_wq);
 
/*
* At the beginning the thread is stoppable, so int_lock be set, too.
*/
atomic_set(&ut->int_lock, 1);
 
ut->go_call = NULL;
ut->stop = true;
ut->uspace_state = NULL;
ut->go = false;
ut->stoppable = true;
ut->debug_active = false;
ut->cur_event = 0; /* none */
161,11 → 136,8
ASSERT(THREAD);
ASSERT(TASK);
 
udebug_int_lock();
 
/* Early check for undebugged tasks */
if (!udebug_thread_precheck()) {
udebug_int_unlock();
return;
}
 
198,7 → 170,8
* Active debugging session
*/
 
if (THREAD->udebug.debug_active && THREAD->udebug.stop) {
if (THREAD->udebug.debug_active == true &&
THREAD->udebug.go == false) {
/*
* Thread was requested to stop - answer go call
*/
230,7 → 203,6
{
/* Early check for undebugged tasks */
if (!udebug_thread_precheck()) {
udebug_int_unlock();
return;
}
 
239,7 → 211,7
mutex_lock(&THREAD->udebug.lock);
 
if (THREAD->udebug.debug_active &&
THREAD->udebug.stop == true) {
THREAD->udebug.go == false) {
TASK->udebug.begin_call = NULL;
mutex_unlock(&THREAD->udebug.lock);
mutex_unlock(&TASK->udebug.lock);
247,7 → 219,7
udebug_wait_for_go(&THREAD->udebug.go_wq);
 
goto restart;
/* must try again - have to lose stoppability atomically */
/* Must try again - have to lose stoppability atomically. */
} else {
++TASK->udebug.not_stoppable_count;
ASSERT(THREAD->udebug.stoppable == true);
256,44 → 228,17
mutex_unlock(&THREAD->udebug.lock);
mutex_unlock(&TASK->udebug.lock);
}
 
udebug_int_unlock();
}
 
/** Upon being scheduled to run, check if the current thread should stop.
*
* This function is called from clock(). Preemption is enabled.
* interrupts are disabled, but since this is called after
* being scheduled-in, we can enable them, if we're careful enough
* not to allow arbitrary recursion or deadlock with the thread context.
* This function is called from clock().
*/
void udebug_before_thread_runs(void)
{
ipl_t ipl;
 
return;
ASSERT(!PREEMPTION_DISABLED);
 
/*
* Prevent agains re-entering, such as when preempted inside this
* function.
*/
if (atomic_get(&THREAD->udebug.int_lock) != 0)
return;
 
udebug_int_lock();
 
ipl = interrupts_enable();
 
/* Now we're free to do whatever we need (lock mutexes, sleep, etc.) */
 
/* Check if we're supposed to stop */
udebug_stoppable_begin();
udebug_stoppable_end();
 
interrupts_restore(ipl);
 
udebug_int_unlock();
}
 
/** Syscall event hook.
310,11 → 255,8
 
etype = end_variant ? UDEBUG_EVENT_SYSCALL_E : UDEBUG_EVENT_SYSCALL_B;
 
udebug_int_lock();
 
/* Early check for undebugged tasks */
if (!udebug_thread_precheck()) {
udebug_int_unlock();
return;
}
 
321,9 → 263,9
mutex_lock(&TASK->udebug.lock);
mutex_lock(&THREAD->udebug.lock);
 
/* Must only generate events when in debugging session and have go */
/* Must only generate events when in debugging session and is go. */
if (THREAD->udebug.debug_active != true ||
THREAD->udebug.stop == true ||
THREAD->udebug.go == false ||
(TASK->udebug.evmask & UDEBUG_EVMASK(etype)) == 0) {
mutex_unlock(&THREAD->udebug.lock);
mutex_unlock(&TASK->udebug.lock);
348,11 → 290,11
THREAD->udebug.syscall_args[5] = a6;
 
/*
* Make sure udebug.stop is true when going to sleep
* Make sure udebug.go is false when going to sleep
* in case we get woken up by DEBUG_END. (At which
* point it must be back to the initial true value).
*/
THREAD->udebug.stop = true;
THREAD->udebug.go = false;
THREAD->udebug.cur_event = etype;
 
ipc_answer(&TASK->answerbox, call);
361,35 → 303,41
mutex_unlock(&TASK->udebug.lock);
 
udebug_wait_for_go(&THREAD->udebug.go_wq);
 
udebug_int_unlock();
}
 
/** Thread-creation event hook.
/** Thread-creation event hook combined with attaching the thread.
*
* Must be called when a new userspace thread is created in the debugged
* task. Generates a THREAD_B event.
* task. Generates a THREAD_B event. Also attaches the thread @a t
* to the task @a ta.
*
* This is necessary to avoid a race condition where the BEGIN and THREAD_READ
* requests would be handled in between attaching the thread and checking it
* for being in a debugging session to send the THREAD_B event. We could then
* either miss threads or get some threads both in the thread list
* and get a THREAD_B event for them.
*
* @param t Structure of the thread being created. Not locked, as the
* thread is not executing yet.
* @param ta Task to which the thread should be attached.
*/
void udebug_thread_b_event(struct thread *t)
void udebug_thread_b_event_attach(struct thread *t, struct task *ta)
{
call_t *call;
 
udebug_int_lock();
 
mutex_lock(&TASK->udebug.lock);
mutex_lock(&THREAD->udebug.lock);
 
thread_attach(t, ta);
 
LOG("udebug_thread_b_event\n");
LOG("- check state\n");
 
/* Must only generate events when in debugging session */
if (THREAD->udebug.debug_active != true) {
LOG("- debug_active: %s, udebug.stop: %s\n",
LOG("- debug_active: %s, udebug.go: %s\n",
THREAD->udebug.debug_active ? "yes(+)" : "no(-)",
THREAD->udebug.stop ? "yes(-)" : "no(+)");
THREAD->udebug.go ? "yes(-)" : "no(+)");
mutex_unlock(&THREAD->udebug.lock);
mutex_unlock(&TASK->udebug.lock);
return;
404,11 → 352,11
IPC_SET_ARG2(call->data, (unative_t)t);
 
/*
* Make sure udebug.stop is true when going to sleep
* Make sure udebug.go is false when going to sleep
* in case we get woken up by DEBUG_END. (At which
* point it must be back to the initial true value).
*/
THREAD->udebug.stop = true;
THREAD->udebug.go = false;
THREAD->udebug.cur_event = UDEBUG_EVENT_THREAD_B;
 
ipc_answer(&TASK->answerbox, call);
418,8 → 366,6
 
LOG("- sleep\n");
udebug_wait_for_go(&THREAD->udebug.go_wq);
 
udebug_int_unlock();
}
 
/** Thread-termination event hook.
431,8 → 377,6
{
call_t *call;
 
udebug_int_lock();
 
mutex_lock(&TASK->udebug.lock);
mutex_lock(&THREAD->udebug.lock);
 
439,11 → 383,11
LOG("udebug_thread_e_event\n");
LOG("- check state\n");
 
/* Must only generate events when in debugging session */
/* Must only generate events when in debugging session. */
if (THREAD->udebug.debug_active != true) {
/* printf("- debug_active: %s, udebug.stop: %s\n",
/* printf("- debug_active: %s, udebug.go: %s\n",
THREAD->udebug.debug_active ? "yes(+)" : "no(-)",
THREAD->udebug.stop ? "yes(-)" : "no(+)");*/
THREAD->udebug.go ? "yes(-)" : "no(+)");*/
mutex_unlock(&THREAD->udebug.lock);
mutex_unlock(&TASK->udebug.lock);
return;
456,10 → 400,10
IPC_SET_RETVAL(call->data, 0);
IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_E);
 
/* Prevent any further debug activity in thread */
/* Prevent any further debug activity in thread. */
THREAD->udebug.debug_active = false;
THREAD->udebug.cur_event = 0; /* none */
THREAD->udebug.stop = true; /* set to initial value */
THREAD->udebug.go = false; /* set to initial value */
 
ipc_answer(&TASK->answerbox, call);
 
466,8 → 410,10
mutex_unlock(&THREAD->udebug.lock);
mutex_unlock(&TASK->udebug.lock);
 
/* Leave int_lock enabled */
/* This event does not sleep - debugging has finished in this thread */
/*
* This event does not sleep - debugging has finished
* in this thread.
*/
}
 
/**
490,8 → 436,6
LOG("udebug_task_cleanup()\n");
LOG("task %" PRIu64 "\n", ta->taskid);
 
udebug_int_lock();
 
if (ta->udebug.dt_state != UDEBUG_TS_BEGINNING &&
ta->udebug.dt_state != UDEBUG_TS_ACTIVE) {
LOG("udebug_task_cleanup(): task not being debugged\n");
512,19 → 456,19
spinlock_unlock(&t->lock);
interrupts_restore(ipl);
 
/* Only process userspace threads */
/* Only process userspace threads. */
if ((flags & THREAD_FLAG_USPACE) != 0) {
/* Prevent any further debug activity in thread */
/* Prevent any further debug activity in thread. */
t->udebug.debug_active = false;
t->udebug.cur_event = 0; /* none */
 
/* Still has go? */
if (t->udebug.stop == false) {
/* Is the thread still go? */
if (t->udebug.go == true) {
/*
* Yes, so clear go. As debug_active == false,
* this doesn't affect anything.
*/
t->udebug.stop = true;
t->udebug.go = false;
 
/* Answer GO call */
LOG("answer GO call with EVENT_FINISHED\n");
553,8 → 497,6
ta->udebug.dt_state = UDEBUG_TS_INACTIVE;
ta->udebug.debugger = NULL;
 
udebug_int_unlock();
 
return 0;
}
 
/branches/dynload/kernel/generic/src/udebug/udebug_ops.c
57,7 → 57,7
*
* Specifically, verifies that thread t exists, is a userspace thread,
* and belongs to the current task (TASK). Verifies, that the thread
* has (or hasn't) go according to having_go (typically false).
* is (or is not) go according to being_go (typically false).
* It also locks t->udebug.lock, making sure that t->udebug.debug_active
* is true - that the thread is in a valid debugging session.
*
70,11 → 70,11
* the t->lock spinlock to the t->udebug.lock mutex.
*
* @param t Pointer, need not at all be valid.
* @param having_go Required thread state.
* @param being_go Required thread state.
*
* Returns EOK if all went well, or an error code otherwise.
*/
static int _thread_op_begin(thread_t *t, bool having_go)
static int _thread_op_begin(thread_t *t, bool being_go)
{
task_id_t taskid;
ipl_t ipl;
98,7 → 98,7
spinlock_lock(&t->lock);
spinlock_unlock(&threads_lock);
 
/* Verify that 't' is a userspace thread */
/* Verify that 't' is a userspace thread. */
if ((t->flags & THREAD_FLAG_USPACE) == 0) {
/* It's not, deny its existence */
spinlock_unlock(&t->lock);
107,7 → 107,7
return ENOENT;
}
 
/* Verify debugging state */
/* Verify debugging state. */
if (t->udebug.debug_active != true) {
/* Not in debugging session or undesired GO state */
spinlock_unlock(&t->lock);
124,9 → 124,9
spinlock_unlock(&t->lock);
interrupts_restore(ipl);
 
/* Only mutex TASK->udebug.lock left */
/* Only mutex TASK->udebug.lock left. */
/* Now verify that the thread belongs to the current task */
/* Now verify that the thread belongs to the current task. */
if (t->task != TASK) {
/* No such thread belonging this task*/
mutex_unlock(&TASK->udebug.lock);
139,18 → 139,18
*/
mutex_lock(&t->udebug.lock);
 
/* The big task mutex is no longer needed */
/* The big task mutex is no longer needed. */
mutex_unlock(&TASK->udebug.lock);
 
if (!t->udebug.stop != having_go) {
/* Not in debugging session or undesired GO state */
if (t->udebug.go != being_go) {
/* Not in debugging session or undesired GO state. */
mutex_unlock(&t->udebug.lock);
return EINVAL;
}
 
/* Only t->udebug.lock left */
/* Only t->udebug.lock left. */
 
return EOK; /* All went well */
return EOK; /* All went well. */
}
 
/** End debugging operation on a thread. */
204,7 → 204,7
reply = 0; /* no reply */
}
/* Set udebug.debug_active on all of the task's userspace threads */
/* Set udebug.debug_active on all of the task's userspace threads. */
 
for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
t = list_get_instance(cur, thread_t, th_link);
273,7 → 273,7
 
/** Give thread GO.
*
* Upon recieving a go message, the thread is given GO. Having GO
* Upon receiving a go message, the thread is given GO. Being GO
* means the thread is allowed to execute userspace code (until
* a debugging event or STOP occurs, at which point the thread loses GO.
*
284,7 → 284,7
{
int rc;
 
/* On success, this will lock t->udebug.lock */
/* On success, this will lock t->udebug.lock. */
rc = _thread_op_begin(t, false);
if (rc != EOK) {
return rc;
291,11 → 291,11
}
 
t->udebug.go_call = call;
t->udebug.stop = false;
t->udebug.go = true;
t->udebug.cur_event = 0; /* none */
 
/*
* Neither t's lock nor threads_lock may be held during wakeup
* Neither t's lock nor threads_lock may be held during wakeup.
*/
waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST);
 
317,7 → 317,6
int rc;
 
LOG("udebug_stop()\n");
mutex_lock(&TASK->udebug.lock);
 
/*
* On success, this will lock t->udebug.lock. Note that this makes sure
328,21 → 327,21
return rc;
}
 
/* Take GO away from the thread */
t->udebug.stop = true;
/* Take GO away from the thread. */
t->udebug.go = false;
 
if (!t->udebug.stoppable) {
/* Answer will be sent when the thread becomes stoppable */
if (t->udebug.stoppable != true) {
/* Answer will be sent when the thread becomes stoppable. */
_thread_op_end(t);
return 0;
}
 
/*
* Answer GO call
* Answer GO call.
*/
LOG("udebug_stop - answering go call\n");
 
/* Make sure nobody takes this call away from us */
/* Make sure nobody takes this call away from us. */
call = t->udebug.go_call;
t->udebug.go_call = NULL;
 
354,6 → 353,7
 
_thread_op_end(t);
 
mutex_lock(&TASK->udebug.lock);
ipc_answer(&TASK->answerbox, call);
mutex_unlock(&TASK->udebug.lock);
 
422,7 → 422,7
flags = t->flags;
spinlock_unlock(&t->lock);
 
/* Not interested in kernel threads */
/* Not interested in kernel threads. */
if ((flags & THREAD_FLAG_USPACE) != 0) {
/* Using thread struct pointer as identification hash */
tid = (unative_t) t;
458,16 → 458,16
int rc;
unative_t *arg_buffer;
 
/* Prepare a buffer to hold the arguments */
/* Prepare a buffer to hold the arguments. */
arg_buffer = malloc(6 * sizeof(unative_t), 0);
 
/* On success, this will lock t->udebug.lock */
/* On success, this will lock t->udebug.lock. */
rc = _thread_op_begin(t, false);
if (rc != EOK) {
return rc;
}
 
/* Additionally we need to verify that we are inside a syscall */
/* Additionally we need to verify that we are inside a syscall. */
if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B &&
t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) {
_thread_op_end(t);
474,7 → 474,7
return EINVAL;
}
 
/* Copy to a local buffer before releasing the lock */
/* Copy to a local buffer before releasing the lock. */
memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t));
 
_thread_op_end(t);
/branches/dynload/kernel/Makefile
126,6 → 126,18
DEFS += -DCONFIG_NS16550
endif
 
ifeq ($(CONFIG_I8042_INTERRUPT_DRIVEN),y)
DEFS += -DCONFIG_I8042_INTERRUPT_DRIVEN
endif
 
ifeq ($(CONFIG_NS16550_INTERRUPT_DRIVEN),y)
DEFS += -DCONFIG_NS16550_INTERRUPT_DRIVEN
endif
 
ifeq ($(CONFIG_IOSAPIC),y)
DEFS += -DCONFIG_IOSAPIC
endif
 
ifeq ($(CONFIG_VIRT_IDX_DCACHE),y)
DEFS += -DCONFIG_VIRT_IDX_DCACHE
endif
/branches/dynload/kernel/arch/sparc64/include/regdef.h
55,8 → 55,11
#define WSTATE_NORMAL(n) (n)
#define WSTATE_OTHER(n) ((n) << 3)
 
#define UPA_CONFIG_MID_SHIFT 17
#define UPA_CONFIG_MID_MASK 0x1f
/*
* The following definitions concern the UPA_CONFIG register on US and the
* FIREPLANE_CONFIG register on US3.
*/
#define ICBUS_CONFIG_MID_SHIFT 17
 
#endif
 
/branches/dynload/kernel/arch/sparc64/include/cpu_node.h
0,0 → 1,58
/*
* Copyright (c) 2005 Pavel Rimsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64
* @{
*/
/** @file
*/
 
#ifndef KERN_sparc64_CPU_NODE_H_
#define KERN_sparc64_CPU_NODE_H_
 
#include <genarch/ofw/ofw_tree.h>
 
 
/** Finds the parent node of all the CPU nodes (nodes named "cpu" or "cmp").
*
* Depending on the machine type (and possibly the OFW version), CPUs can be
* at "/" or at "/ssm@0,0".
*/
static inline ofw_tree_node_t *cpus_parent(void)
{
ofw_tree_node_t *parent;
parent = ofw_tree_find_child(ofw_tree_lookup("/"), "ssm@0,0");
if (parent == NULL)
parent = ofw_tree_lookup("/");
return parent;
}
 
#endif
 
/** @}
*/
/branches/dynload/kernel/arch/sparc64/include/arch.h
41,7 → 41,7
#define ASI_AIUS 0x11 /** Access to secondary context with user privileges. */
#define ASI_NUCLEUS_QUAD_LDD 0x24 /** ASI for 16-byte atomic loads. */
#define ASI_DCACHE_TAG 0x47 /** ASI D-Cache Tag. */
#define ASI_UPA_CONFIG 0x4a /** ASI of the UPA_CONFIG register. */
#define ASI_ICBUS_CONFIG 0x4a /** ASI of the UPA_CONFIG/FIREPLANE_CONFIG register. */
 
#define NWINDOWS 8 /** Number of register window sets. */
 
/branches/dynload/kernel/arch/sparc64/include/asm.h
136,6 → 136,28
asm volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0));
}
 
/** Read STICK_compare Register.
*
* @return Value of STICK_compare register.
*/
static inline uint64_t stick_compare_read(void)
{
uint64_t v;
asm volatile ("rd %%asr25, %0\n" : "=r" (v));
return v;
}
 
/** Write STICK_compare Register.
*
* @param v New value of STICK_compare register.
*/
static inline void stick_compare_write(uint64_t v)
{
asm volatile ("wr %0, %1, %%asr25\n" : : "r" (v), "i" (0));
}
 
/** Read TICK Register.
*
* @return Value of TICK register.
407,15 → 429,6
asm volatile ("wrpr %g0, %g0, %tl\n");
}
 
/** Read UPA_CONFIG register.
*
* @return Value of the UPA_CONFIG register.
*/
static inline uint64_t upa_config_read(void)
{
return asi_u64_read(ASI_UPA_CONFIG, 0);
}
 
extern void cpu_halt(void);
extern void cpu_sleep(void);
extern void asm_delay_loop(const uint32_t usec);
/branches/dynload/kernel/arch/sparc64/include/trap/interrupt.h
49,21 → 49,43
 
 
/* Interrupt ASI registers. */
#define ASI_UDB_INTR_W 0x77
#define ASI_INTR_W 0x77
#define ASI_INTR_DISPATCH_STATUS 0x48
#define ASI_UDB_INTR_R 0x7f
#define ASI_INTR_R 0x7f
#define ASI_INTR_RECEIVE 0x49
 
/* VA's used with ASI_UDB_INTR_W register. */
/* VA's used with ASI_INTR_W register. */
#if defined (US)
#define ASI_UDB_INTR_W_DATA_0 0x40
#define ASI_UDB_INTR_W_DATA_1 0x50
#define ASI_UDB_INTR_W_DATA_2 0x60
#define ASI_UDB_INTR_W_DISPATCH 0x70
#elif defined (US3)
#define VA_INTR_W_DATA_0 0x40
#define VA_INTR_W_DATA_1 0x48
#define VA_INTR_W_DATA_2 0x50
#define VA_INTR_W_DATA_3 0x58
#define VA_INTR_W_DATA_4 0x60
#define VA_INTR_W_DATA_5 0x68
#define VA_INTR_W_DATA_6 0x80
#define VA_INTR_W_DATA_7 0x88
#endif
#define VA_INTR_W_DISPATCH 0x70
 
/* VA's used with ASI_UDB_INTR_R register. */
/* VA's used with ASI_INTR_R register. */
#if defined(US)
#define ASI_UDB_INTR_R_DATA_0 0x40
#define ASI_UDB_INTR_R_DATA_1 0x50
#define ASI_UDB_INTR_R_DATA_2 0x60
#elif defined (US3)
#define VA_INTR_R_DATA_0 0x40
#define VA_INTR_R_DATA_1 0x48
#define VA_INTR_R_DATA_2 0x50
#define VA_INTR_R_DATA_3 0x58
#define VA_INTR_R_DATA_4 0x60
#define VA_INTR_R_DATA_5 0x68
#define VA_INTR_R_DATA_6 0x80
#define VA_INTR_R_DATA_7 0x88
#endif
 
/* Shifts in the Interrupt Vector Dispatch virtual address. */
#define INTR_VEC_DISPATCH_MID_SHIFT 14
/branches/dynload/kernel/arch/sparc64/include/mm/frame.h
59,8 → 59,13
union frame_address {
uintptr_t address;
struct {
#if defined (US)
unsigned : 23;
uint64_t pfn : 28; /**< Physical Frame Number. */
#elif defined (US3)
unsigned : 21;
uint64_t pfn : 30; /**< Physical Frame Number. */
#endif
unsigned offset : 13; /**< Offset. */
} __attribute__ ((packed));
};
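(Editorial aside, not part of the revision.) The field widths above add up to each family's physical address space: 28 + 13 = 41 bits on US, 30 + 13 = 43 bits on US3. A minimal usage sketch, assuming the big-endian bit-field layout used on sparc64; the function name is illustrative only:
 
/* Editorial sketch: decompose a physical address with the union above. */
static inline void frame_address_example(void)
{
	union frame_address fa;
 
	fa.address = 0x1fff2000;    /* arbitrary physical address */
	/* With big-endian bit-field allocation: fa.pfn == 0xfff9 (address >> 13),
	   fa.offset == 0x0 (address & 0x1fff). */
}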
/branches/dynload/kernel/arch/sparc64/include/mm/tte.h
50,6 → 50,7
 
#include <arch/types.h>
 
/* TTE tag's VA_tag field contains bits <63:VA_TAG_PAGE_SHIFT> of the VA */
#define VA_TAG_PAGE_SHIFT 22
 
/** Translation Table Entry - Tag. */
75,8 → 76,13
unsigned nfo : 1; /**< No-Fault-Only. */
unsigned ie : 1; /**< Invert Endianness. */
unsigned soft2 : 9; /**< Software defined field. */
#if defined (US)
unsigned diag : 9; /**< Diagnostic data. */
unsigned pfn : 28; /**< Physical Address bits, bits 40:13. */
#elif defined (US3)
unsigned : 7; /**< Reserved. */
unsigned pfn : 30; /**< Physical Address bits, bits 42:13 */
#endif
unsigned soft : 6; /**< Software defined field. */
unsigned l : 1; /**< Lock. */
unsigned cp : 1; /**< Cacheable in physically indexed cache. */
/branches/dynload/kernel/arch/sparc64/include/mm/cache_spec.h
38,19 → 38,20
/*
* The following macros are valid for the following processors:
*
* UltraSPARC, UltraSPARC II, UltraSPARC IIi
* UltraSPARC, UltraSPARC II, UltraSPARC IIi, UltraSPARC III,
* UltraSPARC III+, UltraSPARC IV, UltraSPARC IV+
*
* Should we support other UltraSPARC processors, we need to make sure that
* the macros are defined correctly for them.
*/
 
#if defined (US)
#define DCACHE_SIZE (16 * 1024)
#elif defined (US3)
#define DCACHE_SIZE (64 * 1024)
#endif
#define DCACHE_LINE_SIZE 32
 
#define ICACHE_SIZE (16 * 1024)
#define ICACHE_WAYS 2
#define ICACHE_LINE_SIZE 32
 
#endif
 
/** @}
/branches/dynload/kernel/arch/sparc64/include/mm/mmu.h
35,8 → 35,10
#ifndef KERN_sparc64_MMU_H_
#define KERN_sparc64_MMU_H_
 
#if defined(US)
/* LSU Control Register ASI. */
#define ASI_LSU_CONTROL_REG 0x45 /**< Load/Store Unit Control Register. */
#endif
 
/* I-MMU ASIs. */
#define ASI_IMMU 0x50
52,7 → 54,12
#define VA_IMMU_SFSR 0x18 /**< IMMU sync fault status register. */
#define VA_IMMU_TSB_BASE 0x28 /**< IMMU TSB base register. */
#define VA_IMMU_TAG_ACCESS 0x30 /**< IMMU TLB tag access register. */
#if defined (US3)
#define VA_IMMU_PRIMARY_EXTENSION 0x48 /**< IMMU TSB primary extension register */
#define VA_IMMU_NUCLEUS_EXTENSION 0x58 /**< IMMU TSB nucleus extension register */
#endif
 
 
/* D-MMU ASIs. */
#define ASI_DMMU 0x58
#define ASI_DMMU_TSB_8KB_PTR_REG 0x59
73,6 → 80,11
#define VA_DMMU_TAG_ACCESS 0x30 /**< DMMU TLB tag access register. */
#define VA_DMMU_VA_WATCHPOINT_REG 0x38 /**< DMMU VA data watchpoint register. */
#define VA_DMMU_PA_WATCHPOINT_REG 0x40 /**< DMMU PA data watchpoint register. */
#if defined (US3)
#define VA_DMMU_PRIMARY_EXTENSION 0x48 /**< DMMU TSB primary extension register */
#define VA_DMMU_SECONDARY_EXTENSION 0x50 /**< DMMU TSB secondary extension register */
#define VA_DMMU_NUCLEUS_EXTENSION 0x58 /**< DMMU TSB nucleus extension register */
#endif
 
#ifndef __ASM__
 
80,6 → 92,7
#include <arch/barrier.h>
#include <arch/types.h>
 
#if defined(US)
/** LSU Control Register. */
typedef union {
uint64_t value;
100,6 → 113,7
} __attribute__ ((packed));
} lsu_cr_reg_t;
#endif /* US */
 
#endif /* !def __ASM__ */
 
/branches/dynload/kernel/arch/sparc64/include/mm/tlb.h
35,9 → 35,17
#ifndef KERN_sparc64_TLB_H_
#define KERN_sparc64_TLB_H_
 
#if defined (US)
#define ITLB_ENTRY_COUNT 64
#define DTLB_ENTRY_COUNT 64
#define DTLB_MAX_LOCKED_ENTRIES DTLB_ENTRY_COUNT
#endif
 
/** TLB_DSMALL is the only of the three DMMUs that can hold locked entries. */
#if defined (US3)
#define DTLB_MAX_LOCKED_ENTRIES 16
#endif
 
#define MEM_CONTEXT_KERNEL 0
#define MEM_CONTEXT_TEMP 1
 
53,6 → 61,9
/* TLB Demap Operation types. */
#define TLB_DEMAP_PAGE 0
#define TLB_DEMAP_CONTEXT 1
#if defined (US3)
#define TLB_DEMAP_ALL 2
#endif
 
#define TLB_DEMAP_TYPE_SHIFT 6
 
61,6 → 72,18
#define TLB_DEMAP_SECONDARY 1
#define TLB_DEMAP_NUCLEUS 2
 
/* There are more TLBs in one MMU in US3, their codes are defined here. */
#if defined (US3)
/* D-MMU: one small (16-entry) TLB and two big (512-entry) TLBs */
#define TLB_DSMALL 0
#define TLB_DBIG_0 2
#define TLB_DBIG_1 3
/* I-MMU: one small (16-entry) TLB and one big TLB */
#define TLB_ISMALL 0
#define TLB_IBIG 2
#endif
 
#define TLB_DEMAP_CONTEXT_SHIFT 4
 
/* TLB Tag Access shifts */
76,6 → 99,8
#include <arch/asm.h>
#include <arch/barrier.h>
#include <arch/types.h>
#include <arch/register.h>
#include <arch/cpu.h>
 
union tlb_context_reg {
uint64_t v;
90,6 → 115,9
typedef tte_data_t tlb_data_t;
 
/** I-/D-TLB Data Access Address in Alternate Space. */
 
#if defined (US)
 
union tlb_data_access_addr {
uint64_t value;
struct {
98,9 → 126,54
unsigned : 3;
} __attribute__ ((packed));
};
typedef union tlb_data_access_addr tlb_data_access_addr_t;
typedef union tlb_data_access_addr tlb_tag_read_addr_t;
typedef union tlb_data_access_addr dtlb_data_access_addr_t;
typedef union tlb_data_access_addr dtlb_tag_read_addr_t;
typedef union tlb_data_access_addr itlb_data_access_addr_t;
typedef union tlb_data_access_addr itlb_tag_read_addr_t;
 
#elif defined (US3)
 
/*
* In US3, I-MMU and D-MMU have different formats of the data
* access register virtual address. In the corresponding
* structures the member variable for the entry number is
* called "local_tlb_entry" - it contrasts with the "tlb_entry"
* for the US data access register VA structure. The rationale
* behind this is to prevent careless mistakes in the code
* caused by setting only the entry number and not the TLB
* number in the US3 code (when taking the code from US).
*/
 
union dtlb_data_access_addr {
uint64_t value;
struct {
uint64_t : 45;
unsigned : 1;
unsigned tlb_number : 2;
unsigned : 4;
unsigned local_tlb_entry : 9;
unsigned : 3;
} __attribute__ ((packed));
};
typedef union dtlb_data_access_addr dtlb_data_access_addr_t;
typedef union dtlb_data_access_addr dtlb_tag_read_addr_t;
 
union itlb_data_access_addr {
uint64_t value;
struct {
uint64_t : 45;
unsigned : 1;
unsigned tlb_number : 2;
unsigned : 6;
unsigned local_tlb_entry : 7;
unsigned : 3;
} __attribute__ ((packed));
};
typedef union itlb_data_access_addr itlb_data_access_addr_t;
typedef union itlb_data_access_addr itlb_tag_read_addr_t;
 
#endif
 
/** I-/D-TLB Tag Read Register. */
union tlb_tag_read_reg {
uint64_t value;
118,8 → 191,13
uint64_t value;
struct {
uint64_t vpn: 51; /**< Virtual Address bits 63:13. */
#if defined (US)
unsigned : 6; /**< Ignored. */
unsigned type : 1; /**< The type of demap operation. */
#elif defined (US3)
unsigned : 5; /**< Ignored. */
unsigned type: 2; /**< The type of demap operation. */
#endif
unsigned context : 2; /**< Context register selection. */
unsigned : 4; /**< Zero. */
} __attribute__ ((packed));
130,10 → 208,19
union tlb_sfsr_reg {
uint64_t value;
struct {
#if defined (US)
unsigned long : 40; /**< Implementation dependent. */
unsigned asi : 8; /**< ASI. */
unsigned : 2;
unsigned ft : 7; /**< Fault type. */
#elif defined (US3)
unsigned long : 39; /**< Implementation dependent. */
unsigned nf : 1; /**< Non-faulting load. */
unsigned asi : 8; /**< ASI. */
unsigned tm : 1; /**< I-TLB miss. */
unsigned : 3; /**< Reserved. */
unsigned ft : 5; /**< Fault type. */
#endif
unsigned e : 1; /**< Side-effect bit. */
unsigned ct : 2; /**< Context Register selection. */
unsigned pr : 1; /**< Privilege bit. */
144,9 → 231,53
};
typedef union tlb_sfsr_reg tlb_sfsr_reg_t;
 
#if defined (US3)
 
/*
* Functions for determining the number of entries in TLBs. They either return
* a constant value or a value based on the CPU autodetection.
*/
 
/**
* Determine the number of entries in the DMMU's small TLB.
*/
static inline uint16_t tlb_dsmall_size(void)
{
return 16;
}
 
/**
* Determine the number of entries in each DMMU's big TLB.
*/
static inline uint16_t tlb_dbig_size(void)
{
return 512;
}
 
/**
* Determine the number of entries in the IMMU's small TLB.
*/
static inline uint16_t tlb_ismall_size(void)
{
return 16;
}
 
/**
* Determine the number of entries in the IMMU's big TLB.
*/
static inline uint16_t tlb_ibig_size(void)
{
if (((ver_reg_t) ver_read()).impl == IMPL_ULTRASPARCIV_PLUS)
return 512;
else
return 128;
}
 
#endif
 
/** Read MMU Primary Context Register.
*
* @return Current value of Primary Context Register.
* @return Current value of Primary Context Register.
*/
static inline uint64_t mmu_primary_context_read(void)
{
155,7 → 286,7
 
/** Write MMU Primary Context Register.
*
* @param v New value of Primary Context Register.
* @param v New value of Primary Context Register.
*/
static inline void mmu_primary_context_write(uint64_t v)
{
165,7 → 296,7
 
/** Read MMU Secondary Context Register.
*
* @return Current value of Secondary Context Register.
* @return Current value of Secondary Context Register.
*/
static inline uint64_t mmu_secondary_context_read(void)
{
174,7 → 305,7
 
/** Write MMU Secondary Context Register.
*
* @param v New value of Primary Context Register.
* @param v New value of Secondary Context Register.
*/
static inline void mmu_secondary_context_write(uint64_t v)
{
182,15 → 313,18
flush_pipeline();
}
 
#if defined (US)
 
/** Read IMMU TLB Data Access Register.
*
* @param entry TLB Entry index.
* @param entry TLB Entry index.
*
* @return Current value of specified IMMU TLB Data Access Register.
* @return Current value of specified IMMU TLB Data Access
* Register.
*/
static inline uint64_t itlb_data_access_read(index_t entry)
{
tlb_data_access_addr_t reg;
itlb_data_access_addr_t reg;
reg.value = 0;
reg.tlb_entry = entry;
199,12 → 333,12
 
/** Write IMMU TLB Data Access Register.
*
* @param entry TLB Entry index.
* @param value Value to be written.
* @param entry TLB Entry index.
* @param value Value to be written.
*/
static inline void itlb_data_access_write(index_t entry, uint64_t value)
{
tlb_data_access_addr_t reg;
itlb_data_access_addr_t reg;
reg.value = 0;
reg.tlb_entry = entry;
214,13 → 348,14
 
/** Read DMMU TLB Data Access Register.
*
* @param entry TLB Entry index.
* @param entry TLB Entry index.
*
* @return Current value of specified DMMU TLB Data Access Register.
* @return Current value of specified DMMU TLB Data Access
* Register.
*/
static inline uint64_t dtlb_data_access_read(index_t entry)
{
tlb_data_access_addr_t reg;
dtlb_data_access_addr_t reg;
reg.value = 0;
reg.tlb_entry = entry;
229,12 → 364,12
 
/** Write DMMU TLB Data Access Register.
*
* @param entry TLB Entry index.
* @param value Value to be written.
* @param entry TLB Entry index.
* @param value Value to be written.
*/
static inline void dtlb_data_access_write(index_t entry, uint64_t value)
{
tlb_data_access_addr_t reg;
dtlb_data_access_addr_t reg;
reg.value = 0;
reg.tlb_entry = entry;
244,13 → 379,13
 
/** Read IMMU TLB Tag Read Register.
*
* @param entry TLB Entry index.
* @param entry TLB Entry index.
*
* @return Current value of specified IMMU TLB Tag Read Register.
* @return Current value of specified IMMU TLB Tag Read Register.
*/
static inline uint64_t itlb_tag_read_read(index_t entry)
{
tlb_tag_read_addr_t tag;
itlb_tag_read_addr_t tag;
 
tag.value = 0;
tag.tlb_entry = entry;
259,13 → 394,13
 
/** Read DMMU TLB Tag Read Register.
*
* @param entry TLB Entry index.
* @param entry TLB Entry index.
*
* @return Current value of specified DMMU TLB Tag Read Register.
* @return Current value of specified DMMU TLB Tag Read Register.
*/
static inline uint64_t dtlb_tag_read_read(index_t entry)
{
tlb_tag_read_addr_t tag;
dtlb_tag_read_addr_t tag;
 
tag.value = 0;
tag.tlb_entry = entry;
272,9 → 407,120
return asi_u64_read(ASI_DTLB_TAG_READ_REG, tag.value);
}
 
#elif defined (US3)
 
 
/** Read IMMU TLB Data Access Register.
*
* @param tlb TLB number (one of TLB_ISMALL or TLB_IBIG)
* @param entry TLB Entry index.
*
* @return Current value of specified IMMU TLB Data Access
* Register.
*/
static inline uint64_t itlb_data_access_read(int tlb, index_t entry)
{
itlb_data_access_addr_t reg;
reg.value = 0;
reg.tlb_number = tlb;
reg.local_tlb_entry = entry;
return asi_u64_read(ASI_ITLB_DATA_ACCESS_REG, reg.value);
}
 
/** Write IMMU TLB Data Access Register.
* @param tlb TLB number (one of TLB_ISMALL or TLB_IBIG)
* @param entry TLB Entry index.
* @param value Value to be written.
*/
static inline void itlb_data_access_write(int tlb, index_t entry,
uint64_t value)
{
itlb_data_access_addr_t reg;
reg.value = 0;
reg.tlb_number = tlb;
reg.local_tlb_entry = entry;
asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value);
flush_pipeline();
}
 
/** Read DMMU TLB Data Access Register.
*
* @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)
* @param entry TLB Entry index.
*
* @return Current value of specified DMMU TLB Data Access
* Register.
*/
static inline uint64_t dtlb_data_access_read(int tlb, index_t entry)
{
dtlb_data_access_addr_t reg;
reg.value = 0;
reg.tlb_number = tlb;
reg.local_tlb_entry = entry;
return asi_u64_read(ASI_DTLB_DATA_ACCESS_REG, reg.value);
}
 
/** Write DMMU TLB Data Access Register.
*
* @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)
* @param entry TLB Entry index.
* @param value Value to be written.
*/
static inline void dtlb_data_access_write(int tlb, index_t entry,
uint64_t value)
{
dtlb_data_access_addr_t reg;
reg.value = 0;
reg.tlb_number = tlb;
reg.local_tlb_entry = entry;
asi_u64_write(ASI_DTLB_DATA_ACCESS_REG, reg.value, value);
membar();
}
 
/** Read IMMU TLB Tag Read Register.
*
* @param tlb TLB number (one of TLB_ISMALL or TLB_IBIG)
* @param entry TLB Entry index.
*
* @return Current value of specified IMMU TLB Tag Read Register.
*/
static inline uint64_t itlb_tag_read_read(int tlb, index_t entry)
{
itlb_tag_read_addr_t tag;
 
tag.value = 0;
tag.tlb_number = tlb;
tag.local_tlb_entry = entry;
return asi_u64_read(ASI_ITLB_TAG_READ_REG, tag.value);
}
 
/** Read DMMU TLB Tag Read Register.
*
* @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)
* @param entry TLB Entry index.
*
* @return Current value of specified DMMU TLB Tag Read Register.
*/
static inline uint64_t dtlb_tag_read_read(int tlb, index_t entry)
{
dtlb_tag_read_addr_t tag;
 
tag.value = 0;
tag.tlb_number = tlb;
tag.local_tlb_entry = entry;
return asi_u64_read(ASI_DTLB_TAG_READ_REG, tag.value);
}
 
#endif
 
 
/** Write IMMU TLB Tag Access Register.
*
* @param v Value to be written.
* @param v Value to be written.
*/
static inline void itlb_tag_access_write(uint64_t v)
{
284,7 → 530,7
 
/** Read IMMU TLB Tag Access Register.
*
* @return Current value of IMMU TLB Tag Access Register.
* @return Current value of IMMU TLB Tag Access Register.
*/
static inline uint64_t itlb_tag_access_read(void)
{
293,7 → 539,7
 
/** Write DMMU TLB Tag Access Register.
*
* @param v Value to be written.
* @param v Value to be written.
*/
static inline void dtlb_tag_access_write(uint64_t v)
{
303,7 → 549,7
 
/** Read DMMU TLB Tag Access Register.
*
* @return Current value of DMMU TLB Tag Access Register.
* @return Current value of DMMU TLB Tag Access Register.
*/
static inline uint64_t dtlb_tag_access_read(void)
{
313,7 → 559,7
 
/** Write IMMU TLB Data in Register.
*
* @param v Value to be written.
* @param v Value to be written.
*/
static inline void itlb_data_in_write(uint64_t v)
{
323,7 → 569,7
 
/** Write DMMU TLB Data in Register.
*
* @param v Value to be written.
* @param v Value to be written.
*/
static inline void dtlb_data_in_write(uint64_t v)
{
333,7 → 579,7
 
/** Read ITLB Synchronous Fault Status Register.
*
* @return Current content of I-SFSR register.
* @return Current content of I-SFSR register.
*/
static inline uint64_t itlb_sfsr_read(void)
{
342,7 → 588,7
 
/** Write ITLB Synchronous Fault Status Register.
*
* @param v New value of I-SFSR register.
* @param v New value of I-SFSR register.
*/
static inline void itlb_sfsr_write(uint64_t v)
{
352,7 → 598,7
 
/** Read DTLB Synchronous Fault Status Register.
*
* @return Current content of D-SFSR register.
* @return Current content of D-SFSR register.
*/
static inline uint64_t dtlb_sfsr_read(void)
{
361,7 → 607,7
 
/** Write DTLB Synchronous Fault Status Register.
*
* @param v New value of D-SFSR register.
* @param v New value of D-SFSR register.
*/
static inline void dtlb_sfsr_write(uint64_t v)
{
371,7 → 617,7
 
/** Read DTLB Synchronous Fault Address Register.
*
* @return Current content of D-SFAR register.
* @return Current content of D-SFAR register.
*/
static inline uint64_t dtlb_sfar_read(void)
{
380,10 → 626,11
 
/** Perform IMMU TLB Demap Operation.
*
* @param type Selects between context and page demap.
* @param type Selects between context and page demap (and entire MMU
* demap on US3).
* @param context_encoding Specifies which Context register has Context ID for
* demap.
* @param page Address which is on the page to be demapped.
* demap.
* @param page Address which is on the page to be demapped.
*/
static inline void itlb_demap(int type, int context_encoding, uintptr_t page)
{
397,18 → 644,19
da.context = context_encoding;
da.vpn = pg.vpn;
asi_u64_write(ASI_IMMU_DEMAP, da.value, 0); /* da.value is the
* address within the
* ASI */
/* da.value is the address within the ASI */
asi_u64_write(ASI_IMMU_DEMAP, da.value, 0);
 
flush_pipeline();
}
 
/** Perform DMMU TLB Demap Operation.
*
* @param type Selects between context and page demap.
* @param type Selects between context and page demap (and entire MMU
* demap on US3).
* @param context_encoding Specifies which Context register has Context ID for
* demap.
* @param page Address which is on the page to be demapped.
* demap.
* @param page Address which is on the page to be demapped.
*/
static inline void dtlb_demap(int type, int context_encoding, uintptr_t page)
{
422,17 → 670,17
da.context = context_encoding;
da.vpn = pg.vpn;
asi_u64_write(ASI_DMMU_DEMAP, da.value, 0); /* da.value is the
* address within the
* ASI */
/* da.value is the address within the ASI */
asi_u64_write(ASI_DMMU_DEMAP, da.value, 0);
 
membar();
}
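(Editorial usage sketch, not code from this revision.) The demap type constants from tlb.h plug directly into these routines; a single-page demap works on both families, while the new TLB_DEMAP_ALL type is US3-only. The wrapper function name and the assumption that demap-all ignores its page argument are illustrative only:
 
/* Editorial sketch: how the demap types are meant to be used. */
static inline void tlb_demap_example(uintptr_t page)
{
	/* Demap a single page from the nucleus context (US and US3). */
	dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, page);
#if defined (US3)
	/* Demap every unlocked entry; the page argument is presumably ignored. */
	dtlb_demap(TLB_DEMAP_ALL, TLB_DEMAP_NUCLEUS, 0);
	itlb_demap(TLB_DEMAP_ALL, TLB_DEMAP_NUCLEUS, 0);
#endif
}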
 
extern void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate);
extern void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate);
extern void fast_data_access_protection(tlb_tag_access_reg_t tag , istate_t *istate);
extern void fast_instruction_access_mmu_miss(unative_t, istate_t *);
extern void fast_data_access_mmu_miss(tlb_tag_access_reg_t, istate_t *);
extern void fast_data_access_protection(tlb_tag_access_reg_t, istate_t *);
 
extern void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable);
extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool);
 
extern void dump_sfsr_and_sfar(void);
 
/branches/dynload/kernel/arch/sparc64/include/mm/cache.h
38,15 → 38,6
#include <mm/page.h>
#include <mm/frame.h>
 
#define dcache_flush_page(p) \
dcache_flush_color(PAGE_COLOR((p)))
#define dcache_flush_frame(p, f) \
dcache_flush_tag(PAGE_COLOR((p)), ADDR2PFN((f)));
 
extern void dcache_flush(void);
extern void dcache_flush_color(int c);
extern void dcache_flush_tag(int c, pfn_t tag);
 
#endif
 
/** @}
/branches/dynload/kernel/arch/sparc64/include/mm/tsb.h
107,6 → 107,55
asi_u64_write(ASI_DMMU, VA_DMMU_TSB_BASE, v);
}
 
#if defined (US3)
 
/** Write DTSB Primary Extension register.
*
* @param v New content of the DTSB Primary Extension register.
*/
static inline void dtsb_primary_extension_write(uint64_t v)
{
asi_u64_write(ASI_DMMU, VA_DMMU_PRIMARY_EXTENSION, v);
}
 
/** Write DTSB Secondary Extension register.
*
* @param v New content of the DTSB Secondary Extension register.
*/
static inline void dtsb_secondary_extension_write(uint64_t v)
{
asi_u64_write(ASI_DMMU, VA_DMMU_SECONDARY_EXTENSION, v);
}
 
/** Write DTSB Nucleus Extension register.
*
* @param v New content of the DTSB Nucleus Extension register.
*/
static inline void dtsb_nucleus_extension_write(uint64_t v)
{
asi_u64_write(ASI_DMMU, VA_DMMU_NUCLEUS_EXTENSION, v);
}
 
/** Write ITSB Primary Extension register.
*
* @param v New content of the ITSB Primary Extension register.
*/
static inline void itsb_primary_extension_write(uint64_t v)
{
asi_u64_write(ASI_IMMU, VA_IMMU_PRIMARY_EXTENSION, v);
}
 
/** Write ITSB Nucleus Extension register.
*
* @param v New content of the ITSB Nucleus Extension register.
*/
static inline void itsb_nucleus_extension_write(uint64_t v)
{
asi_u64_write(ASI_IMMU, VA_IMMU_NUCLEUS_EXTENSION, v);
}
 
#endif
 
/* Forward declarations. */
struct as;
struct pte;
/branches/dynload/kernel/arch/sparc64/include/register.h
117,23 → 117,6
};
typedef union fprs_reg fprs_reg_t;
 
/** UPA_CONFIG register.
*
* Note that format of this register differs significantly from
* processor version to version. The format defined here
* is the common subset for all supported processor versions.
*/
union upa_config {
uint64_t value;
struct {
uint64_t : 34;
unsigned pcon : 8; /**< Processor configuration. */
unsigned mid : 5; /**< Module (processor) ID register. */
unsigned pcap : 17; /**< Processor capabilities. */
} __attribute__ ((packed));
};
typedef union upa_config upa_config_t;
 
#endif
 
/** @}
/branches/dynload/kernel/arch/sparc64/include/cpu.h
35,15 → 35,6
#ifndef KERN_sparc64_CPU_H_
#define KERN_sparc64_CPU_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <arch/register.h>
#include <arch/asm.h>
 
#ifdef CONFIG_SMP
#include <arch/mm/cache.h>
#endif
 
#define MANUF_FUJITSU 0x04
#define MANUF_ULTRASPARC 0x17 /**< UltraSPARC I, UltraSPARC II */
#define MANUF_SUN 0x3e
52,14 → 43,29
#define IMPL_ULTRASPARCII 0x11
#define IMPL_ULTRASPARCII_I 0x12
#define IMPL_ULTRASPARCII_E 0x13
#define IMPL_ULTRASPARCIII 0x15
#define IMPL_ULTRASPARCIII 0x14
#define IMPL_ULTRASPARCIII_PLUS 0x15
#define IMPL_ULTRASPARCIII_I 0x16
#define IMPL_ULTRASPARCIV 0x18
#define IMPL_ULTRASPARCIV_PLUS 0x19
 
#define IMPL_SPARC64V 0x5
 
#ifndef __ASM__
 
#include <arch/types.h>
#include <typedefs.h>
#include <arch/register.h>
#include <arch/regdef.h>
#include <arch/asm.h>
 
#ifdef CONFIG_SMP
#include <arch/mm/cache.h>
#endif
 
typedef struct {
uint32_t mid; /**< Processor ID as read from
UPA_CONFIG. */
UPA_CONFIG/FIREPLANE_CONFIG. */
ver_reg_t ver;
uint32_t clock_frequency; /**< Processor frequency in Hz. */
uint64_t next_tick_cmpr; /**< Next clock interrupt should be
66,8 → 72,28
generated when the TICK register
matches this value. */
} cpu_arch_t;
 
 
/**
* Reads the module ID (agent ID/CPUID) of the current CPU.
*/
static inline uint32_t read_mid(void)
{
uint64_t icbus_config = asi_u64_read(ASI_ICBUS_CONFIG, 0);
icbus_config = icbus_config >> ICBUS_CONFIG_MID_SHIFT;
#if defined (US)
return icbus_config & 0x1f;
#elif defined (US3)
if (((ver_reg_t) ver_read()).impl == IMPL_ULTRASPARCIII_I)
return icbus_config & 0x1f;
else
return icbus_config & 0x3ff;
#endif
}
 
#endif
 
#endif
 
/** @}
*/
/branches/dynload/kernel/arch/sparc64/include/drivers/sgcn.h
0,0 → 1,126
/*
* Copyright (c) 2008 Pavel Rimsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64
* @{
*/
/** @file
*/
 
#ifndef KERN_sparc64_SGCN_H_
#define KERN_sparc64_SGCN_H_
 
#include <arch/types.h>
 
/* number of bytes in the TOC magic, including the terminating '\0' */
#define TOC_MAGIC_BYTES 8
 
/* number of bytes in the TOC key, including the terminating '\0' */
#define TOC_KEY_SIZE 8
 
/* maximum number of entries in the SRAM table of contents */
#define MAX_TOC_ENTRIES 32
 
/* number of bytes in the SGCN buffer magic, including the terminating '\0' */
#define SGCN_MAGIC_BYTES 4
 
/**
* Entry in the SRAM table of contents. Describes one segment of the SRAM
* which serves a particular purpose (e.g. OBP serial console, Solaris serial
* console, Solaris mailbox,...).
*/
typedef struct {
/** key (e.g. "OBPCONS", "SOLCONS", "SOLMBOX",...) */
char key[TOC_KEY_SIZE];
/** size of the segment in bytes */
uint32_t size;
/** offset of the segment within SRAM */
uint32_t offset;
} __attribute__ ((packed)) toc_entry_t;
 
/**
* SRAM table of contents. Describes all segments within the SRAM.
*/
typedef struct {
/** hard-wired to "TOCSRAM" */
char magic[TOC_MAGIC_BYTES];
/** we don't need this */
char unused[8];
/** TOC entries */
toc_entry_t keys[MAX_TOC_ENTRIES];
} __attribute__ ((packed)) iosram_toc_t;
 
/**
* SGCN buffer header. It is placed at the very beginning of the SGCN
* buffer.
*/
typedef struct {
/** hard-wired to "CON" */
char magic[SGCN_MAGIC_BYTES];
/** we don't need this */
char unused[8];
/** offset within the SGCN buffer of the input buffer start */
uint32_t in_begin;
/** offset within the SGCN buffer of the input buffer end */
uint32_t in_end;
/** offset within the SGCN buffer of the input buffer read pointer */
uint32_t in_rdptr;
/** offset within the SGCN buffer of the input buffer write pointer */
uint32_t in_wrptr;
 
/** offset within the SGCN buffer of the output buffer start */
uint32_t out_begin;
/** offset within the SGCN buffer of the output buffer end */
uint32_t out_end;
/** offset within the SGCN buffer of the output buffer read pointer */
uint32_t out_rdptr;
/** offset within the SGCN buffer of the output buffer write pointer */
uint32_t out_wrptr;
} __attribute__ ((packed)) sgcn_buffer_header_t;
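(Editorial sketch, not part of the revision.) The in_*/out_* fields describe two ring buffers whose offsets are relative to the start of the SGCN buffer. Assuming a base pointer "sgcn_buffer" for the mapped SGCN buffer and a wrap-around at in_end (both assumptions, not code from sgcn.c), consuming one input character might look like this:
 
/* Editorial sketch: read one character from the SGCN input ring, if any. */
static inline int sgcn_try_read_char(volatile uint8_t *sgcn_buffer)
{
	volatile sgcn_buffer_header_t *hdr =
	    (volatile sgcn_buffer_header_t *) sgcn_buffer;
 
	if (hdr->in_rdptr == hdr->in_wrptr)
		return -1;                       /* input ring is empty */
 
	int c = sgcn_buffer[hdr->in_rdptr];      /* offsets are buffer-relative */
 
	uint32_t next = hdr->in_rdptr + 1;
	if (next == hdr->in_end)                 /* wrap back to the ring start */
		next = hdr->in_begin;
	hdr->in_rdptr = next;
 
	return c;
}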
 
void sgcn_grab(void);
void sgcn_release(void);
void sgcn_poll(void);
void sgcn_init(void);
 
#endif
 
/** @}
*/
/branches/dynload/kernel/arch/sparc64/include/drivers/pci.h
51,8 → 51,8
};
 
struct pci_operations {
void (* enable_interrupt)(pci_t *pci, int inr);
void (* clear_interrupt)(pci_t *pci, int inr);
void (* enable_interrupt)(pci_t *, int);
void (* clear_interrupt)(pci_t *, int);
};
 
struct pci {
61,9 → 61,9
volatile uint64_t *reg; /**< Registers including interrupt registers. */
};
 
extern pci_t *pci_init(ofw_tree_node_t *node);
extern void pci_enable_interrupt(pci_t *pci, int inr);
extern void pci_clear_interrupt(pci_t *pci, int inr);
extern pci_t *pci_init(ofw_tree_node_t *);
extern void pci_enable_interrupt(pci_t *, int);
extern void pci_clear_interrupt(void *, int);
 
#endif
 
/branches/dynload/kernel/arch/sparc64/include/drivers/fhc.h
44,9 → 44,9
 
extern fhc_t *central_fhc;
 
extern fhc_t *fhc_init(ofw_tree_node_t *node);
extern void fhc_enable_interrupt(fhc_t *fhc, int inr);
extern void fhc_clear_interrupt(fhc_t *fhc, int inr);
extern fhc_t *fhc_init(ofw_tree_node_t *);
extern void fhc_enable_interrupt(fhc_t *, int);
extern void fhc_clear_interrupt(void *, int);
 
#endif
 
/branches/dynload/kernel/arch/sparc64/include/drivers/kbd.h
41,7 → 41,8
typedef enum {
KBD_UNKNOWN,
KBD_Z8530,
KBD_NS16550
KBD_NS16550,
KBD_SGCN
} kbd_type_t;
 
extern kbd_type_t kbd_type;
/branches/dynload/kernel/arch/sparc64/include/drivers/scr.h
42,7 → 42,8
SCR_UNKNOWN,
SCR_ATYFB,
SCR_FFB,
SCR_CGSIX
SCR_CGSIX,
SCR_XVR
} scr_type_t;
 
extern scr_type_t scr_type;
/branches/dynload/kernel/arch/sparc64/include/cpu_family.h
0,0 → 1,82
/*
* Copyright (c) 2008 Pavel Rimsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64
* @{
*/
/** @file
*/
 
#ifndef KERN_sparc64_CPU_FAMILY_H_
#define KERN_sparc64_CPU_FAMILY_H_
 
#include <arch.h>
#include <cpu.h>
#include <arch/register.h>
#include <arch/asm.h>
 
/**
* Find the processor (sub)family.
*
* @return true iff the CPU belongs to the US family
*/
static inline bool is_us(void)
{
int impl = ((ver_reg_t) ver_read()).impl;
return (impl == IMPL_ULTRASPARCI) || (impl == IMPL_ULTRASPARCII) ||
(impl == IMPL_ULTRASPARCII_I) || (impl == IMPL_ULTRASPARCII_E);
}
 
/**
* Find the processor (sub)family.
*
* @return true iff the CPU belongs to the US-III subfamily
*/
static inline bool is_us_iii(void)
{
int impl = ((ver_reg_t) ver_read()).impl;
return (impl == IMPL_ULTRASPARCIII) ||
(impl == IMPL_ULTRASPARCIII_PLUS) ||
(impl == IMPL_ULTRASPARCIII_I);
}
 
/**
* Find the processor (sub)family.
*
* @return true iff the CPU belongs to the US-IV subfamily
*/
static inline bool is_us_iv(void)
{
int impl = ((ver_reg_t) ver_read()).impl;
return (impl == IMPL_ULTRASPARCIV) || (impl == IMPL_ULTRASPARCIV_PLUS);
}
#endif
 
/** @}
*/
/branches/dynload/kernel/arch/sparc64/Makefile.inc
80,6 → 80,18
DEFS += -DCONFIG_SMP
endif
 
ifeq ($(CONFIG_SGCN),y)
DEFS += -DCONFIG_SGCN
endif
 
ifeq ($(MACHINE),us)
DEFS += -DUS
endif
 
ifeq ($(MACHINE),us3)
DEFS += -DUS3
endif
 
ARCH_SOURCES = \
arch/$(ARCH)/src/cpu/cpu.c \
arch/$(ARCH)/src/asm.S \
106,8 → 118,10
arch/$(ARCH)/src/drivers/tick.c \
arch/$(ARCH)/src/drivers/kbd.c \
arch/$(ARCH)/src/drivers/scr.c \
arch/$(ARCH)/src/drivers/sgcn.c \
arch/$(ARCH)/src/drivers/pci.c
 
 
ifeq ($(CONFIG_SMP),y)
ARCH_SOURCES += \
arch/$(ARCH)/src/smp/ipi.c \
/branches/dynload/kernel/arch/sparc64/src/smp/smp.c
35,6 → 35,7
#include <smp/smp.h>
#include <genarch/ofw/ofw_tree.h>
#include <cpu.h>
#include <arch/cpu_family.h>
#include <arch/cpu.h>
#include <arch.h>
#include <config.h>
43,6 → 44,7
#include <synch/synch.h>
#include <synch/waitq.h>
#include <print.h>
#include <arch/cpu_node.h>
 
/**
* This global variable is used to pick-up application processors
61,15 → 63,55
ofw_tree_node_t *node;
count_t cnt = 0;
node = ofw_tree_find_child_by_device_type(ofw_tree_lookup("/"), "cpu");
while (node) {
cnt++;
node = ofw_tree_find_peer_by_device_type(node, "cpu");
if (is_us() || is_us_iii()) {
node = ofw_tree_find_child_by_device_type(cpus_parent(), "cpu");
while (node) {
cnt++;
node = ofw_tree_find_peer_by_device_type(node, "cpu");
}
} else if (is_us_iv()) {
node = ofw_tree_find_child(cpus_parent(), "cmp");
while (node) {
cnt += 2;
node = ofw_tree_find_peer_by_name(node, "cmp");
}
}
config.cpu_count = max(1, cnt);
}
 
/**
* Wakes up the CPU which is represented by the "node" OFW tree node.
* If "node" represents the current CPU, calling the function has
* no effect.
*/
static void wakeup_cpu(ofw_tree_node_t *node)
{
uint32_t mid;
ofw_tree_property_t *prop;
/* 'upa-portid' for US, 'portid' for US-III, 'cpuid' for US-IV */
prop = ofw_tree_getprop(node, "upa-portid");
if ((!prop) || (!prop->value))
prop = ofw_tree_getprop(node, "portid");
if ((!prop) || (!prop->value))
prop = ofw_tree_getprop(node, "cpuid");
if (!prop || prop->value == NULL)
return;
mid = *((uint32_t *) prop->value);
if (CPU->arch.mid == mid)
return;
 
waking_up_mid = mid;
if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) ==
ESYNCH_TIMEOUT)
printf("%s: waiting for processor (mid = %" PRIu32
") timed out\n", __func__, mid);
}
 
/** Wake application processors up. */
void kmp(void *arg)
{
76,31 → 118,18
ofw_tree_node_t *node;
int i;
node = ofw_tree_find_child_by_device_type(ofw_tree_lookup("/"), "cpu");
for (i = 0; node; node = ofw_tree_find_peer_by_device_type(node, "cpu"), i++) {
uint32_t mid;
ofw_tree_property_t *prop;
prop = ofw_tree_getprop(node, "upa-portid");
if (!prop || !prop->value)
continue;
mid = *((uint32_t *) prop->value);
if (CPU->arch.mid == mid) {
/*
* Skip the current CPU.
*/
continue;
if (is_us() || is_us_iii()) {
node = ofw_tree_find_child_by_device_type(cpus_parent(), "cpu");
for (i = 0; node;
node = ofw_tree_find_peer_by_device_type(node, "cpu"), i++)
wakeup_cpu(node);
} else if (is_us_iv()) {
node = ofw_tree_find_child(cpus_parent(), "cmp");
while (node) {
wakeup_cpu(ofw_tree_find_child(node, "cpu@0"));
wakeup_cpu(ofw_tree_find_child(node, "cpu@1"));
node = ofw_tree_find_peer_by_name(node, "cmp");
}
 
/*
* Processor with ID == mid can proceed with its initialization.
*/
waking_up_mid = mid;
if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT)
printf("%s: waiting for processor (mid = %" PRIu32 ") timed out\n",
__func__, mid);
}
}
 
/branches/dynload/kernel/arch/sparc64/src/smp/ipi.c
46,6 → 46,33
#include <time/delay.h>
#include <panic.h>
 
/** Set the contents of the outgoing interrupt vector data.
*
* The first data item (data 0) will be set to the value of func, the
* rest of the vector will contain zeros.
*
* This is a helper function used from within the cross_call function.
*
* @param func value the first data item of the vector will be set to
*/
static inline void set_intr_w_data(void (* func)(void))
{
#if defined (US)
asi_u64_write(ASI_INTR_W, ASI_UDB_INTR_W_DATA_0, (uintptr_t) func);
asi_u64_write(ASI_INTR_W, ASI_UDB_INTR_W_DATA_1, 0);
asi_u64_write(ASI_INTR_W, ASI_UDB_INTR_W_DATA_2, 0);
#elif defined (US3)
asi_u64_write(ASI_INTR_W, VA_INTR_W_DATA_0, (uintptr_t) func);
asi_u64_write(ASI_INTR_W, VA_INTR_W_DATA_1, 0);
asi_u64_write(ASI_INTR_W, VA_INTR_W_DATA_2, 0);
asi_u64_write(ASI_INTR_W, VA_INTR_W_DATA_3, 0);
asi_u64_write(ASI_INTR_W, VA_INTR_W_DATA_4, 0);
asi_u64_write(ASI_INTR_W, VA_INTR_W_DATA_5, 0);
asi_u64_write(ASI_INTR_W, VA_INTR_W_DATA_6, 0);
asi_u64_write(ASI_INTR_W, VA_INTR_W_DATA_7, 0);
#endif
}
 
/** Invoke function on another processor.
*
* Currently, only functions without arguments are supported.
73,14 → 100,13
if (status & INTR_DISPATCH_STATUS_BUSY)
panic("Interrupt Dispatch Status busy bit set\n");
ASSERT(!(pstate_read() & PSTATE_IE_BIT));
do {
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_0,
(uintptr_t) func);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_1, 0);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_2, 0);
asi_u64_write(ASI_UDB_INTR_W,
set_intr_w_data(func);
asi_u64_write(ASI_INTR_W,
(mid << INTR_VEC_DISPATCH_MID_SHIFT) |
ASI_UDB_INTR_W_DISPATCH, 0);
VA_INTR_W_DISPATCH, 0);
membar();
/branches/dynload/kernel/arch/sparc64/src/ddi/ddi.c
41,7 → 41,7
* Interrupts are disabled and task is locked.
*
* @param task Task.
* @param ioaddr Startign I/O space address.
* @param ioaddr Starting I/O space address.
* @param size Size of the enabled I/O range.
*
* @return 0 on success or an error code from errno.h.
/branches/dynload/kernel/arch/sparc64/src/console.c
38,6 → 38,8
#include <arch/drivers/scr.h>
#include <arch/drivers/kbd.h>
 
#include <arch/drivers/sgcn.h>
 
#ifdef CONFIG_Z8530
#include <genarch/kbd/z8530.h>
#endif
54,24 → 56,25
#include <genarch/ofw/ofw_tree.h>
#include <arch.h>
#include <panic.h>
#include <func.h>
#include <print.h>
 
#define KEYBOARD_POLL_PAUSE 50000 /* 50ms */
 
/** Initialize kernel console to use framebuffer and keyboard directly. */
void standalone_sparc64_console_init(void)
/**
* Initialize kernel console to use framebuffer and keyboard directly.
* Called on UltraSPARC machines with standard keyboard and framebuffer.
*
* @param aliases the "/aliases" OBP node
*/
static void standard_console_init(ofw_tree_node_t *aliases)
{
stdin = NULL;
 
ofw_tree_node_t *aliases;
ofw_tree_property_t *prop;
ofw_tree_node_t *screen;
ofw_tree_node_t *keyboard;
aliases = ofw_tree_lookup("/aliases");
if (!aliases)
panic("Can't find /aliases.\n");
prop = ofw_tree_getprop(aliases, "screen");
if (!prop)
panic("Can't find property \"screen\".\n");
95,6 → 98,36
kbd_init(keyboard);
}
 
/** Initialize I/O on the Serengeti machine. */
static void serengeti_init(void)
{
sgcn_init();
}
 
/**
* Initialize input/output. Auto-detects the type of machine
* and calls the appropriate I/O init routine.
*/
void standalone_sparc64_console_init(void)
{
ofw_tree_node_t *aliases;
ofw_tree_property_t *prop;
aliases = ofw_tree_lookup("/aliases");
if (!aliases)
panic("Can't find /aliases.\n");
/* "def-cn" = "default console" */
prop = ofw_tree_getprop(aliases, "def-cn");
if ((!prop) || (!prop->value) || (strcmp(prop->value, "/sgcn") != 0)) {
standard_console_init(aliases);
} else {
serengeti_init();
}
}
 
 
/** Kernel thread for polling keyboard.
*
* @param arg Ignored.
112,11 → 145,27
}
#endif
 
#ifdef CONFIG_NS16550
#ifdef CONFIG_NS16550_INTERRUPT_DRIVEN
if (kbd_type == KBD_NS16550) {
/*
* The ns16550 driver is interrupt-driven.
*/
return;
}
#endif
#endif
while (1) {
#ifdef CONFIG_NS16550
#ifndef CONFIG_NS16550_INTERRUPT_DRIVEN
if (kbd_type == KBD_NS16550)
ns16550_poll();
#endif
#endif
#ifdef CONFIG_SGCN
if (kbd_type == KBD_SGCN)
sgcn_poll();
#endif
thread_usleep(KEYBOARD_POLL_PAUSE);
}
}
137,6 → 186,11
ns16550_grab();
break;
#endif
#ifdef CONFIG_SGCN
case KBD_SGCN:
sgcn_grab();
break;
#endif
default:
break;
}
158,6 → 212,11
ns16550_release();
break;
#endif
#ifdef CONFIG_SGCN
case KBD_SGCN:
sgcn_release();
break;
#endif
default:
break;
}
/branches/dynload/kernel/arch/sparc64/src/sparc64.c
86,7 → 86,7
* But we only create 128 buckets.
*/
irq_init(1 << 11, 128);
 
standalone_sparc64_console_init();
}
}
/branches/dynload/kernel/arch/sparc64/src/trap/interrupt.c
67,11 → 67,19
*/
void interrupt(int n, istate_t *istate)
{
uint64_t status;
uint64_t intrcv;
uint64_t data0;
status = asi_u64_read(ASI_INTR_DISPATCH_STATUS, 0);
if (status & (!INTR_DISPATCH_STATUS_BUSY))
panic("Interrupt Dispatch Status busy bit not set\n");
 
intrcv = asi_u64_read(ASI_INTR_RECEIVE, 0);
data0 = asi_u64_read(ASI_UDB_INTR_R, ASI_UDB_INTR_R_DATA_0);
#if defined (US)
data0 = asi_u64_read(ASI_INTR_R, ASI_UDB_INTR_R_DATA_0);
#elif defined (US3)
data0 = asi_u64_read(ASI_INTR_R, VA_INTR_R_DATA_0);
#endif
 
irq_t *irq = irq_dispatch_and_lock(data0);
if (irq) {
79,6 → 87,12
* The IRQ handler was found.
*/
irq->handler(irq, irq->arg);
/*
* See if there is a clear-interrupt-routine and call it.
*/
if (irq->cir) {
irq->cir(irq->cir_arg, irq->inr);
}
spinlock_unlock(&irq->lock);
} else if (data0 > config.base) {
/*
98,7 → 112,7
*/
#ifdef CONFIG_DEBUG
printf("cpu%u: spurious interrupt (intrcv=%#" PRIx64
", data0=%#" PRIx64 ")\n", CPU->id, intrcv, data0);
", data0=%#" PRIx64 ")\n", CPU->id, intrcv, data0);
#endif
}
 
/branches/dynload/kernel/arch/sparc64/src/cpu/cpu.c
32,12 → 32,46
/** @file
*/
 
#include <arch/cpu_family.h>
#include <cpu.h>
#include <arch.h>
#include <genarch/ofw/ofw_tree.h>
#include <arch/drivers/tick.h>
#include <print.h>
#include <arch/cpu_node.h>
 
/**
* Finds out the clock frequency of the current CPU.
*
* @param node node representing the current CPU in the OFW tree
* @return clock frequency if "node" is the current CPU and no error
* occurs, -1 if "node" is not the current CPU or on error
*/
static int find_cpu_frequency(ofw_tree_node_t *node)
{
ofw_tree_property_t *prop;
uint32_t mid;
 
/* 'upa-portid' for US, 'portid' for US-III, 'cpuid' for US-IV */
prop = ofw_tree_getprop(node, "upa-portid");
if ((!prop) || (!prop->value))
prop = ofw_tree_getprop(node, "portid");
if ((!prop) || (!prop->value))
prop = ofw_tree_getprop(node, "cpuid");
if (prop && prop->value) {
mid = *((uint32_t *) prop->value);
if (mid == CPU->arch.mid) {
prop = ofw_tree_getprop(node, "clock-frequency");
if (prop && prop->value) {
return *((uint32_t *) prop->value);
}
}
}
return -1;
}
 
/** Perform sparc64 specific initialization of the processor structure for the
* current processor.
*/
44,34 → 78,37
void cpu_arch_init(void)
{
ofw_tree_node_t *node;
uint32_t mid;
uint32_t clock_frequency = 0;
upa_config_t upa_config;
upa_config.value = upa_config_read();
CPU->arch.mid = upa_config.mid;
CPU->arch.mid = read_mid();
/*
* Detect processor frequency.
*/
node = ofw_tree_find_child_by_device_type(ofw_tree_lookup("/"), "cpu");
while (node) {
ofw_tree_property_t *prop;
prop = ofw_tree_getprop(node, "upa-portid");
if (prop && prop->value) {
mid = *((uint32_t *) prop->value);
if (mid == CPU->arch.mid) {
prop = ofw_tree_getprop(node,
"clock-frequency");
if (prop && prop->value)
clock_frequency = *((uint32_t *)
prop->value);
}
if (is_us() || is_us_iii()) {
node = ofw_tree_find_child_by_device_type(cpus_parent(), "cpu");
while (node) {
int f = find_cpu_frequency(node);
if (f != -1)
clock_frequency = (uint32_t) f;
node = ofw_tree_find_peer_by_device_type(node, "cpu");
}
node = ofw_tree_find_peer_by_device_type(node, "cpu");
} else if (is_us_iv()) {
node = ofw_tree_find_child(cpus_parent(), "cmp");
while (node) {
int f;
f = find_cpu_frequency(
ofw_tree_find_child(node, "cpu@0"));
if (f != -1)
clock_frequency = (uint32_t) f;
f = find_cpu_frequency(
ofw_tree_find_child(node, "cpu@1"));
if (f != -1)
clock_frequency = (uint32_t) f;
node = ofw_tree_find_peer_by_name(node, "cmp");
}
}
 
CPU->arch.clock_frequency = clock_frequency;
tick_init();
}
124,6 → 161,15
case IMPL_ULTRASPARCIII:
impl = "UltraSPARC III";
break;
case IMPL_ULTRASPARCIII_PLUS:
impl = "UltraSPARC III+";
break;
case IMPL_ULTRASPARCIII_I:
impl = "UltraSPARC IIIi";
break;
case IMPL_ULTRASPARCIV:
impl = "UltraSPARC IV";
break;
case IMPL_ULTRASPARCIV_PLUS:
impl = "UltraSPARC IV+";
break;
/branches/dynload/kernel/arch/sparc64/src/mm/tlb.c
54,14 → 54,13
#include <arch/mm/tsb.h>
#endif
 
static void dtlb_pte_copy(pte_t *t, index_t index, bool ro);
static void itlb_pte_copy(pte_t *t, index_t index);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
static void dtlb_pte_copy(pte_t *, index_t, bool);
static void itlb_pte_copy(pte_t *, index_t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
const char *);
static void do_fast_data_access_protection_fault(istate_t *,
tlb_tag_access_reg_t, const char *);
 
char *context_encoding[] = {
"Primary",
86,11 → 85,11
 
/** Insert privileged mapping into DMMU TLB.
*
* @param page Virtual page address.
* @param frame Physical frame address.
* @param pagesize Page size.
* @param locked True for permanent mappings, false otherwise.
* @param cacheable True if the mapping is cacheable, false otherwise.
* @param page Virtual page address.
* @param frame Physical frame address.
* @param pagesize Page size.
* @param locked True for permanent mappings, false otherwise.
* @param cacheable True if the mapping is cacheable, false otherwise.
*/
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
bool locked, bool cacheable)
103,7 → 102,7
pg.address = page;
fr.address = frame;
 
tag.value = ASID_KERNEL;
tag.context = ASID_KERNEL;
tag.vpn = pg.vpn;
 
dtlb_tag_access_write(tag.value);
126,10 → 125,10
 
/** Copy PTE to TLB.
*
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the entry will be created read-only, regardless of its
* w field.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the entry will be created read-only, regardless
* of its w field.
*/
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
165,8 → 164,8
 
/** Copy PTE to ITLB.
*
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itlb_pte_copy(pte_t *t, index_t index)
{
235,10 → 234,11
* Note that some faults (e.g. kernel faults) were already resolved by the
* low-level, assembly language part of the fast_data_access_mmu_miss handler.
*
* @param tag Content of the TLB Tag Access register as it existed when the
* trap happened. This is to prevent confusion created by clobbered
* Tag Access register during a nested DTLB miss.
* @param istate Interrupted state saved on the stack.
* @param tag Content of the TLB Tag Access register as it existed
* when the trap happened. This is to prevent confusion
* created by clobbered Tag Access register during a nested
* DTLB miss.
* @param istate Interrupted state saved on the stack.
*/
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
287,10 → 287,11
 
/** DTLB protection fault handler.
*
* @param tag Content of the TLB Tag Access register as it existed when the
* trap happened. This is to prevent confusion created by clobbered
* Tag Access register during a nested DTLB miss.
* @param istate Interrupted state saved on the stack.
* @param tag Content of the TLB Tag Access register as it existed
* when the trap happened. This is to prevent confusion
* created by clobbered Tag Access register during a nested
* DTLB miss.
* @param istate Interrupted state saved on the stack.
*/
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
331,6 → 332,26
}
}
 
/** Print TLB entry (for debugging purposes).
*
* The diag field has been left out in order to make this function more generic
* (there is no diag field in the US3 architecture).
*
* @param i TLB entry number
* @param t TLB entry tag
* @param d TLB entry data
*/
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
#if defined (US)
 
/** Print contents of both TLBs. */
void tlb_print(void)
{
342,12 → 363,7
for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
d.value = itlb_data_access_read(i);
t.value = itlb_tag_read_read(i);
 
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
print_tlb_entry(i, t, d);
}
 
printf("D-TLB contents:\n");
354,16 → 370,57
for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
d.value = dtlb_data_access_read(i);
t.value = dtlb_tag_read_read(i);
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
print_tlb_entry(i, t, d);
}
}
 
#elif defined (US3)
 
/** Print contents of all TLBs. */
void tlb_print(void)
{
int i;
tlb_data_t d;
tlb_tag_read_reg_t t;
printf("TLB_ISMALL contents:\n");
for (i = 0; i < tlb_ismall_size(); i++) {
d.value = dtlb_data_access_read(TLB_ISMALL, i);
t.value = dtlb_tag_read_read(TLB_ISMALL, i);
print_tlb_entry(i, t, d);
}
printf("TLB_IBIG contents:\n");
for (i = 0; i < tlb_ibig_size(); i++) {
d.value = dtlb_data_access_read(TLB_IBIG, i);
t.value = dtlb_tag_read_read(TLB_IBIG, i);
print_tlb_entry(i, t, d);
}
printf("TLB_DSMALL contents:\n");
for (i = 0; i < tlb_dsmall_size(); i++) {
d.value = dtlb_data_access_read(TLB_DSMALL, i);
t.value = dtlb_tag_read_read(TLB_DSMALL, i);
print_tlb_entry(i, t, d);
}
printf("TLB_DBIG_1 contents:\n");
for (i = 0; i < tlb_dbig_size(); i++) {
d.value = dtlb_data_access_read(TLB_DBIG_0, i);
t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
print_tlb_entry(i, t, d);
}
printf("TLB_DBIG_2 contents:\n");
for (i = 0; i < tlb_dbig_size(); i++) {
d.value = dtlb_data_access_read(TLB_DBIG_1, i);
t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
print_tlb_entry(i, t, d);
}
}
 
#endif
 
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str)
{
411,30 → 468,71
sfsr.value = dtlb_sfsr_read();
sfar = dtlb_sfar_read();
#if defined (US)
printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
#elif defined (US3)
printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
"w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif
printf("DTLB SFAR: address=%p\n", sfar);
dtlb_sfsr_write(0);
}
 
#if defined (US3)
/** Invalidates given TLB entry if and only if it is non-locked or global.
*
* @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1,
* TLB_ISMALL, TLB_IBIG).
* @param entry Entry index within the given TLB.
*/
static void tlb_invalidate_entry(int tlb, index_t entry)
{
tlb_data_t d;
tlb_tag_read_reg_t t;
if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) {
d.value = dtlb_data_access_read(tlb, entry);
if (!d.l || d.g) {
t.value = dtlb_tag_read_read(tlb, entry);
d.v = false;
dtlb_tag_access_write(t.value);
dtlb_data_access_write(tlb, entry, d.value);
}
} else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) {
d.value = itlb_data_access_read(tlb, entry);
if (!d.l || d.g) {
t.value = itlb_tag_read_read(tlb, entry);
d.v = false;
itlb_tag_access_write(t.value);
itlb_data_access_write(tlb, entry, d.value);
}
}
}
#endif
 
/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
int i;
tlb_data_t d;
tlb_tag_read_reg_t t;
 
/*
* Walk all ITLB and DTLB entries and remove all unlocked mappings.
*
* The kernel doesn't use global mappings so any locked global mappings
* found must have been created by someone else. Their only purpose now
* found must have been created by someone else. Their only purpose now
* is to collide with proper mappings. Invalidate immediately. It should
* be safe to invalidate them as late as now.
*/
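/*
 * Illustrative note (not part of this revision): the (!d.l || d.g) test
 * below keeps an entry only if it is locked and non-global; unlocked
 * entries and locked global entries are both invalidated.
 */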
 
#if defined (US)
tlb_data_t d;
tlb_tag_read_reg_t t;
 
for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
d.value = itlb_data_access_read(i);
if (!d.l || d.g) {
444,7 → 542,7
itlb_data_access_write(i, d.value);
}
}
 
for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
d.value = dtlb_data_access_read(i);
if (!d.l || d.g) {
454,7 → 552,21
dtlb_data_access_write(i, d.value);
}
}
 
#elif defined (US3)
 
for (i = 0; i < tlb_ismall_size(); i++)
tlb_invalidate_entry(TLB_ISMALL, i);
for (i = 0; i < tlb_ibig_size(); i++)
tlb_invalidate_entry(TLB_IBIG, i);
for (i = 0; i < tlb_dsmall_size(); i++)
tlb_invalidate_entry(TLB_DSMALL, i);
for (i = 0; i < tlb_dbig_size(); i++)
tlb_invalidate_entry(TLB_DBIG_0, i);
for (i = 0; i < tlb_dbig_size(); i++)
tlb_invalidate_entry(TLB_DBIG_1, i);
#endif
 
}
 
/** Invalidate all ITLB and DTLB entries that belong to specified ASID
484,9 → 596,9
/** Invalidate all ITLB and DTLB entries for specified page range in specified
* address space.
*
* @param asid Address Space ID.
* @param page First page which to sweep out from ITLB and DTLB.
* @param cnt Number of ITLB and DTLB entries to invalidate.
* @param asid Address Space ID.
* @param page First page which to sweep out from ITLB and DTLB.
* @param cnt Number of ITLB and DTLB entries to invalidate.
*/
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
/branches/dynload/kernel/arch/sparc64/src/mm/as.c
164,7 → 164,25
itsb_base_write(tsb_base.value);
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
dtsb_base_write(tsb_base.value);
#if defined (US3)
/*
* Clear the extension registers.
* In HelenOS, primary and secondary context registers contain
* equal values and kernel misses (context 0, i.e. the nucleus context)
* are excluded from the TSB miss handler, so it makes no sense
* to have separate TSBs for primary, secondary and nucleus contexts.
* Clearing the extension registers will ensure that the value of the
* TSB Base register will be used as an address of TSB, making the code
* compatible with the US port.
*/
itsb_primary_extension_write(0);
itsb_nucleus_extension_write(0);
dtsb_primary_extension_write(0);
dtsb_secondary_extension_write(0);
dtsb_nucleus_extension_write(0);
#endif
#endif
}
 
/** Perform sparc64-specific tasks when an address space is removed from the
/branches/dynload/kernel/arch/sparc64/src/mm/cache.S
47,45 → 47,3
retl
! beware SF Erratum #51, do not put the MEMBAR here
nop
 
/** Flush only D-cache lines of one virtual color.
*
* @param o0 Virtual color to be flushed.
*/
.global dcache_flush_color
dcache_flush_color:
mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1
set DCACHE_SIZE / 2, %g2
sllx %g2, %o0, %g2
sub %g2, DCACHE_LINE_SIZE, %g2
0: stxa %g0, [%g2] ASI_DCACHE_TAG
membar #Sync
subcc %g1, 1, %g1
bnz,pt %xcc, 0b
sub %g2, DCACHE_LINE_SIZE, %g2
retl
nop
 
/** Flush only D-cache lines of one virtual color and one tag.
*
* @param o0 Virtual color to lookup the tag.
* @param o1 Tag of the cachelines to be flushed.
*/
.global dcache_flush_tag
dcache_flush_tag:
mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1
set DCACHE_SIZE / 2, %g2
sllx %g2, %o0, %g2
sub %g2, DCACHE_LINE_SIZE, %g2
0: ldxa [%g2] ASI_DCACHE_TAG, %g3
srlx %g3, DCACHE_TAG_SHIFT, %g3
cmp %g3, %o1
bnz 1f
nop
stxa %g0, [%g2] ASI_DCACHE_TAG
membar #Sync
1: subcc %g1, 1, %g1
bnz,pt %xcc, 0b
sub %g2, DCACHE_LINE_SIZE, %g2
retl
nop
/branches/dynload/kernel/arch/sparc64/src/mm/tsb.c
112,9 → 112,9
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
tsb->data.p = t->k; /* p as privileged */
tsb->data.v = t->p;
tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */
tsb->data.p = t->k; /* p as privileged, k as kernel */
tsb->data.v = t->p; /* v as valid, p as present */
write_barrier();
173,3 → 173,4
 
/** @}
*/
 
/branches/dynload/kernel/arch/sparc64/src/mm/page.c
52,7 → 52,7
uintptr_t virt_page;
uintptr_t phys_page;
int pagesize_code;
} bsp_locked_dtlb_entry[DTLB_ENTRY_COUNT];
} bsp_locked_dtlb_entry[DTLB_MAX_LOCKED_ENTRIES];
 
/** Number of entries in bsp_locked_dtlb_entry array. */
static count_t bsp_locked_dtlb_entries = 0;
166,3 → 166,4
 
/** @}
*/
 
/branches/dynload/kernel/arch/sparc64/src/drivers/fhc.c
101,8 → 101,9
}
}
 
void fhc_clear_interrupt(fhc_t *fhc, int inr)
void fhc_clear_interrupt(void *fhcp, int inr)
{
fhc_t *fhc = (fhc_t *)fhcp;
ASSERT(fhc->uart_imap);
 
switch (inr) {
/branches/dynload/kernel/arch/sparc64/src/drivers/kbd.c
63,6 → 63,8
uintptr_t aligned_addr;
ofw_tree_property_t *prop;
const char *name;
cir_t cir;
void *cir_arg;
name = ofw_tree_node_name(node);
103,11 → 105,14
switch (kbd_type) {
case KBD_Z8530:
size = ((ofw_fhc_reg_t *) prop->value)->size;
if (!ofw_fhc_apply_ranges(node->parent, ((ofw_fhc_reg_t *) prop->value) , &pa)) {
if (!ofw_fhc_apply_ranges(node->parent,
((ofw_fhc_reg_t *) prop->value), &pa)) {
printf("Failed to determine keyboard address.\n");
return;
}
if (!ofw_fhc_map_interrupt(node->parent, ((ofw_fhc_reg_t *) prop->value), interrupts, &inr)) {
if (!ofw_fhc_map_interrupt(node->parent,
((ofw_fhc_reg_t *) prop->value), interrupts, &inr, &cir,
&cir_arg)) {
printf("Failed to determine keyboard interrupt.\n");
return;
}
115,11 → 120,14
case KBD_NS16550:
size = ((ofw_ebus_reg_t *) prop->value)->size;
if (!ofw_ebus_apply_ranges(node->parent, ((ofw_ebus_reg_t *) prop->value) , &pa)) {
if (!ofw_ebus_apply_ranges(node->parent,
((ofw_ebus_reg_t *) prop->value), &pa)) {
printf("Failed to determine keyboard address.\n");
return;
}
if (!ofw_ebus_map_interrupt(node->parent, ((ofw_ebus_reg_t *) prop->value), interrupts, &inr)) {
if (!ofw_ebus_map_interrupt(node->parent,
((ofw_ebus_reg_t *) prop->value), interrupts, &inr, &cir,
&cir_arg)) {
printf("Failed to determine keyboard interrupt.\n");
return;
};
142,16 → 150,17
switch (kbd_type) {
#ifdef CONFIG_Z8530
case KBD_Z8530:
z8530_init(devno, inr, vaddr);
z8530_init(devno, vaddr, inr, cir, cir_arg);
break;
#endif
#ifdef CONFIG_NS16550
case KBD_NS16550:
ns16550_init(devno, inr, (ioport_t)vaddr);
ns16550_init(devno, (ioport_t)vaddr, inr, cir, cir_arg);
break;
#endif
default:
printf("Kernel is not compiled with the necessary keyboard driver this machine requires.\n");
printf("Kernel is not compiled with the necessary keyboard "
"driver this machine requires.\n");
}
}
 
/branches/dynload/kernel/arch/sparc64/src/drivers/scr.c
55,6 → 55,10
void scr_init(ofw_tree_node_t *node)
{
ofw_tree_property_t *prop;
ofw_pci_reg_t *pci_reg;
ofw_pci_reg_t pci_abs_reg;
ofw_upa_reg_t *upa_reg;
ofw_sbus_reg_t *sbus_reg;
const char *name;
name = ofw_tree_node_name(node);
61,6 → 65,8
if (strcmp(name, "SUNW,m64B") == 0)
scr_type = SCR_ATYFB;
else if (strcmp(name, "SUNW,XVR-100") == 0)
scr_type = SCR_XVR;
else if (strcmp(name, "SUNW,ffb") == 0)
scr_type = SCR_FFB;
else if (strcmp(name, "cgsix") == 0)
67,7 → 73,7
scr_type = SCR_CGSIX;
if (scr_type == SCR_UNKNOWN) {
printf("Unknown keyboard device.\n");
printf("Unknown screen device.\n");
return;
}
106,15 → 112,15
return;
}
ofw_pci_reg_t *fb_reg = &((ofw_pci_reg_t *) prop->value)[1];
ofw_pci_reg_t abs_reg;
pci_reg = &((ofw_pci_reg_t *) prop->value)[1];
if (!ofw_pci_reg_absolutize(node, fb_reg, &abs_reg)) {
if (!ofw_pci_reg_absolutize(node, pci_reg, &pci_abs_reg)) {
printf("Failed to absolutize fb register.\n");
return;
}
if (!ofw_pci_apply_ranges(node->parent, &abs_reg , &fb_addr)) {
if (!ofw_pci_apply_ranges(node->parent, &pci_abs_reg,
&fb_addr)) {
printf("Failed to determine screen address.\n");
return;
}
142,12 → 148,54
}
break;
case SCR_XVR:
if (prop->size / sizeof(ofw_pci_reg_t) < 2) {
printf("Too few screen registers.\n");
return;
}
pci_reg = &((ofw_pci_reg_t *) prop->value)[1];
if (!ofw_pci_reg_absolutize(node, pci_reg, &pci_abs_reg)) {
printf("Failed to absolutize fb register.\n");
return;
}
if (!ofw_pci_apply_ranges(node->parent, &pci_abs_reg,
&fb_addr)) {
printf("Failed to determine screen address.\n");
return;
}
 
switch (fb_depth) {
case 8:
fb_scanline = fb_linebytes * (fb_depth >> 3);
visual = VISUAL_SB1500_PALETTE;
break;
case 16:
fb_scanline = fb_linebytes * (fb_depth >> 3);
visual = VISUAL_RGB_5_6_5;
break;
case 24:
fb_scanline = fb_linebytes * 4;
visual = VISUAL_RGB_8_8_8_0;
break;
case 32:
fb_scanline = fb_linebytes * (fb_depth >> 3);
visual = VISUAL_RGB_0_8_8_8;
break;
default:
printf("Unsupported bits per pixel.\n");
return;
}
break;
case SCR_FFB:
fb_scanline = 8192;
visual = VISUAL_BGR_0_8_8_8;
 
ofw_upa_reg_t *reg = &((ofw_upa_reg_t *) prop->value)[FFB_REG_24BPP];
if (!ofw_upa_apply_ranges(node->parent, reg, &fb_addr)) {
upa_reg = &((ofw_upa_reg_t *) prop->value)[FFB_REG_24BPP];
if (!ofw_upa_apply_ranges(node->parent, upa_reg, &fb_addr)) {
printf("Failed to determine screen address.\n");
return;
}
164,8 → 212,8
return;
}
ofw_sbus_reg_t *cg6_reg = &((ofw_sbus_reg_t *) prop->value)[0];
if (!ofw_sbus_apply_ranges(node->parent, cg6_reg, &fb_addr)) {
sbus_reg = &((ofw_sbus_reg_t *) prop->value)[0];
if (!ofw_sbus_apply_ranges(node->parent, sbus_reg, &fb_addr)) {
printf("Failed to determine screen address.\n");
return;
}
175,7 → 223,15
panic("Unexpected type.\n");
}
 
fb_init(fb_addr, fb_width, fb_height, fb_scanline, visual);
fb_properties_t props = {
.addr = fb_addr,
.offset = 0,
.x = fb_width,
.y = fb_height,
.scan = fb_scanline,
.visual = visual,
};
fb_init(&props);
}
 
/** @}
/branches/dynload/kernel/arch/sparc64/src/drivers/tick.c
45,11 → 45,12
 
#define TICK_RESTART_TIME 50 /* Worst case estimate. */
 
/** Initialize tick interrupt. */
/** Initialize tick and stick interrupt. */
void tick_init(void)
{
/* initialize TICK interrupt */
tick_compare_reg_t compare;
 
interrupt_register(14, "tick_int", tick_interrupt);
compare.int_dis = false;
compare.tick_cmpr = CPU->arch.clock_frequency / HZ;
56,6 → 57,21
CPU->arch.next_tick_cmpr = compare.tick_cmpr;
tick_compare_write(compare.value);
tick_write(0);
 
#if defined (US3)
/* disable STICK interrupts and clear any pending ones */
tick_compare_reg_t stick_compare;
softint_reg_t clear;
 
stick_compare.value = stick_compare_read();
stick_compare.int_dis = true;
stick_compare.tick_cmpr = 0;
stick_compare_write(stick_compare.value);
 
clear.value = 0;
clear.stick_int = 1;
clear_softint_write(clear.value);
#endif
}
 
/** Process tick interrupt.
67,7 → 83,7
{
softint_reg_t softint, clear;
uint64_t drift;
 
softint.value = softint_read();
/*
/branches/dynload/kernel/arch/sparc64/src/drivers/sgcn.c
0,0 → 1,450
/*
* Copyright (c) 2008 Pavel Rimsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64
* @{
*/
/**
* @file
* @brief SGCN driver.
*/
 
#include <arch/drivers/sgcn.h>
#include <arch/drivers/kbd.h>
#include <genarch/ofw/ofw_tree.h>
#include <debug.h>
#include <func.h>
#include <print.h>
#include <mm/page.h>
#include <ipc/irq.h>
#include <ddi/ddi.h>
#include <ddi/device.h>
#include <console/chardev.h>
#include <console/console.h>
#include <ddi/device.h>
#include <sysinfo/sysinfo.h>
#include <synch/spinlock.h>
 
/*
* Physical address at which the SBBC starts. This value has been obtained
* by inspecting (using Simics) memory accesses made by OBP. It is valid
* for the Simics-simulated Serengeti machine. The author of this code is
* not sure whether this value is valid generally.
*/
#define SBBC_START 0x63000000000
 
/* offset of SRAM within the SBBC memory */
#define SBBC_SRAM_OFFSET 0x900000
 
/* size (in bytes) of the physical memory area which will be mapped */
#define MAPPED_AREA_SIZE (128 * 1024)
 
/* magic string contained at the beginning of SRAM */
#define SRAM_TOC_MAGIC "TOCSRAM"
 
/*
* Key into the SRAM table of contents which identifies the entry
* describing the OBP console buffer. It is worth mentioning
* that the OBP console buffer is not the only console buffer
* which can be used. It is, however, used because when the kernel
* is running, the OBP buffer is not used by OBP any more but OBP
* has already made the necessary arrangements so that the output will
* be read from the OBP buffer and input will go to the OBP buffer.
* Therefore HelenOS needs to make no such arrangements any more.
*/
#define CONSOLE_KEY "OBPCONS"
 
/* magic string contained at the beginning of the console buffer */
#define SGCN_BUFFER_MAGIC "CON"
 
/**
* The driver is polling based, but in order to notify the userspace
* of a key being pressed, we need to supply the interface with some
* interrupt number. The interrupt number can be arbitrary, as it
* will never be used for identifying HW interrupts, but only in
* notifying the userspace.
*/
#define FICTIONAL_INR 1
 
 
/*
* Returns a pointer to the object of a given type which is placed at the given
* offset from the SRAM beginning.
*/
#define SRAM(type, offset) ((type *) (sram_begin + (offset)))
 
/* Returns a pointer to the SRAM table of contents. */
#define SRAM_TOC (SRAM(iosram_toc_t, 0))
 
/*
* Returns a pointer to the object of a given type which is placed at the given
* offset from the console buffer beginning.
*/
#define SGCN_BUFFER(type, offset) \
((type *) (sgcn_buffer_begin + (offset)))
 
/** Returns a pointer to the console buffer header. */
#define SGCN_BUFFER_HEADER (SGCN_BUFFER(sgcn_buffer_header_t, 0))
 
/** defined in drivers/kbd.c */
extern kbd_type_t kbd_type;
 
/** starting address of SRAM, will be set by the init_sram_begin function */
static uintptr_t sram_begin;
 
/**
* starting address of the SGCN buffer, will be set by the
* sgcn_buffer_begin_init function
*/
static uintptr_t sgcn_buffer_begin;
 
/**
* SGCN IRQ structure. So far used only for notifying the userspace of the
* key being pressed, not for kernel being informed about keyboard interrupts.
*/
static irq_t sgcn_irq;
 
// TODO: think of a way to synchronize accesses to the SGCN buffer between the kernel and userspace
 
/*
* Ensures that writing to the buffer and consequent update of the write pointer
* are together one atomic operation.
*/
SPINLOCK_INITIALIZE(sgcn_output_lock);
 
/*
* Prevents the input buffer read/write pointers from getting to inconsistent
* state.
*/
SPINLOCK_INITIALIZE(sgcn_input_lock);
 
 
/* functions referenced from definitions of I/O operations structures */
static void sgcn_noop(chardev_t *);
static void sgcn_putchar(chardev_t *, const char);
static char sgcn_key_read(chardev_t *);
 
/** character device operations */
static chardev_operations_t sgcn_ops = {
.suspend = sgcn_noop,
.resume = sgcn_noop,
.read = sgcn_key_read,
.write = sgcn_putchar
};
 
/** SGCN character device */
chardev_t sgcn_io;
 
/**
* Registers the physical area of the SRAM so that the userspace SGCN
* driver can map it. Moreover, it sets some sysinfo values (SRAM address
* and SRAM size).
*/
static void register_sram_parea(uintptr_t sram_begin_physical)
{
static parea_t sram_parea;
sram_parea.pbase = sram_begin_physical;
sram_parea.vbase = (uintptr_t) sram_begin;
sram_parea.frames = MAPPED_AREA_SIZE / FRAME_SIZE;
sram_parea.cacheable = false;
ddi_parea_register(&sram_parea);
sysinfo_set_item_val("sram.area.size", NULL, MAPPED_AREA_SIZE);
sysinfo_set_item_val("sram.address.physical", NULL,
sram_begin_physical);
}
 
/**
* Initializes the starting address of SRAM.
*
* The SRAM starts 0x900000 + C bytes behind the SBBC start in the
* physical memory, where C is the value read from the "iosram-toc"
* property of the "/chosen" OBP node. The sram_begin variable will
* be set to the virtual address which maps to the SRAM physical
* address.
*
* It also registers the physical area of SRAM and sets some sysinfo
* values (SRAM address and SRAM size).
*/
static void init_sram_begin(void)
{
ofw_tree_node_t *chosen;
ofw_tree_property_t *iosram_toc;
uintptr_t sram_begin_physical;
 
chosen = ofw_tree_lookup("/chosen");
if (!chosen)
panic("Can't find /chosen.\n");
 
iosram_toc = ofw_tree_getprop(chosen, "iosram-toc");
if (!iosram_toc)
panic("Can't find property \"iosram-toc\".\n");
if (!iosram_toc->value)
panic("Can't find SRAM TOC.\n");
 
sram_begin_physical = SBBC_START + SBBC_SRAM_OFFSET
+ *((uint32_t *) iosram_toc->value);
sram_begin = hw_map(sram_begin_physical, MAPPED_AREA_SIZE);
register_sram_parea(sram_begin_physical);
}
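/*
 * Worked example for init_sram_begin() above (illustration only, the TOC
 * offset 0x40 is hypothetical): if the "iosram-toc" property of /chosen
 * holds 0x40, then sram_begin_physical = 0x63000000000 + 0x900000 + 0x40
 * and sram_begin is the kernel virtual address returned by hw_map() for
 * that range.
 */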
 
/**
* Initializes the starting address of the SGCN buffer.
*
* The offset of the SGCN buffer within SRAM is obtained from the
* SRAM table of contents. The table of contents contains
* information about several buffers, among which there is an OBP
* console buffer - this one will be used as the SGCN buffer.
*
* This function also writes the offset of the SGCN buffer within SRAM
* under the sram.buffer.offset sysinfo key.
*/
static void sgcn_buffer_begin_init(void)
{
init_sram_begin();
ASSERT(strcmp(SRAM_TOC->magic, SRAM_TOC_MAGIC) == 0);
/* lookup TOC entry with the correct key */
uint32_t i;
for (i = 0; i < MAX_TOC_ENTRIES; i++) {
if (strcmp(SRAM_TOC->keys[i].key, CONSOLE_KEY) == 0)
break;
}
ASSERT(i < MAX_TOC_ENTRIES);
sgcn_buffer_begin = sram_begin + SRAM_TOC->keys[i].offset;
sysinfo_set_item_val("sram.buffer.offset", NULL,
SRAM_TOC->keys[i].offset);
}
 
/**
* Default suspend/resume operation for the input device.
*/
static void sgcn_noop(chardev_t *d)
{
}
 
/**
* Writes a single character to the SGCN (circular) output buffer
* and updates the output write pointer so that SGCN gets to know
* that the character has been written.
*/
static void sgcn_do_putchar(const char c)
{
uint32_t begin = SGCN_BUFFER_HEADER->out_begin;
uint32_t end = SGCN_BUFFER_HEADER->out_end;
uint32_t size = end - begin;
/* we need pointers to volatile variables */
volatile char *buf_ptr = (volatile char *)
SGCN_BUFFER(char, SGCN_BUFFER_HEADER->out_wrptr);
volatile uint32_t *out_wrptr_ptr = &(SGCN_BUFFER_HEADER->out_wrptr);
volatile uint32_t *out_rdptr_ptr = &(SGCN_BUFFER_HEADER->out_rdptr);
 
/*
* Write the character and increment the write pointer modulo the
* output buffer size. Note that if we are about to overwrite a character
* which has not been read by the SGCN controller yet (i.e. the output
* buffer is full), we need to wait until the controller reads some more
* characters. We wait actively, which means that all threads waiting
* for the lock are blocked. However, this situation is
* 1) rare - the output buffer is big, so filling the whole
* output buffer is improbable
* 2) short-lasting - it will take the controller only a fraction
* of a millisecond to pick the unread characters up
* 3) not serious - the blocked threads are those that print something
* to the user console, which is not a time-critical operation
*/
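/*
 * Worked example of the wrap-around arithmetic below (the values are
 * hypothetical): with begin = 0x100, end = 0x200 (i.e. size = 0x100) and
 * out_wrptr = 0x1ff, the new write pointer becomes
 * ((0x1ff - 0x100 + 1) % 0x100) + 0x100 = 0x100, i.e. the pointer wraps
 * back to the beginning of the circular buffer.
 */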
uint32_t new_wrptr = (((*out_wrptr_ptr) - begin + 1) % size) + begin;
while (*out_rdptr_ptr == new_wrptr)
;
*buf_ptr = c;
*out_wrptr_ptr = new_wrptr;
}
 
/**
* SGCN output operation. Prints a single character to the SGCN. If the line
* feed character is written ('\n'), the carriage return character ('\r') is
* written straight away.
*/
static void sgcn_putchar(struct chardev * cd, const char c)
{
spinlock_lock(&sgcn_output_lock);
sgcn_do_putchar(c);
if (c == '\n') {
sgcn_do_putchar('\r');
}
spinlock_unlock(&sgcn_output_lock);
}
 
/**
* Called when actively reading the character. Not implemented yet.
*/
static char sgcn_key_read(chardev_t *d)
{
return (char) 0;
}
 
/**
* The driver works in polled mode, so no interrupt should be handled by it.
*/
static irq_ownership_t sgcn_claim(void)
{
return IRQ_DECLINE;
}
 
/**
* The driver works in polled mode, so no interrupt should be handled by it.
*/
static void sgcn_irq_handler(irq_t *irq, void *arg, ...)
{
panic("Not yet implemented, SGCN works in polled mode.\n");
}
 
/**
* Grabs the input for kernel.
*/
void sgcn_grab(void)
{
ipl_t ipl = interrupts_disable();
volatile uint32_t *in_wrptr_ptr = &(SGCN_BUFFER_HEADER->in_wrptr);
volatile uint32_t *in_rdptr_ptr = &(SGCN_BUFFER_HEADER->in_rdptr);
/* skip everything the user typed before the grab that has not been processed yet */
spinlock_lock(&sgcn_input_lock);
*in_rdptr_ptr = *in_wrptr_ptr;
spinlock_unlock(&sgcn_input_lock);
 
spinlock_lock(&sgcn_irq.lock);
sgcn_irq.notif_cfg.notify = false;
spinlock_unlock(&sgcn_irq.lock);
interrupts_restore(ipl);
}
 
/**
* Releases the input so that userspace can use it.
*/
void sgcn_release(void)
{
ipl_t ipl = interrupts_disable();
spinlock_lock(&sgcn_irq.lock);
if (sgcn_irq.notif_cfg.answerbox)
sgcn_irq.notif_cfg.notify = true;
spinlock_unlock(&sgcn_irq.lock);
interrupts_restore(ipl);
}
 
/**
* Function regularly called by the keyboard polling thread. Finds out whether
* there are some unread characters in the input queue. If so, it picks them up
* and sends them to the upper layers of HelenOS.
*/
void sgcn_poll(void)
{
uint32_t begin = SGCN_BUFFER_HEADER->in_begin;
uint32_t end = SGCN_BUFFER_HEADER->in_end;
uint32_t size = end - begin;
spinlock_lock(&sgcn_input_lock);
ipl_t ipl = interrupts_disable();
spinlock_lock(&sgcn_irq.lock);
/* we need pointers to volatile variables */
volatile char *buf_ptr = (volatile char *)
SGCN_BUFFER(char, SGCN_BUFFER_HEADER->in_rdptr);
volatile uint32_t *in_wrptr_ptr = &(SGCN_BUFFER_HEADER->in_wrptr);
volatile uint32_t *in_rdptr_ptr = &(SGCN_BUFFER_HEADER->in_rdptr);
if (*in_rdptr_ptr != *in_wrptr_ptr) {
if (sgcn_irq.notif_cfg.notify && sgcn_irq.notif_cfg.answerbox) {
ipc_irq_send_notif(&sgcn_irq);
spinlock_unlock(&sgcn_irq.lock);
interrupts_restore(ipl);
spinlock_unlock(&sgcn_input_lock);
return;
}
}
spinlock_unlock(&sgcn_irq.lock);
interrupts_restore(ipl);
 
while (*in_rdptr_ptr != *in_wrptr_ptr) {
buf_ptr = (volatile char *)
SGCN_BUFFER(char, SGCN_BUFFER_HEADER->in_rdptr);
char c = *buf_ptr;
*in_rdptr_ptr = (((*in_rdptr_ptr) - begin + 1) % size) + begin;
if (c == '\r') {
c = '\n';
}
chardev_push_character(&sgcn_io, c);
}
spinlock_unlock(&sgcn_input_lock);
}
 
/**
* A public function which initializes I/O from/to the Serengeti console
* and sets it as the default input/output.
*/
void sgcn_init(void)
{
sgcn_buffer_begin_init();
 
kbd_type = KBD_SGCN;
 
devno_t devno = device_assign_devno();
irq_initialize(&sgcn_irq);
sgcn_irq.devno = devno;
sgcn_irq.inr = FICTIONAL_INR;
sgcn_irq.claim = sgcn_claim;
sgcn_irq.handler = sgcn_irq_handler;
irq_register(&sgcn_irq);
sysinfo_set_item_val("kbd", NULL, true);
sysinfo_set_item_val("kbd.type", NULL, KBD_SGCN);
sysinfo_set_item_val("kbd.devno", NULL, devno);
sysinfo_set_item_val("kbd.inr", NULL, FICTIONAL_INR);
sysinfo_set_item_val("fb.kind", NULL, 4);
chardev_initialize("sgcn_io", &sgcn_io, &sgcn_ops);
stdin = &sgcn_io;
stdout = &sgcn_io;
}
 
/** @}
*/
/branches/dynload/kernel/arch/sparc64/src/drivers/pci.c
45,40 → 45,37
#include <func.h>
#include <arch/asm.h>
 
#define PCI_SABRE_REGS_REG 0
#define SABRE_INTERNAL_REG 0
#define PSYCHO_INTERNAL_REG 2
 
#define PCI_SABRE_IMAP_BASE 0x200
#define PCI_SABRE_ICLR_BASE 0x300
#define OBIO_IMR_BASE 0x200
#define OBIO_IMR(ino) (OBIO_IMR_BASE + ((ino) & INO_MASK))
 
#define PCI_PSYCHO_REGS_REG 2
#define OBIO_CIR_BASE 0x300
#define OBIO_CIR(ino) (OBIO_CIR_BASE + ((ino) & INO_MASK))
 
#define PCI_PSYCHO_IMAP_BASE 0x200
#define PCI_PSYCHO_ICLR_BASE 0x300
static void obio_enable_interrupt(pci_t *, int);
static void obio_clear_interrupt(pci_t *, int);
 
static pci_t *pci_sabre_init(ofw_tree_node_t *node);
static void pci_sabre_enable_interrupt(pci_t *pci, int inr);
static void pci_sabre_clear_interrupt(pci_t *pci, int inr);
static pci_t *pci_sabre_init(ofw_tree_node_t *);
static pci_t *pci_psycho_init(ofw_tree_node_t *);
 
static pci_t *pci_psycho_init(ofw_tree_node_t *node);
static void pci_psycho_enable_interrupt(pci_t *pci, int inr);
static void pci_psycho_clear_interrupt(pci_t *pci, int inr);
 
/** PCI operations for Sabre model. */
static pci_operations_t pci_sabre_ops = {
.enable_interrupt = pci_sabre_enable_interrupt,
.clear_interrupt = pci_sabre_clear_interrupt
.enable_interrupt = obio_enable_interrupt,
.clear_interrupt = obio_clear_interrupt
};
/** PCI operations for Psycho model. */
static pci_operations_t pci_psycho_ops = {
.enable_interrupt = pci_psycho_enable_interrupt,
.clear_interrupt = pci_psycho_clear_interrupt
.enable_interrupt = obio_enable_interrupt,
.clear_interrupt = obio_clear_interrupt
};
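/*
 * Note (illustration, not part of this revision): pci->reg is declared as
 * uint64_t *, so OBIO_IMR()/OBIO_CIR() yield 8-byte word indices; e.g.
 * index 0x200 corresponds to byte offset 0x1000 from the start of the
 * mapped controller registers.
 */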
 
/** Initialize PCI controller (model Sabre).
*
* @param node OpenFirmware device tree node of the Sabre.
* @param node OpenFirmware device tree node of the Sabre.
*
* @return Address of the initialized PCI structure.
* @return Address of the initialized PCI structure.
*/
pci_t *pci_sabre_init(ofw_tree_node_t *node)
{
95,11 → 92,12
ofw_upa_reg_t *reg = prop->value;
count_t regs = prop->size / sizeof(ofw_upa_reg_t);
 
if (regs < PCI_SABRE_REGS_REG + 1)
if (regs < SABRE_INTERNAL_REG + 1)
return NULL;
 
uintptr_t paddr;
if (!ofw_upa_apply_ranges(node->parent, &reg[PCI_SABRE_REGS_REG], &paddr))
if (!ofw_upa_apply_ranges(node->parent, &reg[SABRE_INTERNAL_REG],
&paddr))
return NULL;
 
pci = (pci_t *) malloc(sizeof(pci_t), FRAME_ATOMIC);
108,7 → 106,7
 
pci->model = PCI_SABRE;
pci->op = &pci_sabre_ops;
pci->reg = (uint64_t *) hw_map(paddr, reg[PCI_SABRE_REGS_REG].size);
pci->reg = (uint64_t *) hw_map(paddr, reg[SABRE_INTERNAL_REG].size);
 
return pci;
}
116,9 → 114,9
 
/** Initialize the Psycho PCI controller.
*
* @param node OpenFirmware device tree node of the Psycho.
* @param node OpenFirmware device tree node of the Psycho.
*
* @return Address of the initialized PCI structure.
* @return Address of the initialized PCI structure.
*/
pci_t *pci_psycho_init(ofw_tree_node_t *node)
{
135,11 → 133,12
ofw_upa_reg_t *reg = prop->value;
count_t regs = prop->size / sizeof(ofw_upa_reg_t);
 
if (regs < PCI_PSYCHO_REGS_REG + 1)
if (regs < PSYCHO_INTERNAL_REG + 1)
return NULL;
 
uintptr_t paddr;
if (!ofw_upa_apply_ranges(node->parent, &reg[PCI_PSYCHO_REGS_REG], &paddr))
if (!ofw_upa_apply_ranges(node->parent, &reg[PSYCHO_INTERNAL_REG],
&paddr))
return NULL;
 
pci = (pci_t *) malloc(sizeof(pci_t), FRAME_ATOMIC);
148,31 → 147,21
 
pci->model = PCI_PSYCHO;
pci->op = &pci_psycho_ops;
pci->reg = (uint64_t *) hw_map(paddr, reg[PCI_PSYCHO_REGS_REG].size);
pci->reg = (uint64_t *) hw_map(paddr, reg[PSYCHO_INTERNAL_REG].size);
 
return pci;
}
 
void pci_sabre_enable_interrupt(pci_t *pci, int inr)
void obio_enable_interrupt(pci_t *pci, int inr)
{
pci->reg[PCI_SABRE_IMAP_BASE + (inr & INO_MASK)] |= IMAP_V_MASK;
pci->reg[OBIO_IMR(inr & INO_MASK)] |= IMAP_V_MASK;
}
 
void pci_sabre_clear_interrupt(pci_t *pci, int inr)
void obio_clear_interrupt(pci_t *pci, int inr)
{
pci->reg[PCI_SABRE_ICLR_BASE + (inr & INO_MASK)] = 0;
pci->reg[OBIO_CIR(inr & INO_MASK)] = 0; /* set IDLE */
}
 
void pci_psycho_enable_interrupt(pci_t *pci, int inr)
{
pci->reg[PCI_PSYCHO_IMAP_BASE + (inr & INO_MASK)] |= IMAP_V_MASK;
}
 
void pci_psycho_clear_interrupt(pci_t *pci, int inr)
{
pci->reg[PCI_PSYCHO_ICLR_BASE + (inr & INO_MASK)] = 0;
}
 
/** Initialize PCI controller. */
pci_t *pci_init(ofw_tree_node_t *node)
{
215,14 → 204,14
 
void pci_enable_interrupt(pci_t *pci, int inr)
{
ASSERT(pci->model);
ASSERT(pci->op && pci->op->enable_interrupt);
pci->op->enable_interrupt(pci, inr);
}
 
void pci_clear_interrupt(pci_t *pci, int inr)
void pci_clear_interrupt(void *pcip, int inr)
{
ASSERT(pci->model);
pci_t *pci = (pci_t *)pcip;
 
ASSERT(pci->op && pci->op->clear_interrupt);
pci->op->clear_interrupt(pci, inr);
}
/branches/dynload/kernel/arch/sparc64/src/start.S
27,6 → 27,7
#
 
#include <arch/arch.h>
#include <arch/cpu.h>
#include <arch/regdef.h>
#include <arch/boot/boot.h>
#include <arch/stack.h>
47,6 → 48,16
#define BSP_FLAG 1
 
/*
* 2^PHYSMEM_ADDR_SIZE is the size of the physical address space on
* a given processor.
*/
#if defined (US)
#define PHYSMEM_ADDR_SIZE 41
#elif defined (US3)
#define PHYSMEM_ADDR_SIZE 43
#endif
 
/*
* Here is where the kernel is passed control from the boot loader.
*
* The registers are expected to be in this state:
67,11 → 78,13
and %o0, %l0, %l7 ! l7 <= bootstrap processor?
andn %o0, %l0, %l6 ! l6 <= start of physical memory
 
! Get bits 40:13 of physmem_base.
! Get bits (PHYSMEM_ADDR_SIZE - 1):13 of physmem_base.
srlx %l6, 13, %l5
sllx %l5, 13 + (63 - 40), %l5
srlx %l5, 63 - 40, %l5 ! l5 <= physmem_base[40:13]
! l5 <= physmem_base[(PHYSMEM_ADDR_SIZE - 1):13]
sllx %l5, 13 + (63 - (PHYSMEM_ADDR_SIZE - 1)), %l5
srlx %l5, 63 - (PHYSMEM_ADDR_SIZE - 1), %l5
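/*
 * Illustration of the shift pair above (not part of this revision):
 * for US, PHYSMEM_ADDR_SIZE is 41, so the value is shifted left by
 * 13 + (63 - 40) = 36 and back right by 63 - 40 = 23, which clears
 * the bits above bit 40 while keeping bits [40:13] in place.
 */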
/*
* Setup basic runtime environment.
*/
83,6 → 96,8
! consistent
wrpr %g0, NWINDOWS - 1, %cleanwin ! prevent needless clean_window
! traps for kernel
wrpr %g0, 0, %wstate ! use default spill/fill trap
 
wrpr %g0, 0, %tl ! TL = 0, primary context
! register is used
244,7 → 259,8
 
/*
* Precompute kernel 8K TLB data template.
* %l5 contains starting physical address bits [40:13]
* %l5 contains starting physical address
* bits [(PHYSMEM_ADDR_SIZE - 1):13]
*/
sethi %hi(kernel_8k_tlb_data_template), %l4
ldx [%l4 + %lo(kernel_8k_tlb_data_template)], %l3
282,15 → 298,32
nop
 
 
1:
#ifdef CONFIG_SMP
/*
* Determine the width of the MID and save its mask to %g3. The width
* is
* * 5 for US and US-IIIi,
* * 10 for US3 except US-IIIi.
*/
#if defined(US)
mov 0x1f, %g3
#elif defined(US3)
mov 0x3ff, %g3
rdpr %ver, %g2
sllx %g2, 16, %g2
srlx %g2, 48, %g2
cmp %g2, IMPL_ULTRASPARCIII_I
move %xcc, 0x1f, %g3
#endif
 
/*
* Read MID from the processor.
*/
1:
ldxa [%g0] ASI_UPA_CONFIG, %g1
srlx %g1, UPA_CONFIG_MID_SHIFT, %g1
and %g1, UPA_CONFIG_MID_MASK, %g1
ldxa [%g0] ASI_ICBUS_CONFIG, %g1
srlx %g1, ICBUS_CONFIG_MID_SHIFT, %g1
and %g1, %g3, %g1
 
#ifdef CONFIG_SMP
/*
* Active loop for APs until the BSP picks them up. A processor cannot
* leave the loop until the global variable 'waking_up_mid' equals its
/branches/dynload/kernel/arch/ia64/include/interrupt.h
50,10 → 50,13
#define IVT_FIRST 0
 
/** External Interrupt vectors. */
 
#define VECTOR_TLB_SHOOTDOWN_IPI 0xf0
#define INTERRUPT_TIMER 255
#define IRQ_KBD 241
#define IRQ_MOUSE 252
#define IRQ_KBD (0x01+LAGACY_INTERRUPT_BASE)
#define IRQ_MOUSE (0x0c+LAGACY_INTERRUPT_BASE)
#define INTERRUPT_SPURIOUS 15
#define LAGACY_INTERRUPT_BASE 0x20
 
/** General Exception codes. */
#define GE_ILLEGALOP 0
150,6 → 153,7
extern void external_interrupt(uint64_t vector, istate_t *istate);
extern void disabled_fp_register(uint64_t vector, istate_t *istate);
 
 
#endif
 
/** @}
/branches/dynload/kernel/arch/ia64/include/proc/task.h
31,14 → 31,19
*/
/** @file
*/
#include <proc/task.h>
 
#ifndef KERN_ia64_TASK_H_
#define KERN_ia64_TASK_H_
 
#include <adt/bitmap.h>
 
typedef struct {
bitmap_t *iomap;
} task_arch_t;
 
#define task_create_arch(t)
 
#define task_create_arch(t) {(t)->arch.iomap=NULL;}
#define task_destroy_arch(t)
 
#endif
/branches/dynload/kernel/arch/ia64/include/bootinfo.h
33,6 → 33,13
 
#define CONFIG_INIT_TASKS 32
 
#define MEMMAP_ITEMS 128
 
#define EFI_MEMMAP_FREE_MEM 0
#define EFI_MEMMAP_IO 1
#define EFI_MEMMAP_IO_PORTS 2
 
 
typedef struct {
void *addr;
unsigned long size;
43,14 → 50,24
binit_task_t tasks[CONFIG_INIT_TASKS];
} binit_t;
 
typedef struct {
unsigned int type;
unsigned long base;
unsigned long size;
}efi_memmap_item_t;
 
 
typedef struct {
binit_t taskmap;
 
efi_memmap_item_t memmap[MEMMAP_ITEMS];
unsigned int memmap_items;
 
unsigned long * sapic;
unsigned long sys_freq;
unsigned long freq_scale;
unsigned int wakeup_intno;
int hello_configured;
 
} bootinfo_t;
 
/branches/dynload/kernel/arch/ia64/include/mm/page.h
48,8 → 48,15
#define IO_PAGE_WIDTH 26 /* 64M */
#define FW_PAGE_WIDTH 28 /* 256M */
 
/** Statically mapped IO spaces */
#define USPACE_IO_PAGE_WIDTH 12 /* 4K */
 
 
 
/** Statically mapped IO spaces - offsets to 0xe...00 of virtual addresses.
* Because the minimal number of implemented virtual bits is 51,
* it is possible to have values up to 0x0007000000000000 here.
*/
 
/* Firmware area (below 4GB in phys mem) */
#define FW_OFFSET 0x00000000F0000000
/* Legacy IO space */
/branches/dynload/kernel/arch/ia64/include/mm/tlb.h
46,8 → 46,8
/** Data and instruction Translation Register indices. */
#define DTR_KERNEL 0
#define ITR_KERNEL 0
#define DTR_KSTACK1 1
#define DTR_KSTACK2 2
#define DTR_KSTACK1 4
#define DTR_KSTACK2 5
 
/** Portion of TLB insertion format data structure. */
union tlb_entry {
/branches/dynload/kernel/arch/ia64/include/cpu.h
87,6 → 87,8
static inline void ipi_send_ipi(int id,int eid,int intno)
{
(bootinfo->sapic)[2*(id*256+eid)]=intno;
srlz_d();
 
}
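/*
 * Illustrative note (an assumption, not part of this revision): sapic
 * points to an array of unsigned long, so the index 2*(id*256+eid) gives
 * every (id, eid) processor a 16-byte slot in the SAPIC area; writing the
 * interrupt number into the first word of that slot delivers the IPI.
 */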
 
 
/branches/dynload/kernel/arch/ia64/include/drivers/kbd.h
0,0 → 1,48
/*
* Copyright (c) 2006 Jakub Jermar, Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia6464
* @{
*/
/** @file
*/
 
#ifndef KERN_ia64_KBD_H_
#define KERN_ia64_KBD_H_
 
 
#define KBD_UNKNOWN 0
#define KBD_SKI 1
#define KBD_LEGACY 2
#define KBD_NS16550 3
 
 
#endif
 
/** @}
*/
/branches/dynload/kernel/arch/ia64/src/ia64.c
60,13 → 60,16
#include <arch/atomic.h>
#include <panic.h>
#include <print.h>
#include <sysinfo/sysinfo.h>
 
/* NS16550 as COM1 */
#define NS16550_IRQ 4
#define NS16550_IRQ (4+LAGACY_INTERRUPT_BASE)
#define NS16550_PORT 0x3f8
 
bootinfo_t *bootinfo;
 
static uint64_t iosapic_base=0xfec00000;
 
void arch_pre_main(void)
{
/* Setup usermode init tasks. */
110,10 → 113,40
}
 
static void iosapic_init(void)
{
 
uint64_t IOSAPIC = PA2KA((unative_t)(iosapic_base))|FW_OFFSET;
int i;
int myid,myeid;
myid=ia64_get_cpu_id();
myeid=ia64_get_cpu_eid();
 
for(i=0;i<16;i++)
{
if(i==2) continue; //Disable Cascade interrupt
((uint32_t*)(IOSAPIC+0x00))[0]=0x10+2*i;
srlz_d();
((uint32_t*)(IOSAPIC+0x10))[0]=LAGACY_INTERRUPT_BASE+i;
srlz_d();
((uint32_t*)(IOSAPIC+0x00))[0]=0x10+2*i+1;
srlz_d();
((uint32_t*)(IOSAPIC+0x10))[0]=myid<<(56-32) | myeid<<(48-32);
srlz_d();
}
 
}
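/*
 * Illustrative note on iosapic_init() above (an assumption, not part of
 * this revision): the IOSAPIC is programmed through an indirect register
 * pair - a select register at offset 0x00 and a data window at offset
 * 0x10. Redirection table entry n occupies indices 0x10 + 2*n (low word:
 * the vector, here LAGACY_INTERRUPT_BASE + n) and 0x10 + 2*n + 1 (high
 * word: the id/eid of the destination processor).
 */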
 
 
void arch_post_mm_init(void)
{
if(config.cpu_active==1)
{
iosapic_init();
irq_init(INR_COUNT, INR_COUNT);
#ifdef SKI
ski_init_console();
121,7 → 154,8
ega_init();
#endif
}
it_init();
it_init();
}
 
void arch_post_cpu_init(void)
139,9 → 173,14
static void i8042_kkbdpoll(void *arg)
{
while (1) {
i8042_poll();
#ifdef CONFIG_NS16550
#ifndef CONFIG_NS16550_INTERRUPT_DRIVEN
ns16550_poll();
#endif
#else
#ifndef CONFIG_I8042_INTERRUPT_DRIVEN
i8042_poll();
#endif
#endif
thread_usleep(POLL_INTERVAL);
}
148,6 → 187,14
}
#endif
 
 
void end_of_irq_void(void *cir_arg __attribute__((unused)),inr_t inr __attribute__((unused)));
void end_of_irq_void(void *cir_arg __attribute__((unused)),inr_t inr __attribute__((unused)))
{
return;
}
 
 
void arch_post_smp_init(void)
{
 
165,13 → 212,13
 
#ifdef I460GX
devno_t kbd = device_assign_devno();
devno_t mouse = device_assign_devno();
/* keyboard controller */
i8042_init(kbd, IRQ_KBD, mouse, IRQ_MOUSE);
 
#ifdef CONFIG_NS16550
ns16550_init(kbd, NS16550_IRQ, NS16550_PORT); // as a COM 1
ns16550_init(kbd, NS16550_PORT, NS16550_IRQ,end_of_irq_void,NULL); // as a COM 1
#else
devno_t mouse = device_assign_devno();
i8042_init(kbd, IRQ_KBD, mouse, IRQ_MOUSE);
#endif
thread_t *t;
t = thread_create(i8042_kkbdpoll, NULL, TASK, 0, "kkbdpoll", true);
182,6 → 229,15
#endif
 
}
sysinfo_set_item_val("ia64_iospace", NULL, true);
sysinfo_set_item_val("ia64_iospace.address", NULL, true);
sysinfo_set_item_val("ia64_iospace.address.virtual", NULL, IO_OFFSET);
 
 
 
 
 
}
 
 
231,6 → 287,12
{
#ifdef SKI
ski_kbd_grab();
#else
#ifdef CONFIG_NS16550
ns16550_grab();
#else
i8042_grab();
#endif
#endif
}
/** Return console to userspace
240,6 → 302,13
{
#ifdef SKI
ski_kbd_release();
#else
#ifdef CONFIG_NS16550
ns16550_release();
#else
i8042_release();
#endif
 
#endif
}
 
/branches/dynload/kernel/arch/ia64/src/ski/ski.c
44,6 → 44,7
#include <proc/thread.h>
#include <synch/spinlock.h>
#include <arch/asm.h>
#include <arch/drivers/kbd.h>
 
#define SKI_KBD_INR 0
 
227,6 → 228,7
sysinfo_set_item_val("kbd", NULL, true);
sysinfo_set_item_val("kbd.inr", NULL, SKI_KBD_INR);
sysinfo_set_item_val("kbd.devno", NULL, ski_kbd_devno);
sysinfo_set_item_val("kbd.type", NULL, KBD_SKI);
}
 
void ski_kbd_grab(void)
/branches/dynload/kernel/arch/ia64/src/smp/smp.c
87,7 → 87,6
myid=ia64_get_cpu_id();
myeid=ia64_get_cpu_eid();
 
printf("Not sending to ID:%d,EID:%d",myid,myeid);
for(id=0;id<256;id++)
for(eid=0;eid<256;eid++)
97,12 → 96,28
 
void ipi_broadcast_arch(int ipi )
{
ipi_broadcast_arch_all(ipi);
int id,eid;
int myid,myeid;
myid=ia64_get_cpu_id();
myeid=ia64_get_cpu_eid();
 
//printf("Sending ipi %d on %d\n",ipi,CPU->id);
for(id=0;id<256;id++)
for(eid=0;eid<256;eid++)
if((id!=myid) || (eid!=myeid))
if(cpu_by_id_eid_list[id][eid])
ipi_send_ipi(id,eid,ipi);
 
}
 
 
void smp_init(void)
{
if(!bootinfo->hello_configured) return;
// If the system has not been prepared by hello, we are not able to start APs;
// this means we are running on a simulator.
sapic_init();
ipi_broadcast_arch_all(bootinfo->wakeup_intno);
volatile long long brk;
115,7 → 130,6
for(eid=0;eid<256;eid++)
if(cpu_by_id_eid_list[id][eid]==1){
config.cpu_count++;
printf("Found CPU ID:%d EDI:%d\n",id,eid);
cpu_by_id_eid_list[id][eid]=2;
 
}
/branches/dynload/kernel/arch/ia64/src/ddi/ddi.c
1,5 → 1,5
/*
* Copyright (c) 2006 Jakub Jermar
* Copyright (c) 2006 Jakub Jermar, Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
35,7 → 35,12
#include <ddi/ddi.h>
#include <proc/task.h>
#include <arch/types.h>
#include <mm/slab.h>
#include <errno.h>
 
#define IO_MEMMAP_PAGES 16384
#define PORTS_PER_PAGE 4
 
/** Enable I/O space range for task.
*
* Interrupts are disabled and task is locked.
48,6 → 53,23
*/
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
 
if(!task->arch.iomap)
{
uint8_t *map;
task->arch.iomap=malloc(sizeof(bitmap_t),0);
map=malloc(BITS2BYTES(IO_MEMMAP_PAGES),0);
if(!map)
return ENOMEM;
bitmap_initialize(task->arch.iomap,map,IO_MEMMAP_PAGES);
bitmap_clear_range(task->arch.iomap,0,IO_MEMMAP_PAGES);
}
uintptr_t iopage = ioaddr / PORTS_PER_PAGE;
size = ALIGN_UP (size+ioaddr-4*iopage,PORTS_PER_PAGE);
bitmap_set_range(task->arch.iomap,iopage,size/4);
 
 
return 0;
}
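/*
 * Worked example for ddi_iospace_enable_arch() above (illustration only):
 * enabling the COM1 range with ioaddr = 0x3f8 and size = 8 gives
 * iopage = 0x3f8 / 4 = 0xfe and ALIGN_UP(8 + 0x3f8 - 4 * 0xfe, 4) = 8,
 * so bits 0xfe and 0xff are set in the task's iomap, covering ports
 * 0x3f8-0x3ff.
 */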
 
/branches/dynload/kernel/arch/ia64/src/mm/tlb.c
475,6 → 475,73
}
}
 
 
 
static int is_io_page_accessible(int page)
{
if(TASK->arch.iomap) return bitmap_get(TASK->arch.iomap,page);
else return 0;
}
 
#define IO_FRAME_BASE 0xFFFFC000000
 
/** There is special handling of memory-mapped legacy I/O, because of the
* 4KB-sized accesses; this applies only to userspace.
*
* @param va virtual address of page fault
* @param istate Structure with saved interruption state.
*
*
* @return 1 on success, 0 on fail
*/
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
if((va >= IO_OFFSET ) && (va < IO_OFFSET + (1<<IO_PAGE_WIDTH)))
if(TASK){
uint64_t io_page=(va & ((1<<IO_PAGE_WIDTH)-1)) >> (USPACE_IO_PAGE_WIDTH);
if(is_io_page_accessible(io_page)){
//printf("Insert %llX\n",va);
 
uint64_t page,frame;
 
page = IO_OFFSET + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
frame = IO_FRAME_BASE + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
 
 
tlb_entry_t entry;
entry.word[0] = 0;
entry.word[1] = 0;
entry.p = true; /* present */
entry.ma = MA_UNCACHEABLE;
entry.a = true; /* already accessed */
entry.d = true; /* already dirty */
entry.pl = PL_USER;
entry.ar = AR_READ | AR_WRITE;
entry.ppn = frame >> PPN_SHIFT; // TODO: compute the frame
entry.ps = USPACE_IO_PAGE_WIDTH;
dtc_mapping_insert(page, TASK->as->asid, entry); // TODO: determine the ASID
return 1;
}else {
fault_if_from_uspace(istate,"IO access fault at %p",va);
return 0;
}
} else
return 0;
else
return 0;
return 0;
 
}
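/*
 * Worked example for try_memmap_io_insertion() above (illustration only,
 * assuming IO_OFFSET is aligned to the 64M legacy I/O page): a userspace
 * access to va = IO_OFFSET + 0xfe000 yields io_page = 0xfe000 >> 12 = 0xfe;
 * if bit 0xfe is set in TASK->arch.iomap, a 4K uncacheable user mapping
 * from IO_OFFSET + 0xfe000 to frame IO_FRAME_BASE + 0xfe000 is inserted
 * into the DTC.
 */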
 
 
 
 
/** Data TLB fault handler for faults with VHPT turned off.
*
* @param vector Interruption vector.
511,10 → 578,11
dtc_pte_copy(t);
page_table_unlock(AS, true);
} else {
page_table_unlock(AS, true);
if (try_memmap_io_insertion(va,istate)) return;
/*
* Forward the page fault to the address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
fault_if_from_uspace(istate,"Page fault at %p",va);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
/branches/dynload/kernel/arch/ia64/src/mm/frame.c
36,14 → 36,20
#include <mm/frame.h>
#include <config.h>
#include <panic.h>
#include <arch/bootinfo.h>
#include <align.h>
#include <macros.h>
 
/*
* This is Ski-specific and certainly not sufficient
* for real ia64 systems that provide memory map.
*/
#define MEMORY_SIZE (64 * 1024 * 1024)
#define MEMORY_SIZE (256 * 1024 * 1024)
#define MEMORY_BASE (0 * 64 * 1024 * 1024)
 
#define KERNEL_RESERVED_AREA_BASE (0x4400000)
#define KERNEL_RESERVED_AREA_SIZE (16*1024*1024)
 
#define ONE_TO_ONE_MAPPING_SIZE (256*1048576) // Mapped at start
 
#define ROM_BASE 0xa0000 //For ski
50,22 → 56,41
#define ROM_SIZE (384 * 1024) //For ski
void poke_char(int x,int y,char ch, char c);
 
#define MIN_ZONE_SIZE (64*1024)
 
uintptr_t last_frame;
#define MINCONF 1
 
void frame_arch_init(void)
{
 
if(config.cpu_active==1)
{
zone_create(MEMORY_BASE >> FRAME_WIDTH, SIZE2FRAMES(MEMORY_SIZE), (MEMORY_SIZE) >> FRAME_WIDTH, 0);
if(config.cpu_active==1){
unsigned int i;
for(i=0;i<bootinfo->memmap_items;i++){
if (bootinfo->memmap[i].type==EFI_MEMMAP_FREE_MEM){
uint64_t base=bootinfo->memmap[i].base;
uint64_t size=bootinfo->memmap[i].size;
uint64_t abase=ALIGN_UP(base,FRAME_SIZE);
if(size>FRAME_SIZE) size -=abase-base;
 
if(size>MIN_ZONE_SIZE) {
zone_create(abase >> FRAME_WIDTH, (size) >> FRAME_WIDTH, max(MINCONF,((abase) >> FRAME_WIDTH)), 0);
}
}
}
//zone_create(MEMORY_BASE >> FRAME_WIDTH, SIZE2FRAMES(MEMORY_SIZE), (MEMORY_SIZE) >> FRAME_WIDTH, 0);
/*
* Blacklist ROM regions.
*/
//frame_mark_unavailable(ADDR2PFN(ROM_BASE), SIZE2FRAMES(ROM_SIZE));
frame_mark_unavailable(ADDR2PFN(ROM_BASE), SIZE2FRAMES(ROM_SIZE));
 
frame_mark_unavailable(ADDR2PFN(0), SIZE2FRAMES(1048576));
last_frame=SIZE2FRAMES((VRN_KERNEL<<VRN_SHIFT)+ONE_TO_ONE_MAPPING_SIZE);
frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE), SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE));
}
}
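/*
 * Worked example for frame_arch_init() above (illustration only, assuming
 * a 16 KB FRAME_SIZE): an EFI_MEMMAP_FREE_MEM item with base = 0x4000800
 * and size = 1 MB is aligned up to abase = 0x4004000, size shrinks by the
 * skipped 0x3800 bytes, and because the remainder still exceeds
 * MIN_ZONE_SIZE a zone of size >> FRAME_WIDTH frames is created starting
 * at frame abase >> FRAME_WIDTH.
 */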
 
/branches/dynload/kernel/arch/ia64/src/interrupt.c
53,6 → 53,7
#include <ipc/irq.h>
#include <ipc/ipc.h>
#include <synch/spinlock.h>
#include <mm/tlb.h>
 
#define VECTORS_64_BUNDLE 20
#define VECTORS_16_BUNDLE 48
234,19 → 235,19
vector_to_string(vector));
}
 
static void end_of_local_irq(void)
{
asm volatile ("mov cr.eoi=r0;;");
}
 
 
void external_interrupt(uint64_t vector, istate_t *istate)
{
irq_t *irq;
cr_ivr_t ivr;
ivr.value = ivr_read();
srlz_d();
 
irq = irq_dispatch_and_lock(ivr.vector);
if (irq) {
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
switch (ivr.vector) {
case INTERRUPT_SPURIOUS:
#ifdef CONFIG_DEBUG
254,12 → 255,60
#endif
break;
 
#ifdef CONFIG_SMP
case VECTOR_TLB_SHOOTDOWN_IPI:
tlb_shootdown_ipi_recv();
end_of_local_irq();
break;
#endif
 
case INTERRUPT_TIMER:
{
 
irq_t *irq = irq_dispatch_and_lock(ivr.vector);
if (irq) {
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
panic("\nUnhandled Internal Timer Interrupt (%d)\n",ivr.vector);
}
}
break;
default:
panic("\nUnhandled External Interrupt Vector %d\n",
ivr.vector);
{
 
int ack=false;
irq_t *irq = irq_dispatch_and_lock(ivr.vector);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
end_of_local_irq();
ack=true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Unhandled interrupt.
*/
end_of_local_irq();
ack=true;
#ifdef CONFIG_DEBUG
printf("\nUnhandled External Interrupt Vector %d\n",ivr.vector);
#endif
}
if(!ack) end_of_local_irq();
 
}
 
 
break;
}
}
}
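/*
 * Note on the rewritten interrupt dispatch above (illustration, not part
 * of this revision): the ack flag ensures that cr.eoi is written exactly
 * once per external interrupt - before the handler when irq->preack is
 * set, or when no handler is registered, and after the handler returns
 * otherwise.
 */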
 
/** @}
/branches/dynload/kernel/arch/ia64/src/drivers/ega.c
46,12 → 46,18
#include <console/console.h>
#include <sysinfo/sysinfo.h>
#include <arch/drivers/ega.h>
#include <ddi/ddi.h>
 
 
/*
* The EGA driver.
* Simple and short. Function for displaying characters and "scrolling".
*/
 
 
static parea_t ega_parea; /**< Physical memory area for EGA video RAM. */
 
 
SPINLOCK_INITIALIZE(egalock);
static uint32_t ega_cursor;
static uint8_t *videoram;
75,12 → 81,21
 
chardev_initialize("ega_out", &ega_console, &ega_ops);
stdout = &ega_console;
 
 
ega_parea.pbase = VIDEORAM & 0xffffffff;
ega_parea.vbase = (uintptr_t) videoram;
ega_parea.frames = 1;
ega_parea.cacheable = false;
ddi_parea_register(&ega_parea);
 
sysinfo_set_item_val("fb", NULL, true);
sysinfo_set_item_val("fb.kind", NULL, 2);
sysinfo_set_item_val("fb.width", NULL, ROW);
sysinfo_set_item_val("fb.height", NULL, ROWS);
sysinfo_set_item_val("fb.address.physical", NULL, VIDEORAM);
sysinfo_set_item_val("fb.blinking", NULL, true);
sysinfo_set_item_val("fb.address.physical", NULL, VIDEORAM & 0xffffffff);
#ifndef CONFIG_FB
putchar('\n');
/branches/dynload/kernel/arch/arm32/src/arm32.c
86,7 → 86,15
console_init(device_assign_devno());
 
#ifdef CONFIG_FB
fb_init(machine_get_fb_address(), 640, 480, 1920, VISUAL_RGB_8_8_8);
fb_properties_t prop = {
.addr = machine_get_fb_address(),
.offset = 0,
.x = 640,
.y = 480,
.scan = 1920,
.visual = VISUAL_RGB_8_8_8,
};
fb_init(&prop);
#endif
}
 
/branches/dynload/kernel/arch/ppc32/src/ppc32.c
94,7 → 94,15
default:
panic("Unsupported bits per pixel");
}
fb_init(bootinfo.screen.addr, bootinfo.screen.width, bootinfo.screen.height, bootinfo.screen.scanline, visual);
fb_properties_t prop = {
.addr = bootinfo.screen.addr,
.offset = 0,
.x = bootinfo.screen.width,
.y = bootinfo.screen.height,
.scan = bootinfo.screen.scanline,
.visual = visual,
};
fb_init(&prop);
/* Initialize IRQ routing */
irq_init(IRQ_COUNT, IRQ_COUNT);
103,7 → 111,8
pic_init(bootinfo.keyboard.addr, PAGE_SIZE);
/* Initialize I/O controller */
cuda_init(device_assign_devno(), bootinfo.keyboard.addr + 0x16000, 2 * PAGE_SIZE);
cuda_init(device_assign_devno(),
bootinfo.keyboard.addr + 0x16000, 2 * PAGE_SIZE);
/* Merge all zones to 1 big zone */
zone_merge_all();
128,7 → 137,10
 
void userspace(uspace_arg_t *kernel_uarg)
{
userspace_asm((uintptr_t) kernel_uarg->uspace_uarg, (uintptr_t) kernel_uarg->uspace_stack + THREAD_STACK_SIZE - SP_DELTA, (uintptr_t) kernel_uarg->uspace_entry);
userspace_asm((uintptr_t) kernel_uarg->uspace_uarg,
(uintptr_t) kernel_uarg->uspace_stack +
THREAD_STACK_SIZE - SP_DELTA,
(uintptr_t) kernel_uarg->uspace_entry);
/* Unreachable */
for (;;)
/branches/dynload/kernel/arch/mips32/src/mips32.c
126,7 → 126,15
console_init(device_assign_devno());
#ifdef CONFIG_FB
/* GXemul framebuffer */
fb_init(0x12000000, 640, 480, 1920, VISUAL_RGB_8_8_8);
fb_properties_t gxemul_prop = {
.addr = 0x12000000,
.offset = 0,
.x = 640,
.y = 480,
.scan = 1920,
.visual = VISUAL_RGB_8_8_8,
};
fb_init(&gxemul_prop);
#endif
sysinfo_set_item_val("machine." STRING(MACHINE), NULL, 1);
}
/branches/dynload/kernel/arch/ia32/src/asm.S
158,6 → 158,7
*/
.global sysenter_handler
sysenter_handler:
sti
pushl %ebp # remember user stack
pushl %edi # remember return user address
 
/branches/dynload/kernel/arch/ia32/src/drivers/vesa.c
86,7 → 86,15
panic("Unsupported bits per pixel");
}
fb_init(vesa_ph_addr, vesa_width, vesa_height, vesa_scanline, visual);
fb_properties_t vesa_props = {
.addr = vesa_ph_addr,
.offset = 0,
.x = vesa_width,
.y = vesa_height,
.scan = vesa_scanline,
.visual = visual,
};
fb_init(&vesa_props);
}
 
#endif