Subversion Repositories HelenOS

Compare Revisions

Rev 3664 → Rev 3665

/branches/sparc/kernel/kernel.config
143,12 → 143,24
# Support for Z8530 serial port
! [ARCH=sparc64] CONFIG_Z8530 (y/n)
 
# Support for NS16550 serial port
! [ARCH=sparc64|ARCH=ia64] CONFIG_NS16550 (y/n)
# Support for NS16550 serial port (on IA64 as a console instead of the legacy keyboard)
! [ARCH=sparc64] CONFIG_NS16550 (y/n)
 
# Support for Serengeti console
! [ARCH=sparc64] CONFIG_SGCN (y/n)
 
# Support for NS16550 serial port (on IA64 as a console instead of the legacy keyboard)
! [ARCH=ia64&MACHINE!=ski] CONFIG_NS16550 (n/y)
 
# IOSapic on default address support (including legacy IRQ)
! [ARCH=ia64&MACHINE!=ski] CONFIG_IOSAPIC (y/n)
 
# Interrupt-driven driver for Legacy Keyboard?
! [CONFIG_NS16550=n&CONFIG_IOSAPIC=y&MACHINE!=ski] CONFIG_I8042_INTERRUPT_DRIVEN (y/n)
 
# Interrupt-driven driver for NS16550?
! [CONFIG_NS16550=y&((ARCH!=ia64)|CONFIG_IOSAPIC=y)&MACHINE!=ski] CONFIG_NS16550_INTERRUPT_DRIVEN (y/n)
 
# Virtually indexed D-cache support
! [ARCH=sparc64] CONFIG_VIRT_IDX_DCACHE (y/n)
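
For orientation (a note added for this comparison, not part of the revision): every option switched on here is translated into a -DCONFIG_* preprocessor define by kernel/Makefile (see the Makefile hunk later in this revision), so the drivers select between the interrupt-driven and polled paths with plain guards, e.g.:

#ifdef CONFIG_NS16550_INTERRUPT_DRIVEN
/* ns16550_init() below enables receiver interrupts (IER_ERBFI, MCR_OUT2). */
#else
/* No interrupt delivery is available; a kernel thread polls the device. */
#endif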
 
/branches/sparc/kernel/genarch/include/kbd/z8530.h
39,17 → 39,18
 
#include <console/chardev.h>
#include <ipc/irq.h>
#include <ddi/irq.h>
 
extern bool z8530_belongs_to_kernel;
 
extern void z8530_init(devno_t devno, inr_t inr, uintptr_t vaddr);
extern void z8530_init(devno_t, uintptr_t, inr_t, cir_t, void *);
extern void z8530_poll(void);
extern void z8530_grab(void);
extern void z8530_release(void);
extern void z8530_interrupt(void);
extern char z8530_key_read(chardev_t *d);
extern char z8530_key_read(chardev_t *);
extern irq_ownership_t z8530_claim(void);
extern void z8530_irq_handler(irq_t *irq, void *arg, ...);
extern void z8530_irq_handler(irq_t *, void *, ...);
 
#endif
 
/branches/sparc/kernel/genarch/include/kbd/ns16550.h
38,15 → 38,16
#define KERN_NS16550_H_
 
#include <console/chardev.h>
#include <ddi/irq.h>
#include <ipc/irq.h>
 
extern void ns16550_init(devno_t devno, inr_t inr, uintptr_t vaddr);
extern void ns16550_init(devno_t, uintptr_t, inr_t, cir_t, void *);
extern void ns16550_poll(void);
extern void ns16550_grab(void);
extern void ns16550_release(void);
extern char ns16550_key_read(chardev_t *d);
extern char ns16550_key_read(chardev_t *);
extern irq_ownership_t ns16550_claim(void);
extern void ns16550_irq_handler(irq_t *irq, void *arg, ...);
extern void ns16550_irq_handler(irq_t *, void *, ...);
 
#include <arch/types.h>
#ifndef ia64
58,6 → 59,7
#define IIR_REG 2 /** Interrupt Ident Register (read). */
#define FCR_REG 2 /** FIFO control register (write). */
#define LCR_REG 3 /** Line Control register. */
#define MCR_REG 4 /** Modem Control Register. */
#define LSR_REG 5 /** Line Status Register. */
 
#define IER_ERBFI 0x01 /** Enable Receive Buffer Full Interrupt. */
64,57 → 66,68
 
#define LCR_DLAB 0x80 /** Divisor Latch Access bit. */
 
#define MCR_OUT2 0x08 /** OUT2. */
 
/** Structure representing the ns16550 device. */
typedef struct {
devno_t devno;
volatile ioport_t io_port; /** Memory mapped registers of the ns16550. */
/** Memory mapped registers of the ns16550. */
volatile ioport_t io_port;
} ns16550_t;
 
static inline uint8_t ns16550_rbr_read(ns16550_t *dev)
{
return inb(dev->io_port+RBR_REG);
return inb(dev->io_port + RBR_REG);
}
static inline void ns16550_rbr_write(ns16550_t *dev, uint8_t v)
{
outb(dev->io_port+RBR_REG,v);
outb(dev->io_port + RBR_REG, v);
}
 
static inline uint8_t ns16550_ier_read(ns16550_t *dev)
{
return inb(dev->io_port+IER_REG);
return inb(dev->io_port + IER_REG);
}
 
static inline void ns16550_ier_write(ns16550_t *dev, uint8_t v)
{
outb(dev->io_port+IER_REG,v);
outb(dev->io_port + IER_REG, v);
}
 
static inline uint8_t ns16550_iir_read(ns16550_t *dev)
{
return inb(dev->io_port+IIR_REG);
return inb(dev->io_port + IIR_REG);
}
 
static inline void ns16550_fcr_write(ns16550_t *dev, uint8_t v)
{
outb(dev->io_port+FCR_REG,v);
outb(dev->io_port + FCR_REG, v);
}
 
static inline uint8_t ns16550_lcr_read(ns16550_t *dev)
{
return inb(dev->io_port+LCR_REG);
return inb(dev->io_port + LCR_REG);
}
 
static inline void ns16550_lcr_write(ns16550_t *dev, uint8_t v)
{
outb(dev->io_port+LCR_REG,v);
outb(dev->io_port + LCR_REG, v);
}
 
static inline uint8_t ns16550_lsr_read(ns16550_t *dev)
{
return inb(dev->io_port+LSR_REG);
return inb(dev->io_port + LSR_REG);
}
 
static inline uint8_t ns16550_mcr_read(ns16550_t *dev)
{
return inb(dev->io_port + MCR_REG);
}
 
static inline void ns16550_mcr_write(ns16550_t *dev, uint8_t v)
{
outb(dev->io_port + MCR_REG, v);
}
 
#endif
 
/branches/sparc/kernel/genarch/include/ofw/ofw_tree.h
30,6 → 30,7
#define KERN_OFW_TREE_H_
 
#include <arch/types.h>
#include <ddi/irq.h>
#include <typedefs.h>
 
#define OFW_TREE_PROPERTY_MAX_NAMELEN 32
43,11 → 44,11
ofw_tree_node_t *peer;
ofw_tree_node_t *child;
 
uint32_t node_handle; /**< Old OpenFirmware node handle. */
uint32_t node_handle; /**< Old OpenFirmware node handle. */
 
char *da_name; /**< Disambiguated name. */
char *da_name; /**< Disambiguated name. */
 
unsigned properties; /**< Number of properties. */
unsigned properties; /**< Number of properties. */
ofw_tree_property_t *property;
/**
105,7 → 106,7
uint32_t child_space;
uint32_t child_base;
uint32_t parent_space;
uint64_t parent_base; /* group phys.mid and phys.lo together */
uint64_t parent_base; /* group phys.mid and phys.lo together */
uint32_t size;
} __attribute__ ((packed));
typedef struct ofw_ebus_range ofw_ebus_range_t;
127,8 → 128,8
typedef struct ofw_ebus_intr_mask ofw_ebus_intr_mask_t;
 
struct ofw_pci_reg {
uint32_t space; /* needs to be masked to obtain pure space id */
uint64_t addr; /* group phys.mid and phys.lo together */
uint32_t space; /* needs to be masked to obtain pure space id */
uint64_t addr; /* group phys.mid and phys.lo together */
uint64_t size;
} __attribute__ ((packed));
typedef struct ofw_pci_reg ofw_pci_reg_t;
135,7 → 136,7
 
struct ofw_pci_range {
uint32_t space;
uint64_t child_base; /* group phys.mid and phys.lo together */
uint64_t child_base; /* group phys.mid and phys.lo together */
uint64_t parent_base;
uint64_t size;
} __attribute__ ((packed));
160,28 → 161,43
} __attribute__ ((packed));
typedef struct ofw_upa_reg ofw_upa_reg_t;
 
extern void ofw_tree_init(ofw_tree_node_t *root);
extern void ofw_tree_init(ofw_tree_node_t *);
extern void ofw_tree_print(void);
extern const char *ofw_tree_node_name(const ofw_tree_node_t *node);
extern ofw_tree_node_t *ofw_tree_lookup(const char *path);
extern ofw_tree_property_t *ofw_tree_getprop(const ofw_tree_node_t *node, const char *name);
extern ofw_tree_node_t *ofw_tree_find_child(ofw_tree_node_t *node, const char *name);
extern ofw_tree_node_t *ofw_tree_find_child_by_device_type(ofw_tree_node_t *node, const char *device_type);
extern ofw_tree_node_t *ofw_tree_find_peer_by_device_type(ofw_tree_node_t *node, const char *device_type);
extern ofw_tree_node_t *ofw_tree_find_peer_by_name(ofw_tree_node_t *node, const char *name);
extern ofw_tree_node_t *ofw_tree_find_node_by_handle(ofw_tree_node_t *root, uint32_t handle);
extern const char *ofw_tree_node_name(const ofw_tree_node_t *);
extern ofw_tree_node_t *ofw_tree_lookup(const char *);
extern ofw_tree_property_t *ofw_tree_getprop(const ofw_tree_node_t *,
const char *);
extern ofw_tree_node_t *ofw_tree_find_child(ofw_tree_node_t *, const char *);
extern ofw_tree_node_t *ofw_tree_find_child_by_device_type(ofw_tree_node_t *,
const char *);
extern ofw_tree_node_t *ofw_tree_find_peer_by_device_type(ofw_tree_node_t *,
const char *);
extern ofw_tree_node_t *ofw_tree_find_peer_by_name(ofw_tree_node_t *node,
const char *name);
extern ofw_tree_node_t *ofw_tree_find_node_by_handle(ofw_tree_node_t *,
uint32_t);
 
extern bool ofw_fhc_apply_ranges(ofw_tree_node_t *node, ofw_fhc_reg_t *reg, uintptr_t *pa);
extern bool ofw_central_apply_ranges(ofw_tree_node_t *node, ofw_central_reg_t *reg, uintptr_t *pa);
extern bool ofw_ebus_apply_ranges(ofw_tree_node_t *node, ofw_ebus_reg_t *reg, uintptr_t *pa);
extern bool ofw_pci_apply_ranges(ofw_tree_node_t *node, ofw_pci_reg_t *reg, uintptr_t *pa);
extern bool ofw_sbus_apply_ranges(ofw_tree_node_t *node, ofw_sbus_reg_t *reg, uintptr_t *pa);
extern bool ofw_upa_apply_ranges(ofw_tree_node_t *node, ofw_upa_reg_t *reg, uintptr_t *pa);
extern bool ofw_fhc_apply_ranges(ofw_tree_node_t *, ofw_fhc_reg_t *,
uintptr_t *);
extern bool ofw_central_apply_ranges(ofw_tree_node_t *, ofw_central_reg_t *,
uintptr_t *);
extern bool ofw_ebus_apply_ranges(ofw_tree_node_t *, ofw_ebus_reg_t *,
uintptr_t *);
extern bool ofw_pci_apply_ranges(ofw_tree_node_t *, ofw_pci_reg_t *,
uintptr_t *);
extern bool ofw_sbus_apply_ranges(ofw_tree_node_t *, ofw_sbus_reg_t *,
uintptr_t *);
extern bool ofw_upa_apply_ranges(ofw_tree_node_t *, ofw_upa_reg_t *,
uintptr_t *);
 
extern bool ofw_pci_reg_absolutize(ofw_tree_node_t *node, ofw_pci_reg_t *reg, ofw_pci_reg_t *out);
extern bool ofw_pci_reg_absolutize(ofw_tree_node_t *, ofw_pci_reg_t *,
ofw_pci_reg_t *);
 
extern bool ofw_fhc_map_interrupt(ofw_tree_node_t *node, ofw_fhc_reg_t *reg, uint32_t interrupt, int *inr);
extern bool ofw_ebus_map_interrupt(ofw_tree_node_t *node, ofw_ebus_reg_t *reg, uint32_t interrupt, int *inr);
extern bool ofw_pci_map_interrupt(ofw_tree_node_t *node, ofw_pci_reg_t *reg, int ino, int *inr);
extern bool ofw_fhc_map_interrupt(ofw_tree_node_t *, ofw_fhc_reg_t *,
uint32_t, int *, cir_t *, void **);
extern bool ofw_ebus_map_interrupt(ofw_tree_node_t *, ofw_ebus_reg_t *,
uint32_t, int *, cir_t *, void **);
extern bool ofw_pci_map_interrupt(ofw_tree_node_t *, ofw_pci_reg_t *,
int, int *, cir_t *, void **);
 
#endif
/branches/sparc/kernel/genarch/src/kbd/ns16550.c
38,8 → 38,8
#include <genarch/kbd/key.h>
#include <genarch/kbd/scanc.h>
#include <genarch/kbd/scanc_sun.h>
#include <arch/drivers/kbd.h>
#ifndef ia64
#include <arch/drivers/kbd.h>
#include <arch/drivers/ns16550.h>
#endif
#include <ddi/irq.h>
107,11 → 107,14
 
/** Initialize ns16550.
*
* @param devno Device number.
* @param inr Interrupt number.
* @param vaddr Virtual address of device's registers.
* @param devno Device number.
* @param port Virtual/IO address of device's registers.
* @param inr Interrupt number.
* @param cir Clear interrupt function.
* @param cir_arg First argument to cir.
*/
void ns16550_init(devno_t devno, inr_t inr, ioport_t port)
void
ns16550_init(devno_t devno, ioport_t port, inr_t inr, cir_t cir, void *cir_arg)
{
chardev_initialize("ns16550_kbd", &kbrd, &ops);
stdin = &kbrd;
124,18 → 127,31
ns16550_irq.inr = inr;
ns16550_irq.claim = ns16550_claim;
ns16550_irq.handler = ns16550_irq_handler;
ns16550_irq.cir = cir;
ns16550_irq.cir_arg = cir_arg;
irq_register(&ns16550_irq);
 
 
while ((ns16550_lsr_read(&ns16550) & LSR_DATA_READY))
ns16550_rbr_read(&ns16550);
 
sysinfo_set_item_val("kbd", NULL, true);
#ifndef ia64
sysinfo_set_item_val("kbd.type", NULL, KBD_NS16550);
#endif
sysinfo_set_item_val("kbd.devno", NULL, devno);
sysinfo_set_item_val("kbd.inr", NULL, inr);
sysinfo_set_item_val("kbd.address.virtual", NULL, port);
sysinfo_set_item_val("kbd.port", NULL, port);
 
#ifdef CONFIG_NS16550_INTERRUPT_DRIVEN
/* Enable interrupts */
ns16550_ier_write(&ns16550, IER_ERBFI);
ns16550_mcr_write(&ns16550, MCR_OUT2);
#endif
 
#ifdef ia64
uint8_t c;
// This switches RBR & IER into the mode in which they accept the baud rate divisor constant
c = ns16550_lcr_read(&ns16550);
ns16550_lcr_write(&ns16550, 0x80 | c);
ns16550_rbr_write(&ns16550, 0x0c);
149,10 → 165,7
/** Process ns16550 interrupt. */
void ns16550_interrupt(void)
{
/* TODO
*
* ns16550 works in the polled mode so far.
*/
ns16550_poll();
}
 
/* Called from getc(). */
201,6 → 214,7
*/
void ns16550_poll(void)
{
#ifndef CONFIG_NS16550_INTERRUPT_DRIVEN
ipl_t ipl;
 
ipl = interrupts_disable();
220,6 → 234,7
 
spinlock_unlock(&ns16550_irq.lock);
interrupts_restore(ipl);
#endif
 
while (ns16550_lsr_read(&ns16550) & LSR_DATA_READY) {
uint8_t x;
251,7 → 266,10
 
void ns16550_irq_handler(irq_t *irq, void *arg, ...)
{
panic("Not yet implemented, ns16550 works in polled mode.\n");
if (irq->notif_cfg.notify && irq->notif_cfg.answerbox)
ipc_irq_send_notif(irq);
else
ns16550_interrupt();
}
 
/** @}
/branches/sparc/kernel/genarch/src/kbd/i8042.c
37,6 → 37,9
*/
 
#include <genarch/kbd/i8042.h>
#ifdef ia64
#include <arch/drivers/kbd.h>
#endif
#include <genarch/kbd/key.h>
#include <genarch/kbd/scanc.h>
#include <genarch/kbd/scanc_pc.h>
184,7 → 187,9
sysinfo_set_item_val("kbd", NULL, true);
sysinfo_set_item_val("kbd.devno", NULL, kbd_devno);
sysinfo_set_item_val("kbd.inr", NULL, kbd_inr);
#ifdef KBD_LEGACY
sysinfo_set_item_val("kbd.type", NULL, KBD_LEGACY);
#endif
sysinfo_set_item_val("mouse", NULL, true);
sysinfo_set_item_val("mouse.devno", NULL, mouse_devno);
sysinfo_set_item_val("mouse.inr", NULL, mouse_inr);
/branches/sparc/kernel/genarch/src/kbd/z8530.c
43,7 → 43,6
#include <ipc/irq.h>
#include <arch/interrupt.h>
#include <arch/drivers/kbd.h>
#include <arch/drivers/fhc.h>
#include <cpu.h>
#include <arch/asm.h>
#include <arch.h>
83,12 → 82,14
*/
z8530_write_a(&z8530, WR0, WR0_TX_IP_RST);
 
z8530_write_a(&z8530, WR1, WR1_IARCSC); /* interrupt on all characters */
/* interrupt on all characters */
z8530_write_a(&z8530, WR1, WR1_IARCSC);
 
/* 8 bits per character and enable receiver */
z8530_write_a(&z8530, WR3, WR3_RX8BITSCH | WR3_RX_ENABLE);
z8530_write_a(&z8530, WR9, WR9_MIE); /* Master Interrupt Enable. */
/* Master Interrupt Enable. */
z8530_write_a(&z8530, WR9, WR9_MIE);
spinlock_lock(&z8530_irq.lock);
z8530_irq.notif_cfg.notify = false;
108,7 → 109,8
}
 
/** Initialize z8530. */
void z8530_init(devno_t devno, inr_t inr, uintptr_t vaddr)
void
z8530_init(devno_t devno, uintptr_t vaddr, inr_t inr, cir_t cir, void *cir_arg)
{
chardev_initialize("z8530_kbd", &kbrd, &ops);
stdin = &kbrd;
121,6 → 123,8
z8530_irq.inr = inr;
z8530_irq.claim = z8530_claim;
z8530_irq.handler = z8530_irq_handler;
z8530_irq.cir = cir;
z8530_irq.cir_arg = cir_arg;
irq_register(&z8530_irq);
 
sysinfo_set_item_val("kbd", NULL, true);
197,18 → 201,10
 
void z8530_irq_handler(irq_t *irq, void *arg, ...)
{
/*
* So far, we know we got this interrupt through the FHC.
* Since we don't have enough documentation about the FHC
* and because the interrupt looks like level sensitive,
* we cannot handle it by scheduling one of the level
* interrupt traps. Process the interrupt directly.
*/
if (irq->notif_cfg.notify && irq->notif_cfg.answerbox)
ipc_irq_send_notif(irq);
else
z8530_interrupt();
fhc_clear_interrupt(central_fhc, irq->inr);
}
 
/** @}
/branches/sparc/kernel/genarch/src/ofw/ebus.c
44,7 → 44,8
#include <macros.h>
 
/** Apply EBUS ranges to EBUS register. */
bool ofw_ebus_apply_ranges(ofw_tree_node_t *node, ofw_ebus_reg_t *reg, uintptr_t *pa)
bool
ofw_ebus_apply_ranges(ofw_tree_node_t *node, ofw_ebus_reg_t *reg, uintptr_t *pa)
{
ofw_tree_property_t *prop;
ofw_ebus_range_t *range;
62,11 → 63,13
for (i = 0; i < ranges; i++) {
if (reg->space != range[i].child_space)
continue;
if (overlaps(reg->addr, reg->size, range[i].child_base, range[i].size)) {
if (overlaps(reg->addr, reg->size, range[i].child_base,
range[i].size)) {
ofw_pci_reg_t pci_reg;
pci_reg.space = range[i].parent_space;
pci_reg.addr = range[i].parent_base + (reg->addr - range[i].child_base);
pci_reg.addr = range[i].parent_base +
(reg->addr - range[i].child_base);
pci_reg.size = reg->size;
return ofw_pci_apply_ranges(node->parent, &pci_reg, pa);
76,7 → 79,9
return false;
}
 
bool ofw_ebus_map_interrupt(ofw_tree_node_t *node, ofw_ebus_reg_t *reg, uint32_t interrupt, int *inr)
bool
ofw_ebus_map_interrupt(ofw_tree_node_t *node, ofw_ebus_reg_t *reg,
uint32_t interrupt, int *inr, cir_t *cir, void **cir_arg)
{
ofw_tree_property_t *prop;
ofw_tree_node_t *controller;
104,8 → 109,8
unsigned int i;
for (i = 0; i < count; i++) {
if ((intr_map[i].space == space) && (intr_map[i].addr == addr)
&& (intr_map[i].intr == intr))
if ((intr_map[i].space == space) &&
(intr_map[i].addr == addr) && (intr_map[i].intr == intr))
goto found;
}
return false;
113,10 → 118,12
found:
/*
* We found the device that functions as an interrupt controller
* for the interrupt. We also found partial mapping from interrupt to INO.
* for the interrupt. We also found partial mapping from interrupt to
* INO.
*/
 
controller = ofw_tree_find_node_by_handle(ofw_tree_lookup("/"), intr_map[i].controller_handle);
controller = ofw_tree_find_node_by_handle(ofw_tree_lookup("/"),
intr_map[i].controller_handle);
if (!controller)
return false;
130,7 → 137,8
/*
* Let the PCI do the next step in mapping the interrupt.
*/
if (!ofw_pci_map_interrupt(controller, NULL, intr_map[i].controller_ino, inr))
if (!ofw_pci_map_interrupt(controller, NULL, intr_map[i].controller_ino,
inr, cir, cir_arg))
return false;
 
return true;
/branches/sparc/kernel/genarch/src/ofw/fhc.c
109,7 → 109,9
return false;
}
 
bool ofw_fhc_map_interrupt(ofw_tree_node_t *node, ofw_fhc_reg_t *reg, uint32_t interrupt, int *inr)
bool
ofw_fhc_map_interrupt(ofw_tree_node_t *node, ofw_fhc_reg_t *reg,
uint32_t interrupt, int *inr, cir_t *cir, void **cir_arg)
{
fhc_t *fhc = NULL;
if (!node->device) {
126,6 → 128,8
fhc_enable_interrupt(fhc, interrupt);
*inr = interrupt;
*cir = fhc_clear_interrupt;
*cir_arg = fhc;
return true;
}
 
/branches/sparc/kernel/genarch/src/ofw/pci.c
49,7 → 49,8
 
#define PCI_IGN 0x1f
 
bool ofw_pci_apply_ranges(ofw_tree_node_t *node, ofw_pci_reg_t *reg, uintptr_t *pa)
bool
ofw_pci_apply_ranges(ofw_tree_node_t *node, ofw_pci_reg_t *reg, uintptr_t *pa)
{
ofw_tree_property_t *prop;
ofw_pci_range_t *range;
68,10 → 69,13
unsigned int i;
for (i = 0; i < ranges; i++) {
if ((reg->space & PCI_SPACE_MASK) != (range[i].space & PCI_SPACE_MASK))
if ((reg->space & PCI_SPACE_MASK) !=
(range[i].space & PCI_SPACE_MASK))
continue;
if (overlaps(reg->addr, reg->size, range[i].child_base, range[i].size)) {
*pa = range[i].parent_base + (reg->addr - range[i].child_base);
if (overlaps(reg->addr, reg->size, range[i].child_base,
range[i].size)) {
*pa = range[i].parent_base +
(reg->addr - range[i].child_base);
return true;
}
}
79,7 → 83,9
return false;
}
 
bool ofw_pci_reg_absolutize(ofw_tree_node_t *node, ofw_pci_reg_t *reg, ofw_pci_reg_t *out)
bool
ofw_pci_reg_absolutize(ofw_tree_node_t *node, ofw_pci_reg_t *reg,
ofw_pci_reg_t *out)
{
if (reg->space & PCI_ABS_MASK) {
/* already absolute */
103,7 → 109,8
unsigned int i;
for (i = 0; i < assigned_addresses; i++) {
if ((assigned_address[i].space & PCI_REG_MASK) == (reg->space & PCI_REG_MASK)) {
if ((assigned_address[i].space & PCI_REG_MASK) ==
(reg->space & PCI_REG_MASK)) {
out->space = assigned_address[i].space;
out->addr = reg->addr + assigned_address[i].addr;
out->size = reg->size;
119,7 → 126,9
* So far, we only know how to map interrupts of non-PCI devices connected
* to a PCI bridge.
*/
bool ofw_pci_map_interrupt(ofw_tree_node_t *node, ofw_pci_reg_t *reg, int ino, int *inr)
bool
ofw_pci_map_interrupt(ofw_tree_node_t *node, ofw_pci_reg_t *reg, int ino,
int *inr, cir_t *cir, void **cir_arg)
{
pci_t *pci = node->device;
if (!pci) {
132,6 → 141,8
pci_enable_interrupt(pci, ino);
 
*inr = (PCI_IGN << IGN_SHIFT) | ino;
*cir = pci_clear_interrupt;
*cir_arg = pci;
 
return true;
}
/branches/sparc/kernel/generic/include/byteorder.h
51,6 → 51,14
#define uint32_t_be2host(n) (n)
#define uint64_t_be2host(n) (n)
 
#define host2uint16_t_le(n) uint16_t_byteorder_swap(n)
#define host2uint32_t_le(n) uint32_t_byteorder_swap(n)
#define host2uint64_t_le(n) uint64_t_byteorder_swap(n)
 
#define host2uint16_t_be(n) (n)
#define host2uint32_t_be(n) (n)
#define host2uint64_t_be(n) (n)
 
#else
 
#define uint16_t_le2host(n) (n)
61,6 → 69,14
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n)
 
#define host2uint16_t_le(n) (n)
#define host2uint32_t_le(n) (n)
#define host2uint64_t_le(n) (n)
 
#define host2uint16_t_be(n) uint16_t_byteorder_swap(n)
#define host2uint32_t_be(n) uint32_t_byteorder_swap(n)
#define host2uint64_t_be(n) uint64_t_byteorder_swap(n)
 
#endif
 
static inline uint64_t uint64_t_byteorder_swap(uint64_t n)
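
A small usage sketch (not part of the revision) may help place the new host2*_t_le/_be macros: they convert from host byte order to a fixed external order, i.e. the opposite direction of the existing *_le2host/*_be2host macros. A hypothetical helper storing a native 32-bit value into a big-endian field:

static inline void store_uint32_be(uint32_t *field, uint32_t val)
{
/* Identity on a big-endian host, a byte swap on a little-endian one. */
*field = host2uint32_t_be(val);
}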
/branches/sparc/kernel/generic/include/proc/task.h
53,6 → 53,7
#include <mm/tlb.h>
#include <proc/scheduler.h>
#include <udebug/udebug.h>
#include <ipc/kbox.h>
 
#define TASK_NAME_BUFLEN 20
 
98,19 → 99,13
atomic_t active_calls;
 
#ifdef CONFIG_UDEBUG
/** Debugging stuff */
/** Debugging stuff. */
udebug_task_t udebug;
 
/** Kernel answerbox */
answerbox_t kernel_box;
/** Thread used to service kernel answerbox */
struct thread *kb_thread;
/** Kbox thread creation vs. begin of cleanup mutual exclusion */
mutex_t kb_cleanup_lock;
/** True if cleanup of kbox has already started */
bool kb_finished;
/** Kernel answerbox. */
kbox_t kb;
#endif
 
/** Architecture specific task data. */
task_arch_t arch;
/branches/sparc/kernel/generic/include/udebug/udebug.h
147,9 → 147,7
/** BEGIN operation in progress (waiting for threads to stop) */
UDEBUG_TS_BEGINNING,
/** Debugger fully connected */
UDEBUG_TS_ACTIVE,
/** Task is shutting down, no more debug activities allowed */
UDEBUG_TS_SHUTDOWN
UDEBUG_TS_ACTIVE
} udebug_task_state_t;
 
/** Debugging part of task_t structure.
169,13 → 167,6
/** Debugging part of thread_t structure.
*/
typedef struct {
/**
* Prevent deadlock with udebug_before_thread_runs() in interrupt
* handler, without actually disabling interrupts.
* ==0 means "unlocked", >0 means "locked"
*/
atomic_t int_lock;
 
/** Synchronize debug ops on this thread / access to this structure. */
mutex_t lock;
 
182,6 → 173,7
waitq_t go_wq;
call_t *go_call;
unative_t syscall_args[6];
istate_t *uspace_state;
 
/** What type of event are we stopped in or 0 if none. */
udebug_event_t cur_event;
200,7 → 192,7
unative_t a4, unative_t a5, unative_t a6, unative_t id, unative_t rc,
bool end_variant);
 
void udebug_thread_b_event(struct thread *t);
void udebug_thread_b_event_attach(struct thread *t, struct task *ta);
void udebug_thread_e_event(void);
 
void udebug_stoppable_begin(void);
/branches/sparc/kernel/generic/include/ddi/irq.h
83,6 → 83,9
struct irq;
typedef void (* irq_handler_t)(struct irq *irq, void *arg, ...);
 
/** Type for function used to clear the interrupt. */
typedef void (* cir_t)(void *arg, inr_t inr);
 
/** IPC notification config structure.
*
* Primarily, this structure is encapsulated in the irq_t structure.
144,6 → 147,11
/** Argument for the handler. */
void *arg;
 
/** Clear interrupt routine. */
cir_t cir;
/** First argument to the clear interrupt routine. */
void *cir_arg;
 
/** Notification configuration structure. */
ipc_notif_cfg_t notif_cfg;
} irq_t;
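
To summarize the new mechanism (a sketch, not part of the diff): a driver whose device sits behind an interrupt controller stores the controller's clear routine and its state in the two new fields when registering, and the architecture's dispatch path invokes them right after the handler. Roughly (cf. the z8530/ns16550 init functions and sparc64 trap/interrupt.c below; my_irq and controller are placeholders):

/* driver side */
my_irq.cir = fhc_clear_interrupt; /* or pci_clear_interrupt, or NULL */
my_irq.cir_arg = controller;
irq_register(&my_irq);

/* dispatch side */
irq->handler(irq, irq->arg);
if (irq->cir)
irq->cir(irq->cir_arg, irq->inr);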
/branches/sparc/kernel/generic/include/adt/bitmap.h
49,6 → 49,14
extern void bitmap_clear_range(bitmap_t *bitmap, index_t start, count_t bits);
extern void bitmap_copy(bitmap_t *dst, bitmap_t *src, count_t bits);
 
static inline int bitmap_get(bitmap_t *bitmap, index_t bit)
{
if (bit >= bitmap->bits)
return 0;
return !!((bitmap->map)[bit / 8] & (1 << (bit & 7)));
}
 
 
#endif
 
/** @}
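
A worked example of the new accessor (illustration only; bitmap_get_example is a hypothetical helper):

static void bitmap_get_example(void)
{
uint8_t storage[4]; /* backing store for 32 bits */
bitmap_t b;

bitmap_initialize(&b, storage, 32);
bitmap_clear_range(&b, 0, 32);
bitmap_set_range(&b, 13, 1);

/* Bit 13 lives in storage[13 / 8] == storage[1], mask 1 << (13 & 7) == 1 << 5. */
ASSERT(bitmap_get(&b, 13) == 1);
/* Indices beyond b.bits are reported as clear instead of being read out of bounds. */
ASSERT(bitmap_get(&b, 100) == 0);
}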
/branches/sparc/kernel/generic/include/ipc/kbox.h
37,6 → 37,18
 
#include <typedefs.h>
 
/** Kernel answerbox structure. */
typedef struct kbox {
/** The answerbox itself. */
answerbox_t box;
/** Thread used to service the answerbox. */
struct thread *thread;
/** Kbox thread creation vs. begin of cleanup mutual exclusion. */
mutex_t cleanup_lock;
/** True if cleanup of kbox has already started. */
bool finished;
} kbox_t;
 
extern int ipc_connect_kbox(task_id_t);
extern void ipc_kbox_cleanup(void);
 
/branches/sparc/kernel/generic/src/synch/futex.c
115,6 → 115,7
uintptr_t paddr;
pte_t *t;
ipl_t ipl;
int rc;
ipl = interrupts_disable();
 
134,9 → 135,17
interrupts_restore(ipl);
 
futex = futex_find(paddr);
return (unative_t) waitq_sleep_timeout(&futex->wq, usec, flags |
 
#ifdef CONFIG_UDEBUG
udebug_stoppable_begin();
#endif
rc = waitq_sleep_timeout(&futex->wq, usec, flags |
SYNCH_FLAGS_INTERRUPTIBLE);
 
#ifdef CONFIG_UDEBUG
udebug_stoppable_end();
#endif
return (unative_t) rc;
}
 
/** Wakeup one thread waiting in futex wait queue.
/branches/sparc/kernel/generic/src/interrupt/interrupt.c
86,8 → 86,17
void exc_dispatch(int n, istate_t *istate)
{
ASSERT(n < IVT_ITEMS);
 
#ifdef CONFIG_UDEBUG
if (THREAD) THREAD->udebug.uspace_state = istate;
#endif
exc_table[n].f(n + IVT_FIRST, istate);
 
#ifdef CONFIG_UDEBUG
if (THREAD) THREAD->udebug.uspace_state = NULL;
#endif
 
/* This is a safe place to exit exiting thread */
if (THREAD && THREAD->interrupted && istate_from_uspace(istate))
thread_exit();
/branches/sparc/kernel/generic/src/time/clock.c
190,6 → 190,14
if (!ticks && !PREEMPTION_DISABLED) {
scheduler();
#ifdef CONFIG_UDEBUG
/*
* Give udebug chance to stop the thread
* before it begins executing.
*/
if (istate_from_uspace(THREAD->udebug.uspace_state))
udebug_before_thread_runs();
#endif
}
}
 
/branches/sparc/kernel/generic/src/ddi/irq.c
145,6 → 145,8
irq->claim = NULL;
irq->handler = NULL;
irq->arg = NULL;
irq->cir = NULL;
irq->cir_arg = NULL;
irq->notif_cfg.notify = false;
irq->notif_cfg.answerbox = NULL;
irq->notif_cfg.code = NULL;
/branches/sparc/kernel/generic/src/proc/task.c
164,10 → 164,10
udebug_task_init(&ta->udebug);
 
/* Init kbox stuff */
ipc_answerbox_init(&ta->kernel_box, ta);
ta->kb_thread = NULL;
mutex_initialize(&ta->kb_cleanup_lock, MUTEX_PASSIVE);
ta->kb_finished = false;
ipc_answerbox_init(&ta->kb.box, ta);
ta->kb.thread = NULL;
mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
ta->kb.finished = false;
#endif
 
ipc_answerbox_init(&ta->answerbox, ta);
/branches/sparc/kernel/generic/src/proc/thread.c
763,14 → 763,20
return (unative_t) rc;
}
}
#ifdef CONFIG_UDEBUG
/*
* Generate udebug THREAD_B event and attach the thread.
* This must be done atomically (with the debug locks held),
* otherwise we would either miss some thread or receive
* THREAD_B events for threads that already existed
* and could be detected with THREAD_READ before.
*/
udebug_thread_b_event_attach(t, TASK);
#else
thread_attach(t, TASK);
#endif
thread_ready(t);
 
#ifdef CONFIG_UDEBUG
/* Generate udebug THREAD_B event */
udebug_thread_b_event(t);
#endif
 
return 0;
} else
free(kernel_uarg);
/branches/sparc/kernel/generic/src/mm/tlb.c
134,9 → 134,7
 
void tlb_shootdown_ipi_send(void)
{
#ifndef ia64
ipi_broadcast(VECTOR_TLB_SHOOTDOWN_IPI);
#endif
}
 
/** Receive TLB shootdown message. */
/branches/sparc/kernel/generic/src/syscall/syscall.c
105,11 → 105,7
#ifdef CONFIG_UDEBUG
udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, 0, false);
#endif
 
if (id < SYSCALL_END) {
#ifdef CONFIG_UDEBUG
udebug_stoppable_begin();
#endif
rc = syscall_table[id](a1, a2, a3, a4, a5, a6);
} else {
printf("Task %" PRIu64": Unknown syscall %#" PRIxn, TASK->taskid, id);
122,9 → 118,14
 
#ifdef CONFIG_UDEBUG
udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, rc, true);
 
/*
* Stopping point needed for tasks that only invoke non-blocking
* system calls.
*/
udebug_stoppable_begin();
udebug_stoppable_end();
#endif
#endif
return rc;
}
 
/branches/sparc/kernel/generic/src/ipc/kbox.c
48,14 → 48,20
ipl_t ipl;
bool have_kb_thread;
 
/* Only hold kb_cleanup_lock while setting kb_finished - this is enough */
mutex_lock(&TASK->kb_cleanup_lock);
TASK->kb_finished = true;
mutex_unlock(&TASK->kb_cleanup_lock);
/*
* Only hold kb.cleanup_lock while setting kb.finished -
* this is enough.
*/
mutex_lock(&TASK->kb.cleanup_lock);
TASK->kb.finished = true;
mutex_unlock(&TASK->kb.cleanup_lock);
 
have_kb_thread = (TASK->kb_thread != NULL);
have_kb_thread = (TASK->kb.thread != NULL);
 
/* From now on nobody will try to connect phones or attach kbox threads */
/*
* From now on nobody will try to connect phones or attach
* kbox threads
*/
 
/*
* Disconnect all phones connected to our kbox. Passing true for
63,7 → 69,7
* disconnected phone. This ensures the kbox thread is going to
* wake up and terminate.
*/
ipc_answerbox_slam_phones(&TASK->kernel_box, have_kb_thread);
ipc_answerbox_slam_phones(&TASK->kb.box, have_kb_thread);
 
/*
* If the task was being debugged, clean up debugging session.
77,18 → 83,18
interrupts_restore(ipl);
if (have_kb_thread) {
LOG("join kb_thread..\n");
thread_join(TASK->kb_thread);
thread_detach(TASK->kb_thread);
LOG("join kb.thread..\n");
thread_join(TASK->kb.thread);
thread_detach(TASK->kb.thread);
LOG("join done\n");
TASK->kb_thread = NULL;
TASK->kb.thread = NULL;
}
 
/* Answer all messages in 'calls' and 'dispatched_calls' queues */
spinlock_lock(&TASK->kernel_box.lock);
ipc_cleanup_call_list(&TASK->kernel_box.dispatched_calls);
ipc_cleanup_call_list(&TASK->kernel_box.calls);
spinlock_unlock(&TASK->kernel_box.lock);
/* Answer all messages in 'calls' and 'dispatched_calls' queues. */
spinlock_lock(&TASK->kb.box.lock);
ipc_cleanup_call_list(&TASK->kb.box.dispatched_calls);
ipc_cleanup_call_list(&TASK->kb.box.calls);
spinlock_unlock(&TASK->kb.box.lock);
}
 
/** Handle hangup message in kbox.
105,7 → 111,7
 
/* Was it our debugger, who hung up? */
if (call->sender == TASK->udebug.debugger) {
/* Terminate debugging session (if any) */
/* Terminate debugging session (if any). */
LOG("kbox: terminate debug session\n");
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
118,7 → 124,7
 
LOG("kbox: continue with hangup message\n");
IPC_SET_RETVAL(call->data, 0);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
 
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
130,13 → 136,13
*/
 
/* Only detach kbox thread unless already terminating. */
mutex_lock(&TASK->kb_cleanup_lock);
if (&TASK->kb_finished == false) {
mutex_lock(&TASK->kb.cleanup_lock);
if (&TASK->kb.finished == false) {
/* Detach kbox thread so it gets freed from memory. */
thread_detach(TASK->kb_thread);
TASK->kb_thread = NULL;
thread_detach(TASK->kb.thread);
TASK->kb.thread = NULL;
}
mutex_unlock(&TASK->kb_cleanup_lock);
mutex_unlock(&TASK->kb.cleanup_lock);
 
LOG("phone list is empty\n");
*last = true;
166,7 → 172,7
done = false;
 
while (!done) {
call = ipc_wait_for_call(&TASK->kernel_box, SYNCH_NO_TIMEOUT,
call = ipc_wait_for_call(&TASK->kb.box, SYNCH_NO_TIMEOUT,
SYNCH_FLAGS_NONE);
 
if (call == NULL)
201,10 → 207,10
/**
* Connect phone to a task kernel-box specified by id.
*
* Note that this is not completely atomic. For optimisation reasons,
* The task might start cleaning up kbox after the phone has been connected
* and before a kbox thread has been created. This must be taken into account
* in the cleanup code.
* Note that this is not completely atomic. For optimisation reasons, the task
* might start cleaning up kbox after the phone has been connected and before
* a kbox thread has been created. This must be taken into account in the
* cleanup code.
*
* @return Phone id on success, or negative error code.
*/
230,44 → 236,45
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
 
mutex_lock(&ta->kb_cleanup_lock);
mutex_lock(&ta->kb.cleanup_lock);
 
if (atomic_predec(&ta->refcount) == 0) {
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
task_destroy(ta);
return ENOENT;
}
 
if (ta->kb_finished != false) {
mutex_unlock(&ta->kb_cleanup_lock);
if (ta->kb.finished != false) {
mutex_unlock(&ta->kb.cleanup_lock);
return EINVAL;
}
 
newphid = phone_alloc();
if (newphid < 0) {
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
return ELIMIT;
}
 
/* Connect the newly allocated phone to the kbox */
ipc_phone_connect(&TASK->phones[newphid], &ta->kernel_box);
ipc_phone_connect(&TASK->phones[newphid], &ta->kb.box);
 
if (ta->kb_thread != NULL) {
mutex_unlock(&ta->kb_cleanup_lock);
if (ta->kb.thread != NULL) {
mutex_unlock(&ta->kb.cleanup_lock);
return newphid;
}
 
/* Create a kbox thread */
kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0, "kbox", false);
kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0,
"kbox", false);
if (!kb_thread) {
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
return ENOMEM;
}
 
ta->kb_thread = kb_thread;
ta->kb.thread = kb_thread;
thread_ready(kb_thread);
 
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
 
return newphid;
}
/branches/sparc/kernel/generic/src/ipc/sysipc.c
455,10 → 455,17
IPC_SET_ARG5(call.data, 0);
 
if (!(res = request_preprocess(&call, phone))) {
#ifdef CONFIG_UDEBUG
udebug_stoppable_begin();
#endif
rc = ipc_call_sync(phone, &call);
#ifdef CONFIG_UDEBUG
udebug_stoppable_end();
#endif
if (rc != EOK)
return rc;
process_answer(&call);
 
} else {
IPC_SET_RETVAL(call.data, res);
}
495,7 → 502,13
GET_CHECK_PHONE(phone, phoneid, return ENOENT);
 
if (!(res = request_preprocess(&call, phone))) {
#ifdef CONFIG_UDEBUG
udebug_stoppable_begin();
#endif
rc = ipc_call_sync(phone, &call);
#ifdef CONFIG_UDEBUG
udebug_stoppable_end();
#endif
if (rc != EOK)
return rc;
process_answer(&call);
798,9 → 811,17
{
call_t *call;
 
restart:
restart:
 
#ifdef CONFIG_UDEBUG
udebug_stoppable_begin();
#endif
call = ipc_wait_for_call(&TASK->answerbox, usec,
flags | SYNCH_FLAGS_INTERRUPTIBLE);
 
#ifdef CONFIG_UDEBUG
udebug_stoppable_end();
#endif
if (!call)
return 0;
 
/branches/sparc/kernel/generic/src/ipc/irq.c
100,7 → 100,7
*((uint64_t *) code->cmds[i].addr) =
code->cmds[i].value;
break;
#if defined(ia32) || defined(amd64)
#if defined(ia32) || defined(amd64) || defined(ia64)
case CMD_PORT_READ_1:
dstval = inb((long) code->cmds[i].addr);
break;
/branches/sparc/kernel/generic/src/udebug/udebug_ipc.c
73,7 → 73,7
rc = udebug_begin(call);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
83,7 → 83,7
*/
if (rc != 0) {
IPC_SET_RETVAL(call->data, 0);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
}
 
99,7 → 99,7
rc = udebug_end();
 
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process a SET_EVMASK call.
116,7 → 116,7
rc = udebug_set_evmask(mask);
 
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
 
135,7 → 135,7
rc = udebug_go(t, call);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
}
154,7 → 154,7
 
rc = udebug_stop(t, call);
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process a THREAD_READ call.
182,7 → 182,7
rc = udebug_thread_read(&buffer, buf_size, &n);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
209,7 → 209,7
IPC_SET_ARG3(call->data, total_bytes);
call->buffer = buffer;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process an ARGS_READ call.
229,7 → 229,7
rc = udebug_args_read(t, &buffer);
if (rc != EOK) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
247,7 → 247,7
IPC_SET_ARG2(call->data, 6 * sizeof(unative_t));
call->buffer = buffer;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process an MEM_READ call.
270,7 → 270,7
rc = udebug_mem_read(uspace_src, size, &buffer);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
282,7 → 282,7
IPC_SET_ARG2(call->data, size);
call->buffer = buffer;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Handle a debug call received on the kernel answerbox.
306,7 → 306,7
*/
if (TASK->udebug.debugger != call->sender) {
IPC_SET_RETVAL(call->data, EINVAL);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
}
/branches/sparc/kernel/generic/src/udebug/udebug.c
35,18 → 35,6
* @brief Udebug hooks and data structure management.
*
* Udebug is an interface that makes userspace debuggers possible.
*
* Functions in this file are executed directly in each thread, which
* may or may not be the subject of debugging. The udebug_stoppable_begin/end()
* functions are also executed in the clock interrupt handler. To avoid
* deadlock, functions in this file are protected from the interrupt
* by locking the recursive lock THREAD->udebug.int_lock (just an atomic
* variable). This prevents udebug_stoppable_begin/end() from being
* executed in the interrupt handler (they are skipped).
*
* Functions in udebug_ops.c and udebug_ipc.c execute in different threads,
* so they needn't be protected from the (preemptible) interrupt-initiated
* code.
*/
#include <synch/waitq.h>
55,16 → 43,7
#include <errno.h>
#include <arch.h>
 
static inline void udebug_int_lock(void)
{
atomic_inc(&THREAD->udebug.int_lock);
}
 
static inline void udebug_int_unlock(void)
{
atomic_dec(&THREAD->udebug.int_lock);
}
 
/** Initialize udebug part of task structure.
*
* Called as part of task structure initialization.
89,12 → 68,8
mutex_initialize(&ut->lock, MUTEX_PASSIVE);
waitq_initialize(&ut->go_wq);
 
/*
* At the beginning the thread is stoppable, so int_lock be set, too.
*/
atomic_set(&ut->int_lock, 1);
 
ut->go_call = NULL;
ut->uspace_state = NULL;
ut->go = false;
ut->stoppable = true;
ut->debug_active = false;
161,11 → 136,8
ASSERT(THREAD);
ASSERT(TASK);
 
udebug_int_lock();
 
/* Early check for undebugged tasks */
if (!udebug_thread_precheck()) {
udebug_int_unlock();
return;
}
 
231,7 → 203,6
{
/* Early check for undebugged tasks */
if (!udebug_thread_precheck()) {
udebug_int_unlock();
return;
}
 
257,44 → 228,17
mutex_unlock(&THREAD->udebug.lock);
mutex_unlock(&TASK->udebug.lock);
}
 
udebug_int_unlock();
}
 
/** Upon being scheduled to run, check if the current thread should stop.
*
* This function is called from clock(). Preemption is enabled.
* interrupts are disabled, but since this is called after
* being scheduled-in, we can enable them, if we're careful enough
* not to allow arbitrary recursion or deadlock with the thread context.
* This function is called from clock().
*/
void udebug_before_thread_runs(void)
{
ipl_t ipl;
 
return;
ASSERT(!PREEMPTION_DISABLED);
 
/*
* Prevent agains re-entering, such as when preempted inside this
* function.
*/
if (atomic_get(&THREAD->udebug.int_lock) != 0)
return;
 
udebug_int_lock();
 
ipl = interrupts_enable();
 
/* Now we're free to do whatever we need (lock mutexes, sleep, etc.) */
 
/* Check if we're supposed to stop */
udebug_stoppable_begin();
udebug_stoppable_end();
 
interrupts_restore(ipl);
 
udebug_int_unlock();
}
 
/** Syscall event hook.
311,11 → 255,8
 
etype = end_variant ? UDEBUG_EVENT_SYSCALL_E : UDEBUG_EVENT_SYSCALL_B;
 
udebug_int_lock();
 
/* Early check for undebugged tasks */
if (!udebug_thread_precheck()) {
udebug_int_unlock();
return;
}
 
362,27 → 303,33
mutex_unlock(&TASK->udebug.lock);
 
udebug_wait_for_go(&THREAD->udebug.go_wq);
 
udebug_int_unlock();
}
 
/** Thread-creation event hook.
/** Thread-creation event hook combined with attaching the thread.
*
* Must be called when a new userspace thread is created in the debugged
* task. Generates a THREAD_B event.
* task. Generates a THREAD_B event. Also attaches the thread @a t
* to the task @a ta.
*
* This is necessary to avoid a race condition where the BEGIN and THREAD_READ
* requests would be handled in between attaching the thread and checking it
* for being in a debugging session to send the THREAD_B event. We could then
* either miss threads or get some threads both in the thread list
* and get a THREAD_B event for them.
*
* @param t Structure of the thread being created. Not locked, as the
* thread is not executing yet.
* @param ta Task to which the thread should be attached.
*/
void udebug_thread_b_event(struct thread *t)
void udebug_thread_b_event_attach(struct thread *t, struct task *ta)
{
call_t *call;
 
udebug_int_lock();
 
mutex_lock(&TASK->udebug.lock);
mutex_lock(&THREAD->udebug.lock);
 
thread_attach(t, ta);
 
LOG("udebug_thread_b_event\n");
LOG("- check state\n");
 
419,8 → 366,6
 
LOG("- sleep\n");
udebug_wait_for_go(&THREAD->udebug.go_wq);
 
udebug_int_unlock();
}
 
/** Thread-termination event hook.
432,8 → 377,6
{
call_t *call;
 
udebug_int_lock();
 
mutex_lock(&TASK->udebug.lock);
mutex_lock(&THREAD->udebug.lock);
 
467,8 → 410,10
mutex_unlock(&THREAD->udebug.lock);
mutex_unlock(&TASK->udebug.lock);
 
/* Leave int_lock enabled. */
/* This event does not sleep - debugging has finished in this thread. */
/*
* This event does not sleep - debugging has finished
* in this thread.
*/
}
 
/**
491,8 → 436,6
LOG("udebug_task_cleanup()\n");
LOG("task %" PRIu64 "\n", ta->taskid);
 
udebug_int_lock();
 
if (ta->udebug.dt_state != UDEBUG_TS_BEGINNING &&
ta->udebug.dt_state != UDEBUG_TS_ACTIVE) {
LOG("udebug_task_cleanup(): task not being debugged\n");
554,8 → 497,6
ta->udebug.dt_state = UDEBUG_TS_INACTIVE;
ta->udebug.debugger = NULL;
 
udebug_int_unlock();
 
return 0;
}
 
/branches/sparc/kernel/generic/src/udebug/udebug_ops.c
317,7 → 317,6
int rc;
 
LOG("udebug_stop()\n");
mutex_lock(&TASK->udebug.lock);
 
/*
* On success, this will lock t->udebug.lock. Note that this makes sure
354,6 → 353,7
 
_thread_op_end(t);
 
mutex_lock(&TASK->udebug.lock);
ipc_answer(&TASK->answerbox, call);
mutex_unlock(&TASK->udebug.lock);
 
/branches/sparc/kernel/Makefile
126,6 → 126,18
DEFS += -DCONFIG_NS16550
endif
 
ifeq ($(CONFIG_I8042_INTERRUPT_DRIVEN),y)
DEFS += -DCONFIG_I8042_INTERRUPT_DRIVEN
endif
 
ifeq ($(CONFIG_NS16550_INTERRUPT_DRIVEN),y)
DEFS += -DCONFIG_NS16550_INTERRUPT_DRIVEN
endif
 
ifeq ($(CONFIG_IOSAPIC),y)
DEFS += -DCONFIG_IOSAPIC
endif
 
ifeq ($(CONFIG_VIRT_IDX_DCACHE),y)
DEFS += -DCONFIG_VIRT_IDX_DCACHE
endif
/branches/sparc/kernel/arch/sparc64/include/drivers/pci.h
51,8 → 51,8
};
 
struct pci_operations {
void (* enable_interrupt)(pci_t *pci, int inr);
void (* clear_interrupt)(pci_t *pci, int inr);
void (* enable_interrupt)(pci_t *, int);
void (* clear_interrupt)(pci_t *, int);
};
 
struct pci {
61,9 → 61,9
volatile uint64_t *reg; /**< Registers including interrupt registers. */
};
 
extern pci_t *pci_init(ofw_tree_node_t *node);
extern void pci_enable_interrupt(pci_t *pci, int inr);
extern void pci_clear_interrupt(pci_t *pci, int inr);
extern pci_t *pci_init(ofw_tree_node_t *);
extern void pci_enable_interrupt(pci_t *, int);
extern void pci_clear_interrupt(void *, int);
 
#endif
 
/branches/sparc/kernel/arch/sparc64/include/drivers/fhc.h
44,9 → 44,9
 
extern fhc_t *central_fhc;
 
extern fhc_t *fhc_init(ofw_tree_node_t *node);
extern void fhc_enable_interrupt(fhc_t *fhc, int inr);
extern void fhc_clear_interrupt(fhc_t *fhc, int inr);
extern fhc_t *fhc_init(ofw_tree_node_t *);
extern void fhc_enable_interrupt(fhc_t *, int);
extern void fhc_clear_interrupt(void *, int);
 
#endif
 
/branches/sparc/kernel/arch/sparc64/src/console.c
145,16 → 145,27
}
#endif
 
#ifdef CONFIG_NS16550
#ifdef CONFIG_NS16550_INTERRUPT_DRIVEN
if (kbd_type == KBD_NS16550) {
/*
* The ns16550 driver is interrupt-driven.
*/
return;
}
#endif
#endif
while (1) {
#ifdef CONFIG_NS16550
#ifndef CONFIG_NS16550_INTERRUPT_DRIVEN
if (kbd_type == KBD_NS16550)
ns16550_poll();
#endif
#endif
#ifdef CONFIG_SGCN
if (kbd_type == KBD_SGCN)
sgcn_poll();
#endif
 
thread_usleep(KEYBOARD_POLL_PAUSE);
}
}
/branches/sparc/kernel/arch/sparc64/src/trap/interrupt.c
87,6 → 87,12
* The IRQ handler was found.
*/
irq->handler(irq, irq->arg);
/*
* See if there is a clear-interrupt-routine and call it.
*/
if (irq->cir) {
irq->cir(irq->cir_arg, irq->inr);
}
spinlock_unlock(&irq->lock);
} else if (data0 > config.base) {
/*
106,7 → 112,7
*/
#ifdef CONFIG_DEBUG
printf("cpu%u: spurious interrupt (intrcv=%#" PRIx64
", data0=%#" PRIx64 ")\n", CPU->id, intrcv, data0);
", data0=%#" PRIx64 ")\n", CPU->id, intrcv, data0);
#endif
}
 
/branches/sparc/kernel/arch/sparc64/src/drivers/fhc.c
101,8 → 101,9
}
}
 
void fhc_clear_interrupt(fhc_t *fhc, int inr)
void fhc_clear_interrupt(void *fhcp, int inr)
{
fhc_t *fhc = (fhc_t *)fhcp;
ASSERT(fhc->uart_imap);
 
switch (inr) {
/branches/sparc/kernel/arch/sparc64/src/drivers/kbd.c
63,6 → 63,8
uintptr_t aligned_addr;
ofw_tree_property_t *prop;
const char *name;
cir_t cir;
void *cir_arg;
name = ofw_tree_node_name(node);
103,11 → 105,14
switch (kbd_type) {
case KBD_Z8530:
size = ((ofw_fhc_reg_t *) prop->value)->size;
if (!ofw_fhc_apply_ranges(node->parent, ((ofw_fhc_reg_t *) prop->value) , &pa)) {
if (!ofw_fhc_apply_ranges(node->parent,
((ofw_fhc_reg_t *) prop->value), &pa)) {
printf("Failed to determine keyboard address.\n");
return;
}
if (!ofw_fhc_map_interrupt(node->parent, ((ofw_fhc_reg_t *) prop->value), interrupts, &inr)) {
if (!ofw_fhc_map_interrupt(node->parent,
((ofw_fhc_reg_t *) prop->value), interrupts, &inr, &cir,
&cir_arg)) {
printf("Failed to determine keyboard interrupt.\n");
return;
}
115,11 → 120,14
case KBD_NS16550:
size = ((ofw_ebus_reg_t *) prop->value)->size;
if (!ofw_ebus_apply_ranges(node->parent, ((ofw_ebus_reg_t *) prop->value) , &pa)) {
if (!ofw_ebus_apply_ranges(node->parent,
((ofw_ebus_reg_t *) prop->value), &pa)) {
printf("Failed to determine keyboard address.\n");
return;
}
if (!ofw_ebus_map_interrupt(node->parent, ((ofw_ebus_reg_t *) prop->value), interrupts, &inr)) {
if (!ofw_ebus_map_interrupt(node->parent,
((ofw_ebus_reg_t *) prop->value), interrupts, &inr, &cir,
&cir_arg)) {
printf("Failed to determine keyboard interrupt.\n");
return;
};
142,16 → 150,17
switch (kbd_type) {
#ifdef CONFIG_Z8530
case KBD_Z8530:
z8530_init(devno, inr, vaddr);
z8530_init(devno, vaddr, inr, cir, cir_arg);
break;
#endif
#ifdef CONFIG_NS16550
case KBD_NS16550:
ns16550_init(devno, inr, (ioport_t)vaddr);
ns16550_init(devno, (ioport_t)vaddr, inr, cir, cir_arg);
break;
#endif
default:
printf("Kernel is not compiled with the necessary keyboard driver this machine requires.\n");
printf("Kernel is not compiled with the necessary keyboard "
"driver this machine requires.\n");
}
}
 
/branches/sparc/kernel/arch/sparc64/src/drivers/pci.c
45,40 → 45,37
#include <func.h>
#include <arch/asm.h>
 
#define PCI_SABRE_REGS_REG 0
#define SABRE_INTERNAL_REG 0
#define PSYCHO_INTERNAL_REG 2
 
#define PCI_SABRE_IMAP_BASE 0x200
#define PCI_SABRE_ICLR_BASE 0x300
#define OBIO_IMR_BASE 0x200
#define OBIO_IMR(ino) (OBIO_IMR_BASE + ((ino) & INO_MASK))
 
#define PCI_PSYCHO_REGS_REG 2
#define OBIO_CIR_BASE 0x300
#define OBIO_CIR(ino) (OBIO_CIR_BASE + ((ino) & INO_MASK))
 
#define PCI_PSYCHO_IMAP_BASE 0x200
#define PCI_PSYCHO_ICLR_BASE 0x300
static void obio_enable_interrupt(pci_t *, int);
static void obio_clear_interrupt(pci_t *, int);
 
static pci_t *pci_sabre_init(ofw_tree_node_t *node);
static void pci_sabre_enable_interrupt(pci_t *pci, int inr);
static void pci_sabre_clear_interrupt(pci_t *pci, int inr);
static pci_t *pci_sabre_init(ofw_tree_node_t *);
static pci_t *pci_psycho_init(ofw_tree_node_t *);
 
static pci_t *pci_psycho_init(ofw_tree_node_t *node);
static void pci_psycho_enable_interrupt(pci_t *pci, int inr);
static void pci_psycho_clear_interrupt(pci_t *pci, int inr);
 
/** PCI operations for Sabre model. */
static pci_operations_t pci_sabre_ops = {
.enable_interrupt = pci_sabre_enable_interrupt,
.clear_interrupt = pci_sabre_clear_interrupt
.enable_interrupt = obio_enable_interrupt,
.clear_interrupt = obio_clear_interrupt
};
/** PCI operations for Psycho model. */
static pci_operations_t pci_psycho_ops = {
.enable_interrupt = pci_psycho_enable_interrupt,
.clear_interrupt = pci_psycho_clear_interrupt
.enable_interrupt = obio_enable_interrupt,
.clear_interrupt = obio_clear_interrupt
};
 
/** Initialize PCI controller (model Sabre).
*
* @param node OpenFirmware device tree node of the Sabre.
* @param node OpenFirmware device tree node of the Sabre.
*
* @return Address of the initialized PCI structure.
* @return Address of the initialized PCI structure.
*/
pci_t *pci_sabre_init(ofw_tree_node_t *node)
{
95,11 → 92,12
ofw_upa_reg_t *reg = prop->value;
count_t regs = prop->size / sizeof(ofw_upa_reg_t);
 
if (regs < PCI_SABRE_REGS_REG + 1)
if (regs < SABRE_INTERNAL_REG + 1)
return NULL;
 
uintptr_t paddr;
if (!ofw_upa_apply_ranges(node->parent, &reg[PCI_SABRE_REGS_REG], &paddr))
if (!ofw_upa_apply_ranges(node->parent, &reg[SABRE_INTERNAL_REG],
&paddr))
return NULL;
 
pci = (pci_t *) malloc(sizeof(pci_t), FRAME_ATOMIC);
108,7 → 106,7
 
pci->model = PCI_SABRE;
pci->op = &pci_sabre_ops;
pci->reg = (uint64_t *) hw_map(paddr, reg[PCI_SABRE_REGS_REG].size);
pci->reg = (uint64_t *) hw_map(paddr, reg[SABRE_INTERNAL_REG].size);
 
return pci;
}
116,9 → 114,9
 
/** Initialize the Psycho PCI controller.
*
* @param node OpenFirmware device tree node of the Psycho.
* @param node OpenFirmware device tree node of the Psycho.
*
* @return Address of the initialized PCI structure.
* @return Address of the initialized PCI structure.
*/
pci_t *pci_psycho_init(ofw_tree_node_t *node)
{
135,11 → 133,12
ofw_upa_reg_t *reg = prop->value;
count_t regs = prop->size / sizeof(ofw_upa_reg_t);
 
if (regs < PCI_PSYCHO_REGS_REG + 1)
if (regs < PSYCHO_INTERNAL_REG + 1)
return NULL;
 
uintptr_t paddr;
if (!ofw_upa_apply_ranges(node->parent, &reg[PCI_PSYCHO_REGS_REG], &paddr))
if (!ofw_upa_apply_ranges(node->parent, &reg[PSYCHO_INTERNAL_REG],
&paddr))
return NULL;
 
pci = (pci_t *) malloc(sizeof(pci_t), FRAME_ATOMIC);
148,31 → 147,21
 
pci->model = PCI_PSYCHO;
pci->op = &pci_psycho_ops;
pci->reg = (uint64_t *) hw_map(paddr, reg[PCI_PSYCHO_REGS_REG].size);
pci->reg = (uint64_t *) hw_map(paddr, reg[PSYCHO_INTERNAL_REG].size);
 
return pci;
}
 
void pci_sabre_enable_interrupt(pci_t *pci, int inr)
void obio_enable_interrupt(pci_t *pci, int inr)
{
pci->reg[PCI_SABRE_IMAP_BASE + (inr & INO_MASK)] |= IMAP_V_MASK;
pci->reg[OBIO_IMR(inr & INO_MASK)] |= IMAP_V_MASK;
}
 
void pci_sabre_clear_interrupt(pci_t *pci, int inr)
void obio_clear_interrupt(pci_t *pci, int inr)
{
pci->reg[PCI_SABRE_ICLR_BASE + (inr & INO_MASK)] = 0;
pci->reg[OBIO_CIR(inr & INO_MASK)] = 0; /* set IDLE */
}
 
void pci_psycho_enable_interrupt(pci_t *pci, int inr)
{
pci->reg[PCI_PSYCHO_IMAP_BASE + (inr & INO_MASK)] |= IMAP_V_MASK;
}
 
void pci_psycho_clear_interrupt(pci_t *pci, int inr)
{
pci->reg[PCI_PSYCHO_ICLR_BASE + (inr & INO_MASK)] = 0;
}
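
A quick reading of the addressing (illustration only): pci->reg is an array of 64-bit registers, so OBIO_IMR()/OBIO_CIR() compute word indices, not byte offsets. For a hypothetical INO of 0x29:

/* OBIO_IMR(0x29) == 0x200 + 0x29 == 0x229 -> pci->reg[0x229] |= IMAP_V_MASK; (enable) */
/* OBIO_CIR(0x29) == 0x300 + 0x29 == 0x329 -> pci->reg[0x329] = 0; (back to IDLE) */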
 
/** Initialize PCI controller. */
pci_t *pci_init(ofw_tree_node_t *node)
{
215,14 → 204,14
 
void pci_enable_interrupt(pci_t *pci, int inr)
{
ASSERT(pci->model);
ASSERT(pci->op && pci->op->enable_interrupt);
pci->op->enable_interrupt(pci, inr);
}
 
void pci_clear_interrupt(pci_t *pci, int inr)
void pci_clear_interrupt(void *pcip, int inr)
{
ASSERT(pci->model);
pci_t *pci = (pci_t *)pcip;
 
ASSERT(pci->op && pci->op->clear_interrupt);
pci->op->clear_interrupt(pci, inr);
}
/branches/sparc/kernel/arch/ia64/include/interrupt.h
50,10 → 50,13
#define IVT_FIRST 0
 
/** External Interrupt vectors. */
 
#define VECTOR_TLB_SHOOTDOWN_IPI 0xf0
#define INTERRUPT_TIMER 255
#define IRQ_KBD 241
#define IRQ_MOUSE 252
#define IRQ_KBD (0x01+LAGACY_INTERRUPT_BASE)
#define IRQ_MOUSE (0x0c+LAGACY_INTERRUPT_BASE)
#define INTERRUPT_SPURIOUS 15
#define LAGACY_INTERRUPT_BASE 0x20
 
/** General Exception codes. */
#define GE_ILLEGALOP 0
150,6 → 153,7
extern void external_interrupt(uint64_t vector, istate_t *istate);
extern void disabled_fp_register(uint64_t vector, istate_t *istate);
 
 
#endif
 
/** @}
/branches/sparc/kernel/arch/ia64/include/proc/task.h
31,14 → 31,19
*/
/** @file
*/
#include <proc/task.h>
 
#ifndef KERN_ia64_TASK_H_
#define KERN_ia64_TASK_H_
 
#include <adt/bitmap.h>
 
typedef struct {
bitmap_t *iomap;
} task_arch_t;
 
#define task_create_arch(t)
 
#define task_create_arch(t) {(t)->arch.iomap=NULL;}
#define task_destroy_arch(t)
 
#endif
/branches/sparc/kernel/arch/ia64/include/bootinfo.h
33,6 → 33,13
 
#define CONFIG_INIT_TASKS 32
 
#define MEMMAP_ITEMS 128
 
#define EFI_MEMMAP_FREE_MEM 0
#define EFI_MEMMAP_IO 1
#define EFI_MEMMAP_IO_PORTS 2
 
 
typedef struct {
void *addr;
unsigned long size;
43,14 → 50,24
binit_task_t tasks[CONFIG_INIT_TASKS];
} binit_t;
 
typedef struct {
unsigned int type;
unsigned long base;
unsigned long size;
}efi_memmap_item_t;
 
 
typedef struct {
binit_t taskmap;
 
efi_memmap_item_t memmap[MEMMAP_ITEMS];
unsigned int memmap_items;
 
unsigned long * sapic;
unsigned long sys_freq;
unsigned long freq_scale;
unsigned int wakeup_intno;
int hello_configured;
 
} bootinfo_t;
 
/branches/sparc/kernel/arch/ia64/include/mm/page.h
48,8 → 48,15
#define IO_PAGE_WIDTH 26 /* 64M */
#define FW_PAGE_WIDTH 28 /* 256M */
 
/** Statically mapped IO spaces */
#define USPACE_IO_PAGE_WIDTH 12 /* 4K */
 
 
 
/** Statically mapped IO spaces - offsets to 0xe...00 of virtual addresses.
Because the minimal number of implemented virtual address bits is 51,
it is possible to have values here of up to 0x0007000000000000.
*/
 
/* Firmware area (below 4GB in phys mem) */
#define FW_OFFSET 0x00000000F0000000
/* Legacy IO space */
/branches/sparc/kernel/arch/ia64/include/mm/tlb.h
46,8 → 46,8
/** Data and instruction Translation Register indices. */
#define DTR_KERNEL 0
#define ITR_KERNEL 0
#define DTR_KSTACK1 1
#define DTR_KSTACK2 2
#define DTR_KSTACK1 4
#define DTR_KSTACK2 5
 
/** Portion of TLB insertion format data structure. */
union tlb_entry {
/branches/sparc/kernel/arch/ia64/include/cpu.h
87,6 → 87,8
static inline void ipi_send_ipi(int id,int eid,int intno)
{
(bootinfo->sapic)[2*(id*256+eid)]=intno;
srlz_d();
 
}
 
 
/branches/sparc/kernel/arch/ia64/include/drivers/kbd.h
0,0 → 1,48
/*
* Copyright (c) 2006 Jakub Jermar, Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia6464
* @{
*/
/** @file
*/
 
#ifndef KERN_ia64_KBD_H_
#define KERN_ia64_KBD_H_
 
 
#define KBD_UNKNOWN 0
#define KBD_SKI 1
#define KBD_LEGACY 2
#define KBD_NS16550 3
 
 
#endif
 
/** @}
*/
/branches/sparc/kernel/arch/ia64/src/ia64.c
60,13 → 60,16
#include <arch/atomic.h>
#include <panic.h>
#include <print.h>
#include <sysinfo/sysinfo.h>
 
/*NS16550 as a COM 1*/
#define NS16550_IRQ 4
#define NS16550_IRQ (4+LAGACY_INTERRUPT_BASE)
#define NS16550_PORT 0x3f8
 
bootinfo_t *bootinfo;
 
static uint64_t iosapic_base=0xfec00000;
 
void arch_pre_main(void)
{
/* Setup usermode init tasks. */
110,10 → 113,40
}
 
static void iosapic_init(void)
{
 
uint64_t IOSAPIC = PA2KA((unative_t)(iosapic_base))|FW_OFFSET;
int i;
int myid,myeid;
myid=ia64_get_cpu_id();
myeid=ia64_get_cpu_eid();
 
for(i=0;i<16;i++)
{
if(i==2) continue; //Disable Cascade interrupt
((uint32_t*)(IOSAPIC+0x00))[0]=0x10+2*i;
srlz_d();
((uint32_t*)(IOSAPIC+0x10))[0]=LAGACY_INTERRUPT_BASE+i;
srlz_d();
((uint32_t*)(IOSAPIC+0x00))[0]=0x10+2*i+1;
srlz_d();
((uint32_t*)(IOSAPIC+0x10))[0]=myid<<(56-32) | myeid<<(48-32);
srlz_d();
}
 
}
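
An annotation of the loop above (not part of the revision), assuming the standard IOSAPIC indirect-register model: offset 0x00 selects a register, offset 0x10 is the data window, and redirection table entry i occupies the register pair 0x10 + 2*i (low word: interrupt vector) and 0x10 + 2*i + 1 (high word: destination ID/EID in the upper bits). A hypothetical helper makes the intent clearer:

static void iosapic_write(uint64_t iosapic, uint8_t reg, uint32_t value)
{
((uint32_t *)(iosapic + 0x00))[0] = reg; /* select the register */
srlz_d();
((uint32_t *)(iosapic + 0x10))[0] = value; /* write through the data window */
srlz_d();
}

/* The loop is then equivalent to routing every legacy ISA IRQ i except the
* cascade (i == 2) to vector LAGACY_INTERRUPT_BASE + i on the boot CPU:
* iosapic_write(IOSAPIC, 0x10 + 2 * i, LAGACY_INTERRUPT_BASE + i);
* iosapic_write(IOSAPIC, 0x10 + 2 * i + 1, myid << (56 - 32) | myeid << (48 - 32));
*/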
 
 
void arch_post_mm_init(void)
{
if(config.cpu_active==1)
{
iosapic_init();
irq_init(INR_COUNT, INR_COUNT);
#ifdef SKI
ski_init_console();
121,7 → 154,8
ega_init();
#endif
}
it_init();
it_init();
}
 
void arch_post_cpu_init(void)
139,9 → 173,14
static void i8042_kkbdpoll(void *arg)
{
while (1) {
i8042_poll();
#ifdef CONFIG_NS16550
#ifndef CONFIG_NS16550_INTERRUPT_DRIVEN
ns16550_poll();
#endif
#else
#ifndef CONFIG_I8042_INTERRUPT_DRIVEN
i8042_poll();
#endif
#endif
thread_usleep(POLL_INTERVAL);
}
148,6 → 187,14
}
#endif
 
 
void end_of_irq_void(void *cir_arg __attribute__((unused)),inr_t inr __attribute__((unused)));
void end_of_irq_void(void *cir_arg __attribute__((unused)),inr_t inr __attribute__((unused)))
{
return;
}
 
 
void arch_post_smp_init(void)
{
 
165,13 → 212,13
 
#ifdef I460GX
devno_t kbd = device_assign_devno();
devno_t mouse = device_assign_devno();
/* keyboard controller */
i8042_init(kbd, IRQ_KBD, mouse, IRQ_MOUSE);
 
#ifdef CONFIG_NS16550
ns16550_init(kbd, NS16550_IRQ, NS16550_PORT); // as a COM 1
ns16550_init(kbd, NS16550_PORT, NS16550_IRQ,end_of_irq_void,NULL); // as a COM 1
#else
devno_t mouse = device_assign_devno();
i8042_init(kbd, IRQ_KBD, mouse, IRQ_MOUSE);
#endif
thread_t *t;
t = thread_create(i8042_kkbdpoll, NULL, TASK, 0, "kkbdpoll", true);
182,6 → 229,15
#endif
 
}
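/*
 * Export the virtual base of the ia64 I/O space so that userspace drivers can
 * locate memory-mapped legacy I/O ports.
 */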
sysinfo_set_item_val("ia64_iospace", NULL, true);
sysinfo_set_item_val("ia64_iospace.address", NULL, true);
sysinfo_set_item_val("ia64_iospace.address.virtual", NULL, IO_OFFSET);
}
 
 
231,6 → 287,12
{
#ifdef SKI
ski_kbd_grab();
#else
#ifdef CONFIG_NS16550
ns16550_grab();
#else
i8042_grab();
#endif
#endif
}
/** Return console to userspace
240,6 → 302,13
{
#ifdef SKI
ski_kbd_release();
#else
#ifdef CONFIG_NS16550
ns16550_release();
#else
i8042_release();
#endif
 
#endif
}
 
/branches/sparc/kernel/arch/ia64/src/ski/ski.c
44,6 → 44,7
#include <proc/thread.h>
#include <synch/spinlock.h>
#include <arch/asm.h>
#include <arch/drivers/kbd.h>
 
#define SKI_KBD_INR 0
 
227,6 → 228,7
sysinfo_set_item_val("kbd", NULL, true);
sysinfo_set_item_val("kbd.inr", NULL, SKI_KBD_INR);
sysinfo_set_item_val("kbd.devno", NULL, ski_kbd_devno);
sysinfo_set_item_val("kbd.type", NULL, KBD_SKI);
}
 
void ski_kbd_grab(void)
/branches/sparc/kernel/arch/ia64/src/smp/smp.c
87,7 → 87,6
myid=ia64_get_cpu_id();
myeid=ia64_get_cpu_eid();
 
printf("Not sending to ID:%d,EID:%d",myid,myeid);
for(id=0;id<256;id++)
for(eid=0;eid<256;eid++)
97,12 → 96,28
 
void ipi_broadcast_arch(int ipi )
{
ipi_broadcast_arch_all(ipi);
	int id, eid;
	int myid, myeid;

	myid = ia64_get_cpu_id();
	myeid = ia64_get_cpu_eid();

	/* Send the IPI to every registered CPU except this one. */
	for (id = 0; id < 256; id++)
		for (eid = 0; eid < 256; eid++)
			if ((id != myid) || (eid != myeid))
				if (cpu_by_id_eid_list[id][eid])
					ipi_send_ipi(id, eid, ipi);
 
}
 
 
void smp_init(void)
{
	if (!bootinfo->hello_configured)
		return;
	/*
	 * Without a system set up by the `hello' boot loader we cannot start
	 * the application processors; this means we are running in a simulator.
	 */
sapic_init();
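	/* Wake the application processors by broadcasting the boot loader's wakeup vector. */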
ipi_broadcast_arch_all(bootinfo->wakeup_intno);
volatile long long brk;
115,7 → 130,6
for(eid=0;eid<256;eid++)
if(cpu_by_id_eid_list[id][eid]==1){
config.cpu_count++;
printf("Found CPU ID:%d EDI:%d\n",id,eid);
cpu_by_id_eid_list[id][eid]=2;
 
}
/branches/sparc/kernel/arch/ia64/src/ddi/ddi.c
1,5 → 1,5
/*
* Copyright (c) 2006 Jakub Jermar
* Copyright (c) 2006 Jakub Jermar, Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
35,7 → 35,12
#include <ddi/ddi.h>
#include <proc/task.h>
#include <arch/types.h>
#include <mm/slab.h>
#include <errno.h>
 
#define IO_MEMMAP_PAGES 16384
#define PORTS_PER_PAGE 4
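
/*
 * Each bit in the per-task I/O bitmap covers PORTS_PER_PAGE ports, so
 * IO_MEMMAP_PAGES bits span the whole 64 KiB legacy I/O port space.
 */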
 
/** Enable I/O space range for task.
*
* Interrupts are disabled and task is locked.
48,6 → 53,23
*/
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
 
	if (!task->arch.iomap) {
		uint8_t *map;

		task->arch.iomap = malloc(sizeof(bitmap_t), 0);
		if (!task->arch.iomap)
			return ENOMEM;
		map = malloc(BITS2BYTES(IO_MEMMAP_PAGES), 0);
		if (!map)
			return ENOMEM;
		bitmap_initialize(task->arch.iomap, map, IO_MEMMAP_PAGES);
		bitmap_clear_range(task->arch.iomap, 0, IO_MEMMAP_PAGES);
	}

	/* Mark the bitmap pages covering ports [ioaddr, ioaddr + size) as enabled. */
	uintptr_t iopage = ioaddr / PORTS_PER_PAGE;
	size = ALIGN_UP(size + ioaddr - PORTS_PER_PAGE * iopage, PORTS_PER_PAGE);
	bitmap_set_range(task->arch.iomap, iopage, size / PORTS_PER_PAGE);

	return 0;
}
 
/branches/sparc/kernel/arch/ia64/src/mm/tlb.c
475,6 → 475,73
}
}
 
 
 
static int is_io_page_accessible(int page)
{
	if (TASK->arch.iomap)
		return bitmap_get(TASK->arch.iomap, page);
	else
		return 0;
}
 
#define IO_FRAME_BASE 0xFFFFC000000	/* Physical base of the memory-mapped legacy I/O window. */
 
/** Handle faults in the memory-mapped legacy I/O area.
 *
 * Memory-mapped legacy I/O gets special handling because userspace accesses
 * it in 4 KiB pages; this path applies to userspace only.
 *
 * @param va Virtual address of the page fault.
 * @param istate Structure with saved interruption state.
 *
 * @return 1 on success, 0 on failure.
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
	if ((va >= IO_OFFSET) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
		if (TASK) {
			uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
			    USPACE_IO_PAGE_WIDTH;

			if (is_io_page_accessible(io_page)) {
				uint64_t page, frame;

				page = IO_OFFSET +
				    (1 << USPACE_IO_PAGE_WIDTH) * io_page;
				frame = IO_FRAME_BASE +
				    (1 << USPACE_IO_PAGE_WIDTH) * io_page;

				tlb_entry_t entry;

				entry.word[0] = 0;
				entry.word[1] = 0;
				entry.p = true;			/* present */
				entry.ma = MA_UNCACHEABLE;
				entry.a = true;			/* already accessed */
				entry.d = true;			/* already dirty */
				entry.pl = PL_USER;
				entry.ar = AR_READ | AR_WRITE;
				entry.ppn = frame >> PPN_SHIFT;	/* I must compute the frame */
				entry.ps = USPACE_IO_PAGE_WIDTH;

				/* I must find out the ASID */
				dtc_mapping_insert(page, TASK->as->asid, entry);
				return 1;
			} else {
				fault_if_from_uspace(istate,
				    "IO access fault at %p", va);
				return 0;
			}
		}
	}
	return 0;
}
 
 
 
 
/** Data TLB fault handler for faults with VHPT turned off.
*
* @param vector Interruption vector.
511,10 → 578,11
dtc_pte_copy(t);
page_table_unlock(AS, true);
} else {
page_table_unlock(AS, true);
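		/*
		 * First try to satisfy the fault from the memory-mapped
		 * legacy I/O area.
		 */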
		if (try_memmap_io_insertion(va, istate))
			return;
/*
* Forward the page fault to the address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
fault_if_from_uspace(istate,"Page fault at %p",va);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
/branches/sparc/kernel/arch/ia64/src/mm/frame.c
36,14 → 36,20
#include <mm/frame.h>
#include <config.h>
#include <panic.h>
#include <arch/bootinfo.h>
#include <align.h>
#include <macros.h>
 
/*
* This is Ski-specific and certainly not sufficient
* for real ia64 systems that provide memory map.
*/
#define MEMORY_SIZE (64 * 1024 * 1024)
#define MEMORY_SIZE (256 * 1024 * 1024)
#define MEMORY_BASE (0 * 64 * 1024 * 1024)
 
#define KERNEL_RESERVED_AREA_BASE (0x4400000)
#define KERNEL_RESERVED_AREA_SIZE (16*1024*1024)
 
#define ONE_TO_ONE_MAPPING_SIZE (256*1048576) // Mapped at start
 
#define ROM_BASE 0xa0000 //For ski
50,22 → 56,41
#define ROM_SIZE (384 * 1024) //For ski
void poke_char(int x,int y,char ch, char c);
 
#define MIN_ZONE_SIZE (64*1024)
 
uintptr_t last_frame;
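/* Lowest frame number passed to zone_create() as the zone's configuration frame (avoids frame 0). */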
#define MINCONF 1
 
void frame_arch_init(void)
{
 
if(config.cpu_active==1)
{
zone_create(MEMORY_BASE >> FRAME_WIDTH, SIZE2FRAMES(MEMORY_SIZE), (MEMORY_SIZE) >> FRAME_WIDTH, 0);
if(config.cpu_active==1){
		unsigned int i;

		/* Create a frame zone for each free entry in the EFI memory map. */
		for (i = 0; i < bootinfo->memmap_items; i++) {
			if (bootinfo->memmap[i].type == EFI_MEMMAP_FREE_MEM) {
				uint64_t base = bootinfo->memmap[i].base;
				uint64_t size = bootinfo->memmap[i].size;
				uint64_t abase = ALIGN_UP(base, FRAME_SIZE);

				if (size > FRAME_SIZE)
					size -= abase - base;

				if (size > MIN_ZONE_SIZE)
					zone_create(abase >> FRAME_WIDTH,
					    size >> FRAME_WIDTH,
					    max(MINCONF, abase >> FRAME_WIDTH),
					    0);
			}
		}
//zone_create(MEMORY_BASE >> FRAME_WIDTH, SIZE2FRAMES(MEMORY_SIZE), (MEMORY_SIZE) >> FRAME_WIDTH, 0);
/*
* Blacklist ROM regions.
*/
//frame_mark_unavailable(ADDR2PFN(ROM_BASE), SIZE2FRAMES(ROM_SIZE));
frame_mark_unavailable(ADDR2PFN(ROM_BASE), SIZE2FRAMES(ROM_SIZE));
 
frame_mark_unavailable(ADDR2PFN(0), SIZE2FRAMES(1048576));
last_frame=SIZE2FRAMES((VRN_KERNEL<<VRN_SHIFT)+ONE_TO_ONE_MAPPING_SIZE);
frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE), SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE));
}
}
 
/branches/sparc/kernel/arch/ia64/src/interrupt.c
53,6 → 53,7
#include <ipc/irq.h>
#include <ipc/ipc.h>
#include <synch/spinlock.h>
#include <mm/tlb.h>
 
#define VECTORS_64_BUNDLE 20
#define VECTORS_16_BUNDLE 48
234,19 → 235,19
vector_to_string(vector));
}
 
/** Signal end of interrupt to the local SAPIC by writing the cr.eoi register. */
static void end_of_local_irq(void)
{
	asm volatile ("mov cr.eoi=r0;;");
}
 
 
void external_interrupt(uint64_t vector, istate_t *istate)
{
irq_t *irq;
cr_ivr_t ivr;
ivr.value = ivr_read();
srlz_d();
 
irq = irq_dispatch_and_lock(ivr.vector);
if (irq) {
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
switch (ivr.vector) {
case INTERRUPT_SPURIOUS:
#ifdef CONFIG_DEBUG
254,12 → 255,60
#endif
break;
 
#ifdef CONFIG_SMP
case VECTOR_TLB_SHOOTDOWN_IPI:
tlb_shootdown_ipi_recv();
end_of_local_irq();
break;
#endif
 
case INTERRUPT_TIMER:
{
 
irq_t *irq = irq_dispatch_and_lock(ivr.vector);
if (irq) {
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
panic("\nUnhandled Internal Timer Interrupt (%d)\n",ivr.vector);
}
}
break;
default:
panic("\nUnhandled External Interrupt Vector %d\n",
ivr.vector);
{
 
int ack = false;	/* Has EOI already been sent for this interrupt? */
irq_t *irq = irq_dispatch_and_lock(ivr.vector);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
end_of_local_irq();
ack=true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
/*
* Unhandled interrupt.
*/
end_of_local_irq();
ack=true;
#ifdef CONFIG_DEBUG
printf("\nUnhandled External Interrupt Vector %d\n",ivr.vector);
#endif
}
			if (!ack)
				end_of_local_irq();
		}
		break;
}
}
}
 
/** @}
/branches/sparc/kernel/arch/ia64/src/drivers/ega.c
46,12 → 46,18
#include <console/console.h>
#include <sysinfo/sysinfo.h>
#include <arch/drivers/ega.h>
#include <ddi/ddi.h>
 
 
/*
 * The EGA driver.
 * Simple and short. Functions for displaying characters and "scrolling".
 */

static parea_t ega_parea;	/**< Physical memory area for EGA video RAM. */

SPINLOCK_INITIALIZE(egalock);
static uint32_t ega_cursor;
static uint8_t *videoram;
75,12 → 81,21
 
chardev_initialize("ega_out", &ega_console, &ega_ops);
stdout = &ega_console;
 
 
	/*
	 * Register the EGA video RAM as a physical memory area so that
	 * userspace (e.g. the framebuffer server) can map it via DDI.
	 */
	ega_parea.pbase = VIDEORAM & 0xffffffff;
	ega_parea.vbase = (uintptr_t) videoram;
	ega_parea.frames = 1;
	ega_parea.cacheable = false;
	ddi_parea_register(&ega_parea);
 
sysinfo_set_item_val("fb", NULL, true);
sysinfo_set_item_val("fb.kind", NULL, 2);
sysinfo_set_item_val("fb.width", NULL, ROW);
sysinfo_set_item_val("fb.height", NULL, ROWS);
sysinfo_set_item_val("fb.address.physical", NULL, VIDEORAM);
sysinfo_set_item_val("fb.blinking", NULL, true);
sysinfo_set_item_val("fb.address.physical", NULL, VIDEORAM & 0xffffffff);
#ifndef CONFIG_FB
putchar('\n');
/branches/sparc/kernel/arch/ia32/src/asm.S
158,6 → 158,7
*/
.global sysenter_handler
sysenter_handler:
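	# sysenter clears EFLAGS.IF on entry; re-enable interrupts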
sti
pushl %ebp # remember user stack
pushl %edi # remember return user address