//kernel/trunk/generic/src/adt/bitmap.c |
---|
0,0 → 1,165 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file bitmap.c |
* @brief Implementation of bitmap ADT. |
* |
* This file implements bitmap ADT and provides functions for |
* setting and clearing ranges of bits. |
*/ |
#include <adt/bitmap.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <align.h> |
#include <debug.h> |
#include <macros.h> |
#define ALL_ONES 0xff |
#define ALL_ZEROES 0x00 |
/** Initialize bitmap. |
* |
* No portion of the bitmap is set or cleared by this function. |
* |
* @param bitmap Bitmap structure. |
* @param map Address of the memory used to hold the map. |
* @param bits Number of bits stored in bitmap. |
*/ |
void bitmap_initialize(bitmap_t *bitmap, __u8 *map, count_t bits) |
{ |
bitmap->map = map; |
bitmap->bits = bits; |
} |
/** Set range of bits. |
* |
* @param bitmap Bitmap structure. |
* @param start Starting bit. |
* @param bits Number of bits to set. |
*/ |
void bitmap_set_range(bitmap_t *bitmap, index_t start, count_t bits) |
{ |
index_t i; |
index_t aligned_start; |
count_t lub; /* leading unaligned bits */ |
count_t amb; /* aligned middle bits */ |
count_t tab; /* trailing aligned bits */ |
ASSERT(start + bits <= bitmap->bits); |
aligned_start = ALIGN_UP(start, 8); |
lub = min(aligned_start - start, bits); |
amb = bits > lub ? bits - lub : 0; |
tab = amb % 8; |
if (lub) { |
/* |
* Make sure to set any leading unaligned bits. |
*/ |
bitmap->map[start / 8] |= ~((1 << (8 - lub)) - 1); |
} |
for (i = 0; i < amb / 8; i++) { |
/* |
* The middle bits can be set byte by byte. |
*/ |
bitmap->map[aligned_start / 8 + i] = ALL_ONES; |
} |
if (tab) { |
/* |
* Make sure to set any trailing aligned bits. |
*/ |
bitmap->map[aligned_start / 8 + i] |= (1 << tab) - 1; |
} |
} |
/** Clear range of bits. |
* |
* @param bitmap Bitmap structure. |
* @param start Starting bit. |
* @param bits Number of bits to clear. |
*/ |
void bitmap_clear_range(bitmap_t *bitmap, index_t start, count_t bits) |
{ |
index_t i; |
index_t aligned_start; |
count_t lub; /* leading unaligned bits */ |
count_t amb; /* aligned middle bits */ |
count_t tab; /* trailing aligned bits */ |
ASSERT(start + bits <= bitmap->bits); |
aligned_start = ALIGN_UP(start, 8); |
lub = min(aligned_start - start, bits); |
amb = bits > lub ? bits - lub : 0; |
tab = amb % 8; |
if (lub) { |
/* |
* Make sure to clear any leading unaligned bits. |
*/ |
bitmap->map[start / 8] &= (1 << (8 - lub)) - 1; |
} |
for (i = 0; i < amb / 8; i++) { |
/* |
* The middle bits can be cleared byte by byte. |
*/ |
bitmap->map[aligned_start / 8 + i] = ALL_ZEROES; |
} |
if (tab) { |
/* |
* Make sure to clear any trailing aligned bits. |
*/ |
bitmap->map[aligned_start / 8 + i] &= ~((1 << tab) - 1); |
} |
} |
/** Copy portion of one bitmap into another bitmap. |
* |
* @param dst Destination bitmap. |
* @param src Source bitmap. |
* @param bits Number of bits to copy. |
*/ |
void bitmap_copy(bitmap_t *dst, bitmap_t *src, count_t bits) |
{ |
index_t i; |
ASSERT(bits <= dst->bits); |
ASSERT(bits <= src->bits); |
for (i = 0; i < bits / 8; i++) |
dst->map[i] = src->map[i]; |
if (bits % 8) { |
bitmap_clear_range(dst, i * 8, bits % 8); |
dst->map[i] |= src->map[i] & ((1 << (bits % 8)) - 1); |
} |
} |
//kernel/trunk/generic/src/adt/btree.c |
---|
0,0 → 1,978 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file btree.c |
* @brief B+tree implementation. |
* |
* This file implements B+tree type and operations. |
* |
* The B+tree has the following properties: |
 * @li it is a balanced 3-4-5 tree (i.e. BTREE_M = 5)
* @li values (i.e. pointers to values) are stored only in leaves |
* @li leaves are linked in a list |
* |
 * Be careful when using these trees. They need to allocate
* and deallocate memory for their index nodes and as such |
* can sleep. |
*/ |
#include <adt/btree.h> |
#include <adt/list.h> |
#include <mm/slab.h> |
#include <debug.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <print.h> |
static void _btree_insert(btree_t *t, btree_key_t key, void *value, btree_node_t *rsubtree, btree_node_t *node); |
static void _btree_remove(btree_t *t, btree_key_t key, btree_node_t *node); |
static void node_initialize(btree_node_t *node); |
static void node_insert_key_and_lsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *lsubtree); |
static void node_insert_key_and_rsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree); |
static void node_remove_key_and_lsubtree(btree_node_t *node, btree_key_t key); |
static void node_remove_key_and_rsubtree(btree_node_t *node, btree_key_t key); |
static btree_node_t *node_split(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree, btree_key_t *median); |
static btree_node_t *node_combine(btree_node_t *node); |
static index_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, bool right); |
static void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, index_t idx); |
static void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, index_t idx); |
static bool try_insert_by_rotation_to_left(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree); |
static bool try_insert_by_rotation_to_right(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree); |
static bool try_rotation_from_left(btree_node_t *rnode); |
static bool try_rotation_from_right(btree_node_t *lnode); |
#define ROOT_NODE(n)		(!(n)->parent)
#define INDEX_NODE(n)		((n)->subtree[0] != NULL)
#define LEAF_NODE(n)		((n)->subtree[0] == NULL)

#define FILL_FACTOR		((BTREE_M - 1) / 2)

#define MEDIAN_LOW_INDEX(n)	(((n)->keys - 1) / 2)
#define MEDIAN_HIGH_INDEX(n)	((n)->keys / 2)
/*
 * Note: the trailing semicolons were removed from the two macros below.
 * They injected an empty statement at every use site and would break
 * any use of the macros in expression context (e.g. as a function
 * argument or inside a condition).
 */
#define MEDIAN_LOW(n)		((n)->key[MEDIAN_LOW_INDEX((n))])
#define MEDIAN_HIGH(n)		((n)->key[MEDIAN_HIGH_INDEX((n))])
static slab_cache_t *btree_node_slab; |
/** Initialize B-trees. */ |
void btree_init(void) |
{ |
btree_node_slab = slab_cache_create("btree_node_slab", sizeof(btree_node_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); |
} |
/** Create empty B-tree. |
* |
* @param t B-tree. |
*/ |
void btree_create(btree_t *t) |
{ |
list_initialize(&t->leaf_head); |
t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0); |
node_initialize(t->root); |
list_append(&t->root->leaf_link, &t->leaf_head); |
} |
/** Destroy empty B-tree. */ |
void btree_destroy(btree_t *t) |
{ |
ASSERT(!t->root->keys); |
slab_free(btree_node_slab, t->root); |
} |
/** Insert key-value pair into B-tree. |
* |
* @param t B-tree. |
* @param key Key to be inserted. |
* @param value Value to be inserted. |
* @param leaf_node Leaf node where the insertion should begin. |
*/ |
void btree_insert(btree_t *t, btree_key_t key, void *value, btree_node_t *leaf_node) |
{ |
btree_node_t *lnode; |
ASSERT(value); |
lnode = leaf_node; |
if (!lnode) { |
if (btree_search(t, key, &lnode)) { |
panic("B-tree %p already contains key %d\n", t, key); |
} |
} |
_btree_insert(t, key, value, NULL, lnode); |
} |
/** Recursively insert into B-tree.
 *
 * Insertion first tries the cheap paths (free slot in the node, then
 * rotation into a non-full sibling) and only splits the node as a last
 * resort, in which case the median key is propagated into the parent.
 *
 * @param t B-tree.
 * @param key Key to be inserted.
 * @param value Value to be inserted.
 * @param rsubtree Right subtree of the inserted key.
 * @param node Start inserting into this node.
 */
void _btree_insert(btree_t *t, btree_key_t key, void *value, btree_node_t *rsubtree, btree_node_t *node)
{
	if (node->keys < BTREE_MAX_KEYS) {
		/*
		 * Node contains enough space, the key can be stored immediately.
		 */
		node_insert_key_and_rsubtree(node, key, value, rsubtree);
	} else if (try_insert_by_rotation_to_left(node, key, value, rsubtree)) {
		/*
		 * The key-value-rsubtree triplet has been inserted because
		 * some keys could have been moved to the left sibling.
		 */
	} else if (try_insert_by_rotation_to_right(node, key, value, rsubtree)) {
		/*
		 * The key-value-rsubtree triplet has been inserted because
		 * some keys could have been moved to the right sibling.
		 */
	} else {
		btree_node_t *rnode;
		btree_key_t median;

		/*
		 * Node is full and both siblings (if both exist) are full too.
		 * Split the node and insert the smallest key from the node containing
		 * bigger keys (i.e. the new node) into its parent.
		 */
		rnode = node_split(node, key, value, rsubtree, &median);

		if (LEAF_NODE(node)) {
			/* Link the new right sibling next to node in the leaf list. */
			list_prepend(&rnode->leaf_link, &node->leaf_link);
		}

		if (ROOT_NODE(node)) {
			/*
			 * We split the root node. Create new root.
			 */
			t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0);
			node->parent = t->root;
			rnode->parent = t->root;
			node_initialize(t->root);

			/*
			 * Left-hand side subtree will be the old root (i.e. node).
			 * Right-hand side subtree will be rnode.
			 */
			t->root->subtree[0] = node;

			t->root->depth = node->depth + 1;
		}
		/*
		 * Propagate the median key (with rnode as its right subtree)
		 * into the parent; if the root was just split, the parent is
		 * the freshly created root with free space, so the recursion
		 * terminates there.
		 */
		_btree_insert(t, median, NULL, rnode, node->parent);
	}
}
/** Remove B-tree node. |
* |
* @param B-tree. |
* @param key Key to be removed from the B-tree along with its associated value. |
* @param leaf_node If not NULL, pointer to the leaf node where the key is found. |
*/ |
void btree_remove(btree_t *t, btree_key_t key, btree_node_t *leaf_node) |
{ |
btree_node_t *lnode; |
lnode = leaf_node; |
if (!lnode) { |
if (!btree_search(t, key, &lnode)) { |
panic("B-tree %p does not contain key %d\n", t, key); |
} |
} |
_btree_remove(t, key, lnode); |
} |
/** Recursively remove key from B-tree node.
 *
 * Removal first tries to rebalance an underfull node by borrowing from
 * a sibling; if both siblings are also at the fill factor, the node is
 * combined with one of them and the removal recurses into the parent.
 *
 * @param t B-tree.
 * @param key Key to be removed from the B-tree along with its associated value.
 * @param node Node where the key being removed resides.
 */
void _btree_remove(btree_t *t, btree_key_t key, btree_node_t *node)
{
	if (ROOT_NODE(node)) {
		if (node->keys == 1 && node->subtree[0]) {
			/*
			 * The root is an index node with a single key left;
			 * free the current root and set new root.
			 */
			t->root = node->subtree[0];
			t->root->parent = NULL;
			slab_free(btree_node_slab, node);
		} else {
			/*
			 * Remove the key from the root node.
			 * Note that the right subtree is removed because when
			 * combining two nodes, the left-side sibling is preserved
			 * and the right-side sibling is freed.
			 */
			node_remove_key_and_rsubtree(node, key);
		}
		return;
	}

	if (node->keys <= FILL_FACTOR) {
		/*
		 * If the node is below the fill factor,
		 * try to borrow keys from left or right sibling.
		 */
		if (!try_rotation_from_left(node))
			try_rotation_from_right(node);
	}

	if (node->keys > FILL_FACTOR) {
		int i;

		/*
		 * The key can be immediately removed.
		 *
		 * Note that the right subtree is removed because when
		 * combining two nodes, the left-side sibling is preserved
		 * and the right-side sibling is freed.
		 */
		node_remove_key_and_rsubtree(node, key);

		/*
		 * If the removed key also served as a separator in the
		 * parent, replace it there with this node's new smallest key.
		 */
		for (i = 0; i < node->parent->keys; i++) {
			if (node->parent->key[i] == key)
				node->parent->key[i] = node->key[0];
		}
	} else {
		index_t idx;
		btree_node_t *rnode, *parent;

		/*
		 * The node is below the fill factor as well as its left and right sibling.
		 * Resort to combining the node with one of its siblings.
		 * The node which is on the left is preserved and the node on the right is
		 * freed.
		 */
		parent = node->parent;
		node_remove_key_and_rsubtree(node, key);
		rnode = node_combine(node);
		if (LEAF_NODE(rnode))
			list_remove(&rnode->leaf_link);
		/* Find the parent separator of the freed right node... */
		idx = find_key_by_subtree(parent, rnode, true);
		ASSERT((int) idx != -1);
		slab_free(btree_node_slab, rnode);
		/* ...and remove it from the parent recursively. */
		_btree_remove(t, parent->key[idx], parent);
	}
}
/** Search key in a B-tree.
 *
 * @param t B-tree.
 * @param key Key to be searched.
 * @param leaf_node Address where to put pointer to visited leaf node.
 *		    Always written, even when the key is not found.
 *
 * @return Pointer to value or NULL if there is no such key.
 */
void *btree_search(btree_t *t, btree_key_t key, btree_node_t **leaf_node)
{
	btree_node_t *cur, *next;

	/*
	 * Iteratively descend to the leaf that can contain the searched key.
	 */
	for (cur = t->root; cur; cur = next) {

		/* Last iteration will set this with proper leaf node address. */
		*leaf_node = cur;

		/*
		 * The key can be in the leftmost subtree.
		 * Test it separately.
		 */
		if (key < cur->key[0]) {
			next = cur->subtree[0];
			continue;
		} else {
			void *val;
			int i;

			/*
			 * Now if the key is smaller than cur->key[i]
			 * it can only mean that the value is in cur->subtree[i]
			 * or it is not in the tree at all.
			 */
			for (i = 1; i < cur->keys; i++) {
				if (key < cur->key[i]) {
					next = cur->subtree[i];
					val = cur->value[i - 1];

					/* In a leaf, the match (if any) is at index i - 1. */
					if (LEAF_NODE(cur))
						return key == cur->key[i - 1] ? val : NULL;

					goto descend;
				}
			}

			/*
			 * Last possibility is that the key is in the rightmost subtree.
			 */
			next = cur->subtree[i];
			val = cur->value[i - 1];
			if (LEAF_NODE(cur))
				return key == cur->key[i - 1] ? val : NULL;
		}
		descend:
			;
	}

	/*
	 * The key was not found in the *leaf_node and is smaller than any of its keys.
	 */
	return NULL;
}
/** Return pointer to B-tree leaf node's left neighbour. |
* |
* @param t B-tree. |
* @param node Node whose left neighbour will be returned. |
* |
* @return Left neighbour of the node or NULL if the node does not have the left neighbour. |
*/ |
btree_node_t *btree_leaf_node_left_neighbour(btree_t *t, btree_node_t *node) |
{ |
ASSERT(LEAF_NODE(node)); |
if (node->leaf_link.prev != &t->leaf_head) |
return list_get_instance(node->leaf_link.prev, btree_node_t, leaf_link); |
else |
return NULL; |
} |
/** Return pointer to B-tree leaf node's right neighbour. |
* |
* @param t B-tree. |
* @param node Node whose right neighbour will be returned. |
* |
* @return Right neighbour of the node or NULL if the node does not have the right neighbour. |
*/ |
btree_node_t *btree_leaf_node_right_neighbour(btree_t *t, btree_node_t *node) |
{ |
ASSERT(LEAF_NODE(node)); |
if (node->leaf_link.next != &t->leaf_head) |
return list_get_instance(node->leaf_link.next, btree_node_t, leaf_link); |
else |
return NULL; |
} |
/** Initialize B-tree node. |
* |
* @param node B-tree node. |
*/ |
void node_initialize(btree_node_t *node) |
{ |
int i; |
node->keys = 0; |
/* Clean also space for the extra key. */ |
for (i = 0; i < BTREE_MAX_KEYS + 1; i++) { |
node->key[i] = 0; |
node->value[i] = NULL; |
node->subtree[i] = NULL; |
} |
node->subtree[i] = NULL; |
node->parent = NULL; |
link_initialize(&node->leaf_link); |
link_initialize(&node->bfs_link); |
node->depth = 0; |
} |
/** Insert key-value-lsubtree triplet into B-tree node.
 *
 * It is actually possible to have more keys than BTREE_MAX_KEYS.
 * This feature is used during insert by right rotation.
 *
 * @param node B-tree node into which the new key is to be inserted.
 * @param key The key to be inserted.
 * @param value Pointer to value to be inserted.
 * @param lsubtree Pointer to the left subtree.
 */
void node_insert_key_and_lsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *lsubtree)
{
	int i;

	/* Find the position of the first key bigger than the inserted key. */
	for (i = 0; i < node->keys; i++) {
		if (key < node->key[i]) {
			int j;

			/* Shift keys, values and right subtrees one slot right. */
			for (j = node->keys; j > i; j--) {
				node->key[j] = node->key[j - 1];
				node->value[j] = node->value[j - 1];
				node->subtree[j + 1] = node->subtree[j];
			}
			/*
			 * After the loop j == i; this extra shift also vacates
			 * subtree slot i so the new LEFT subtree can go there.
			 */
			node->subtree[j + 1] = node->subtree[j];
			break;
		}
	}
	node->key[i] = key;
	node->value[i] = value;
	node->subtree[i] = lsubtree;

	node->keys++;
}
/** Insert key-value-rsubtree triplet into B-tree node.
 *
 * It is actually possible to have more keys than BTREE_MAX_KEYS.
 * This feature is used during splitting the node when the
 * number of keys is BTREE_MAX_KEYS + 1. Insert by left rotation
 * also makes use of this feature.
 *
 * @param node B-tree node into which the new key is to be inserted.
 * @param key The key to be inserted.
 * @param value Pointer to value to be inserted.
 * @param rsubtree Pointer to the right subtree.
 */
void node_insert_key_and_rsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree)
{
	int i;

	/* Find the position of the first key bigger than the inserted key. */
	for (i = 0; i < node->keys; i++) {
		if (key < node->key[i]) {
			int j;

			/* Shift keys, values and right subtrees one slot right. */
			for (j = node->keys; j > i; j--) {
				node->key[j] = node->key[j - 1];
				node->value[j] = node->value[j - 1];
				node->subtree[j + 1] = node->subtree[j];
			}
			break;
		}
	}
	/* Slot i is now free; the right subtree goes one slot past the key. */
	node->key[i] = key;
	node->value[i] = value;
	node->subtree[i + 1] = rsubtree;

	node->keys++;
}
/** Remove key and its left subtree pointer from B-tree node.
 *
 * Remove the key and eliminate gaps in node->key array.
 * Note that the value pointer and the left subtree pointer
 * is removed from the node as well.
 *
 * @param node B-tree node.
 * @param key Key to be removed.
 */
void node_remove_key_and_lsubtree(btree_node_t *node, btree_key_t key)
{
	int i, j;

	for (i = 0; i < node->keys; i++) {
		if (key == node->key[i]) {
			/* Close the gap left by key i. */
			for (j = i + 1; j < node->keys; j++) {
				node->key[j - 1] = node->key[j];
				node->value[j - 1] = node->value[j];
				node->subtree[j - 1] = node->subtree[j];
			}
			/*
			 * After the loop j == node->keys; also shift the
			 * rightmost subtree pointer, which has no key.
			 */
			node->subtree[j - 1] = node->subtree[j];
			node->keys--;
			return;
		}
	}
	panic("node %p does not contain key %d\n", node, key);
}
/** Remove key and its right subtree pointer from B-tree node.
 *
 * Remove the key and eliminate gaps in node->key array.
 * Note that the value pointer and the right subtree pointer
 * is removed from the node as well.
 *
 * @param node B-tree node.
 * @param key Key to be removed.
 */
void node_remove_key_and_rsubtree(btree_node_t *node, btree_key_t key)
{
	int i, j;

	for (i = 0; i < node->keys; i++) {
		if (key == node->key[i]) {
			/*
			 * Close the gap left by key i; the right subtree of
			 * each shifted key moves along with it, which drops
			 * subtree[i + 1] (the removed key's right subtree).
			 */
			for (j = i + 1; j < node->keys; j++) {
				node->key[j - 1] = node->key[j];
				node->value[j - 1] = node->value[j];
				node->subtree[j] = node->subtree[j + 1];
			}
			node->keys--;
			return;
		}
	}
	panic("node %p does not contain key %d\n", node, key);
}
/** Split full B-tree node and insert new key-value-right-subtree triplet.
 *
 * This function will split a node and return pointer to a newly created
 * node containing keys greater than or equal to the greater of medians
 * (or median) of the old keys and the newly added key. It will also write
 * the median key to a memory address supplied by the caller.
 *
 * If the node being split is an index node, the median will not be
 * included in the new node. If the node is a leaf node,
 * the median will be copied there.
 *
 * @param node B-tree node which is going to be split.
 * @param key The key to be inserted.
 * @param value Pointer to the value to be inserted.
 * @param rsubtree Pointer to the right subtree of the key being added.
 * @param median Address in memory, where the median key will be stored.
 *
 * @return Newly created right sibling of node.
 */
btree_node_t *node_split(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree, btree_key_t *median)
{
	btree_node_t *rnode;
	int i, j;

	ASSERT(median);
	ASSERT(node->keys == BTREE_MAX_KEYS);

	/*
	 * Use the extra space to store the extra node.
	 */
	node_insert_key_and_rsubtree(node, key, value, rsubtree);

	/*
	 * Compute median of keys.
	 */
	*median = MEDIAN_HIGH(node);

	/*
	 * Allocate and initialize new right sibling.
	 */
	rnode = (btree_node_t *) slab_alloc(btree_node_slab, 0);
	node_initialize(rnode);
	rnode->parent = node->parent;
	rnode->depth = node->depth;

	/*
	 * Copy big keys, values and subtree pointers to the new right sibling.
	 * If this is an index node, do not copy the median.
	 */
	i = (int) INDEX_NODE(node);	/* 1 skips the median in index nodes, 0 keeps it in leaves */
	for (i += MEDIAN_HIGH_INDEX(node), j = 0; i < node->keys; i++, j++) {
		rnode->key[j] = node->key[i];
		rnode->value[j] = node->value[i];
		rnode->subtree[j] = node->subtree[i];

		/*
		 * Fix parent links in subtrees.
		 */
		if (rnode->subtree[j])
			rnode->subtree[j]->parent = rnode;

	}
	/* Copy the rightmost subtree pointer, which has no key of its own. */
	rnode->subtree[j] = node->subtree[i];
	if (rnode->subtree[j])
		rnode->subtree[j]->parent = rnode;

	rnode->keys = j;	/* Set number of keys of the new node. */
	node->keys /= 2;	/* Shrink the old node. */

	return rnode;
}
/** Combine node with any of its siblings.
 *
 * The siblings are required to be below the fill factor.
 * The left-hand node of the pair absorbs everything; the caller is
 * expected to free the returned right-hand node.
 *
 * @param node Node to combine with one of its siblings.
 *
 * @return Pointer to the rightmost of the two nodes.
 */
btree_node_t *node_combine(btree_node_t *node)
{
	index_t idx;
	btree_node_t *rnode;
	int i;

	ASSERT(!ROOT_NODE(node));

	idx = find_key_by_subtree(node->parent, node, false);
	if (idx == node->parent->keys) {
		/*
		 * Rightmost subtree of its parent, combine with the left sibling.
		 */
		idx--;
		rnode = node;
		node = node->parent->subtree[idx];
	} else {
		rnode = node->parent->subtree[idx + 1];
	}

	/* Index nodes need to insert parent node key in between left and right node. */
	if (INDEX_NODE(node))
		node->key[node->keys++] = node->parent->key[idx];

	/* Copy the key-value-subtree triplets from the right node. */
	for (i = 0; i < rnode->keys; i++) {
		node->key[node->keys + i] = rnode->key[i];
		node->value[node->keys + i] = rnode->value[i];
		if (INDEX_NODE(node)) {
			node->subtree[node->keys + i] = rnode->subtree[i];
			rnode->subtree[i]->parent = node;
		}
	}
	/* Move the rightmost subtree pointer, which has no key of its own. */
	if (INDEX_NODE(node)) {
		node->subtree[node->keys + i] = rnode->subtree[i];
		rnode->subtree[i]->parent = node;
	}

	node->keys += rnode->keys;

	return rnode;
}
/** Find key by its left or right subtree. |
* |
* @param node B-tree node. |
* @param subtree Left or right subtree of a key found in node. |
* @param right If true, subtree is a right subtree. If false, subtree is a left subtree. |
* |
* @return Index of the key associated with the subtree. |
*/ |
index_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, bool right) |
{ |
int i; |
for (i = 0; i < node->keys + 1; i++) { |
if (subtree == node->subtree[i]) |
return i - (int) (right != false); |
} |
panic("node %p does not contain subtree %p\n", node, subtree); |
} |
/** Rotate one key-value-rsubtree triplet from the left sibling to the right sibling.
 *
 * The biggest key and its value and right subtree is rotated from the left node
 * to the right. If the node is an index node, then the parent node key belonging to
 * the left node takes part in the rotation.
 *
 * @param lnode Left sibling.
 * @param rnode Right sibling.
 * @param idx Index of the parent node key that is taking part in the rotation.
 */
void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, index_t idx)
{
	btree_key_t key;

	key = lnode->key[lnode->keys - 1];

	if (LEAF_NODE(lnode)) {
		void *value;

		/* In leaves, the key travels together with its value. */
		value = lnode->value[lnode->keys - 1];
		node_remove_key_and_rsubtree(lnode, key);
		node_insert_key_and_lsubtree(rnode, key, value, NULL);
		lnode->parent->key[idx] = key;
	} else {
		btree_node_t *rsubtree;

		/*
		 * In index nodes, the parent separator moves down into rnode
		 * while lnode's biggest key moves up to replace it.
		 */
		rsubtree = lnode->subtree[lnode->keys];
		node_remove_key_and_rsubtree(lnode, key);
		node_insert_key_and_lsubtree(rnode, lnode->parent->key[idx], NULL, rsubtree);
		lnode->parent->key[idx] = key;

		/* Fix parent link of the reconnected right subtree. */
		rsubtree->parent = rnode;
	}
}
/** Rotate one key-value-lsubtree triplet from the right sibling to the left sibling.
 *
 * The smallest key and its value and left subtree is rotated from the right node
 * to the left. If the node is an index node, then the parent node key belonging to
 * the right node takes part in the rotation.
 *
 * @param lnode Left sibling.
 * @param rnode Right sibling.
 * @param idx Index of the parent node key that is taking part in the rotation.
 */
void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, index_t idx)
{
	btree_key_t key;

	key = rnode->key[0];

	if (LEAF_NODE(rnode)) {
		void *value;

		/* In leaves, the key travels together with its value. */
		value = rnode->value[0];
		node_remove_key_and_lsubtree(rnode, key);
		node_insert_key_and_rsubtree(lnode, key, value, NULL);
		/* The parent separator becomes rnode's new smallest key. */
		rnode->parent->key[idx] = rnode->key[0];
	} else {
		btree_node_t *lsubtree;

		/*
		 * In index nodes, the parent separator moves down into lnode
		 * while rnode's smallest key moves up to replace it.
		 */
		lsubtree = rnode->subtree[0];
		node_remove_key_and_lsubtree(rnode, key);
		node_insert_key_and_rsubtree(lnode, rnode->parent->key[idx], NULL, lsubtree);
		rnode->parent->key[idx] = key;

		/* Fix parent link of the reconnected left subtree. */
		lsubtree->parent = lnode;
	}
}
/** Insert key-value-rsubtree triplet and rotate the node to the left, if this operation can be done. |
* |
* Left sibling of the node (if it exists) is checked for free space. |
* If there is free space, the key is inserted and the smallest key of |
* the node is moved there. The index node which is the parent of both |
* nodes is fixed. |
* |
* @param node B-tree node. |
* @param inskey Key to be inserted. |
* @param insvalue Value to be inserted. |
* @param rsubtree Right subtree of inskey. |
* |
* @return True if the rotation was performed, false otherwise. |
*/ |
bool try_insert_by_rotation_to_left(btree_node_t *node, btree_key_t inskey, void *insvalue, btree_node_t *rsubtree) |
{ |
index_t idx; |
btree_node_t *lnode; |
/* |
* If this is root node, the rotation can not be done. |
*/ |
if (ROOT_NODE(node)) |
return false; |
idx = find_key_by_subtree(node->parent, node, true); |
if ((int) idx == -1) { |
/* |
* If this node is the leftmost subtree of its parent, |
* the rotation can not be done. |
*/ |
return false; |
} |
lnode = node->parent->subtree[idx]; |
if (lnode->keys < BTREE_MAX_KEYS) { |
/* |
* The rotaion can be done. The left sibling has free space. |
*/ |
node_insert_key_and_rsubtree(node, inskey, insvalue, rsubtree); |
rotate_from_right(lnode, node, idx); |
return true; |
} |
return false; |
} |
/** Insert key-value-rsubtree triplet and rotate the node to the right, if this operation can be done. |
* |
* Right sibling of the node (if it exists) is checked for free space. |
* If there is free space, the key is inserted and the biggest key of |
* the node is moved there. The index node which is the parent of both |
* nodes is fixed. |
* |
* @param node B-tree node. |
* @param inskey Key to be inserted. |
* @param insvalue Value to be inserted. |
* @param rsubtree Right subtree of inskey. |
* |
* @return True if the rotation was performed, false otherwise. |
*/ |
bool try_insert_by_rotation_to_right(btree_node_t *node, btree_key_t inskey, void *insvalue, btree_node_t *rsubtree) |
{ |
index_t idx; |
btree_node_t *rnode; |
/* |
* If this is root node, the rotation can not be done. |
*/ |
if (ROOT_NODE(node)) |
return false; |
idx = find_key_by_subtree(node->parent, node, false); |
if (idx == node->parent->keys) { |
/* |
* If this node is the rightmost subtree of its parent, |
* the rotation can not be done. |
*/ |
return false; |
} |
rnode = node->parent->subtree[idx + 1]; |
if (rnode->keys < BTREE_MAX_KEYS) { |
/* |
* The rotaion can be done. The right sibling has free space. |
*/ |
node_insert_key_and_rsubtree(node, inskey, insvalue, rsubtree); |
rotate_from_left(node, rnode, idx); |
return true; |
} |
return false; |
} |
/** Rotate in a key from the left sibling or from the index node, if this operation can be done. |
* |
* @param rnode Node into which to add key from its left sibling or from the index node. |
* |
* @return True if the rotation was performed, false otherwise. |
*/ |
bool try_rotation_from_left(btree_node_t *rnode) |
{ |
index_t idx; |
btree_node_t *lnode; |
/* |
* If this is root node, the rotation can not be done. |
*/ |
if (ROOT_NODE(rnode)) |
return false; |
idx = find_key_by_subtree(rnode->parent, rnode, true); |
if ((int) idx == -1) { |
/* |
* If this node is the leftmost subtree of its parent, |
* the rotation can not be done. |
*/ |
return false; |
} |
lnode = rnode->parent->subtree[idx]; |
if (lnode->keys > FILL_FACTOR) { |
rotate_from_left(lnode, rnode, idx); |
return true; |
} |
return false; |
} |
/** Rotate in a key from the right sibling or from the index node, if this operation can be done. |
* |
* @param rnode Node into which to add key from its right sibling or from the index node. |
* |
* @return True if the rotation was performed, false otherwise. |
*/ |
bool try_rotation_from_right(btree_node_t *lnode) |
{ |
index_t idx; |
btree_node_t *rnode; |
/* |
* If this is root node, the rotation can not be done. |
*/ |
if (ROOT_NODE(lnode)) |
return false; |
idx = find_key_by_subtree(lnode->parent, lnode, false); |
if (idx == lnode->parent->keys) { |
/* |
* If this node is the rightmost subtree of its parent, |
* the rotation can not be done. |
*/ |
return false; |
} |
rnode = lnode->parent->subtree[idx + 1]; |
if (rnode->keys > FILL_FACTOR) { |
rotate_from_right(lnode, rnode, idx); |
return true; |
} |
return false; |
} |
/** Print B-tree.
 *
 * Prints the tree level by level (BFS order) followed by the sorted
 * list of leaves.
 *
 * @param t B-tree to print out.
 */
void btree_print(btree_t *t)
{
	int i, depth = t->root->depth;
	link_t head, *cur;

	printf("Printing B-tree:\n");
	list_initialize(&head);
	list_append(&t->root->bfs_link, &head);

	/*
	 * Use BFS search to print out the tree.
	 * Levels are distinguished from one another by node->depth.
	 */
	while (!list_empty(&head)) {
		link_t *hlp;
		btree_node_t *node;

		hlp = head.next;
		ASSERT(hlp != &head);
		node = list_get_instance(hlp, btree_node_t, bfs_link);
		list_remove(hlp);

		ASSERT(node);

		if (node->depth != depth) {
			/* A new (lower) level starts; break the line. */
			printf("\n");
			depth = node->depth;
		}

		printf("(");
		for (i = 0; i < node->keys; i++) {
			/* NOTE(review): %lld assumes btree_key_t is a 64-bit integer — confirm against adt/btree.h. */
			printf("%lld%s", node->key[i], i < node->keys - 1 ? "," : "");
			if (node->depth && node->subtree[i]) {
				list_append(&node->subtree[i]->bfs_link, &head);
			}
		}
		/* Enqueue the rightmost subtree as well. */
		if (node->depth && node->subtree[i]) {
			list_append(&node->subtree[i]->bfs_link, &head);
		}
		printf(")");
	}
	printf("\n");

	printf("Printing list of leaves:\n");
	for (cur = t->leaf_head.next; cur != &t->leaf_head; cur = cur->next) {
		btree_node_t *node;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		ASSERT(node);

		printf("(");
		for (i = 0; i < node->keys; i++)
			printf("%lld%s", node->key[i], i < node->keys - 1 ? "," : "");
		printf(")");
	}
	printf("\n");
}
//kernel/trunk/generic/src/adt/hash_table.c |
---|
0,0 → 1,174 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file hash_table.c |
* @brief Implementation of generic chained hash table. |
* |
* This file contains implementation of generic chained hash table. |
*/ |
#include <adt/hash_table.h> |
#include <adt/list.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <debug.h> |
#include <mm/slab.h> |
#include <memstr.h> |
/** Create chained hash table. |
* |
* @param h Hash table structure. Will be initialized by this call. |
* @param m Number of slots in the hash table. |
* @param max_keys Maximal number of keys needed to identify an item. |
* @param op Hash table operations structure. |
*/ |
void hash_table_create(hash_table_t *h, count_t m, count_t max_keys, hash_table_operations_t *op)
{
	count_t i;	/* was int: comparing a signed int against count_t m mixes signedness */

	ASSERT(h);
	ASSERT(op && op->hash && op->compare);
	ASSERT(max_keys > 0);

	h->entry = malloc(m * sizeof(link_t), 0);
	if (!h->entry) {
		panic("cannot allocate memory for hash table\n");
	}
	memsetb((__address) h->entry, m * sizeof(link_t), 0);

	/* Every slot starts as an empty chain. */
	for (i = 0; i < m; i++)
		list_initialize(&h->entry[i]);

	h->entries = m;
	h->max_keys = max_keys;
	h->op = op;
}
/** Insert item into hash table. |
* |
* @param h Hash table. |
 * @param key Array of all keys necessary to compute hash index.
* @param item Item to be inserted into the hash table. |
*/ |
void hash_table_insert(hash_table_t *h, __native key[], link_t *item)
{
	index_t slot;

	ASSERT(item);
	ASSERT(h && h->op && h->op->hash && h->op->compare);

	/* Hash the key to pick the destination chain. */
	slot = h->op->hash(key);
	ASSERT(slot < h->entries);

	list_append(item, &h->entry[slot]);
}
/** Search hash table for an item matching keys. |
* |
* @param h Hash table. |
* @param key Array of all keys needed to compute hash index. |
* |
* @return Matching item on success, NULL if there is no such item. |
*/ |
link_t *hash_table_find(hash_table_t *h, __native key[])
{
	index_t slot;
	link_t *walk;

	ASSERT(h && h->op && h->op->hash && h->op->compare);

	slot = h->op->hash(key);
	ASSERT(slot < h->entries);

	/* Walk the chain; return the first entry whose full key matches. */
	for (walk = h->entry[slot].next; walk != &h->entry[slot]; walk = walk->next) {
		if (h->op->compare(key, h->max_keys, walk))
			return walk;
	}

	return NULL;
}
/** Remove all matching items from hash table. |
* |
* For each removed item, h->remove_callback() is called. |
* |
* @param h Hash table. |
* @param key Array of keys that will be compared against items of the hash table. |
* @param keys Number of keys in the key array. |
*/ |
void hash_table_remove(hash_table_t *h, __native key[], count_t keys)
{
	index_t chain;
	link_t *cur;

	ASSERT(h && h->op && h->op->hash && h->op->compare && h->op->remove_callback);
	ASSERT(keys <= h->max_keys);

	if (keys == h->max_keys) {
		/*
		 * All keys are known, hash_table_find() can be used to find the entry.
		 */
		cur = hash_table_find(h, key);
		if (cur) {
			list_remove(cur);
			h->op->remove_callback(cur);
		}
		return;
	}

	/*
	 * Fewer keys were passed.
	 * Any partially matching entries are to be removed.
	 * With a partial key the hash cannot be computed, so every chain
	 * must be scanned.
	 */
	for (chain = 0; chain < h->entries; chain++) {
		for (cur = h->entry[chain].next; cur != &h->entry[chain]; cur = cur->next) {
			if (h->op->compare(key, keys, cur)) {
				link_t *hlp;

				/*
				 * Step back to the predecessor before unlinking,
				 * so the loop's cur = cur->next lands on the
				 * element that followed the removed one.
				 */
				hlp = cur;
				cur = cur->prev;

				list_remove(hlp);
				h->op->remove_callback(hlp);

				continue;
			}
		}
	}
}
//kernel/trunk/generic/src/adt/list.c |
---|
0,0 → 1,87 |
/* |
* Copyright (C) 2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file list.c |
* @brief Functions completing doubly linked circular list implementaion. |
* |
* This file contains some of the functions implementing doubly linked circular lists. |
* However, this ADT is mostly implemented in @ref list.h. |
*/ |
#include <adt/list.h> |
/** Check for membership |
* |
* Check whether link is contained in the list head. |
* The membership is defined as pointer equivalence. |
* |
* @param link Item to look for. |
* @param head List to look in. |
* |
* @return true if link is contained in head, false otherwise. |
* |
*/ |
bool list_member(const link_t *link, const link_t *head) |
{ |
bool found = false; |
link_t *hlp = head->next; |
while (hlp != head) { |
if (hlp == link) { |
found = true; |
break; |
} |
hlp = hlp->next; |
} |
return found; |
} |
/** Concatenate two lists |
* |
* Concatenate lists head1 and head2, producing a single |
* list head1 containing items from both (in head1, head2 |
* order) and empty list head2. |
* |
* @param head1 First list and concatenated output |
* @param head2 Second list and empty output. |
* |
*/ |
void list_concat(link_t *head1, link_t *head2)
{
	/* Nothing to splice in; head1 stays as it is. */
	if (list_empty(head2))
		return;

	/*
	 * Splice head2's chain between head1's last element and head1.
	 * The assignment order matters: head1->prev must still point at
	 * head1's old tail when the first two links are rewritten.
	 */
	head2->next->prev = head1->prev;
	head2->prev->next = head1;
	head1->prev->next = head2->next;
	head1->prev = head2->prev;
	/* head2 is left as a valid empty list. */
	list_initialize(head2);
}
//kernel/trunk/generic/src/ipc/ipc.c |
---|
0,0 → 1,530 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/* Lock ordering |
* |
* First the answerbox, then the phone |
*/ |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <ipc/ipc.h> |
#include <errno.h> |
#include <mm/slab.h> |
#include <arch.h> |
#include <proc/task.h> |
#include <memstr.h> |
#include <debug.h> |
#include <print.h> |
#include <proc/thread.h> |
#include <arch/interrupt.h> |
/* Open channel that is assigned automatically to new tasks */ |
answerbox_t *ipc_phone_0 = NULL; |
static slab_cache_t *ipc_call_slab; |
typedef struct { |
SPINLOCK_DECLARE(lock); |
answerbox_t *box; |
} ipc_irq_t; |
static ipc_irq_t *irq_conns = NULL; |
static int irq_conns_size; |
/* Initialize new call */ |
static void _ipc_call_init(call_t *call)
{
	/* Start from a zeroed structure, then wire the reply path to us. */
	memsetb((__address) call, sizeof(*call), 0);
	call->callerbox = &TASK->answerbox;
	call->sender = TASK;
}
/** Allocate & initialize call structure |
* |
* The call is initialized, so that the reply will be directed |
* to TASK->answerbox |
* |
* @param flags Parameters for slab_alloc (ATOMIC, etc.) |
*/ |
call_t * ipc_call_alloc(int flags) |
{ |
call_t *call; |
call = slab_alloc(ipc_call_slab, flags); |
_ipc_call_init(call); |
return call; |
} |
/** Initialize allocated call */ |
void ipc_call_static_init(call_t *call)
{
	_ipc_call_init(call);
	/* Mark the call so it is never handed back to the slab allocator. */
	call->flags |= IPC_CALL_STATIC_ALLOC;
}
/** Deallocate call structure */
void ipc_call_free(call_t *call)
{
	/* Return the dynamically allocated call to its slab cache. */
	slab_free(ipc_call_slab, call);
}
/** Initialize answerbox structure |
*/ |
void ipc_answerbox_init(answerbox_t *box)
{
	/* Two locks: 'lock' for normal traffic, 'irq_lock' for irq notifications. */
	spinlock_initialize(&box->lock, "ipc_box_lock");
	spinlock_initialize(&box->irq_lock, "ipc_box_irqlock");
	waitq_initialize(&box->wq);

	/* All queues start out empty. */
	list_initialize(&box->connected_phones);
	list_initialize(&box->calls);
	list_initialize(&box->dispatched_calls);
	list_initialize(&box->answers);
	list_initialize(&box->irq_notifs);

	/* The box is owned by the task that created it. */
	box->task = TASK;
}
/** Connect phone to answerbox */ |
void ipc_phone_connect(phone_t *phone, answerbox_t *box)
{
	spinlock_lock(&phone->lock);

	/* The phone must not be connected anywhere yet. */
	ASSERT(!phone->callee);

	phone->busy = IPC_BUSY_CONNECTED;
	phone->callee = box;

	/* Publish the phone on the box's list of connected phones. */
	spinlock_lock(&box->lock);
	list_append(&phone->list, &box->connected_phones);
	spinlock_unlock(&box->lock);
	spinlock_unlock(&phone->lock);
}
/** Initialize phone structure and connect phone to answerbox |
*/ |
void ipc_phone_init(phone_t *phone)
{
	/* A freshly initialized phone is free and connected to nothing. */
	spinlock_initialize(&phone->lock, "phone_lock");
	phone->busy = IPC_BUSY_FREE;
	phone->callee = NULL;
	atomic_set(&phone->active_calls, 0);
}
/** Helper function to facilitate synchronous calls */ |
void ipc_call_sync(phone_t *phone, call_t *request)
{
	answerbox_t sync_box;

	ipc_answerbox_init(&sync_box);

	/* Route the answer to a private box and block until it arrives there. */
	request->callerbox = &sync_box;

	ipc_call(phone, request);
	ipc_wait_for_call(&sync_box, 0);
}
/** Answer message that was not dispatched and is not entered in |
* any queue |
*/ |
static void _ipc_answer_free_call(call_t *call)
{
	answerbox_t *callerbox = call->callerbox;

	call->flags |= IPC_CALL_ANSWERED;

	/* Queue the answer in the caller's box and wake the caller up. */
	spinlock_lock(&callerbox->lock);
	list_append(&call->list, &callerbox->answers);
	spinlock_unlock(&callerbox->lock);
	waitq_wakeup(&callerbox->wq, 0);
}
/** Answer message, that is in callee queue |
* |
* @param box Answerbox that is answering the message |
* @param call Modified request that is being sent back |
*/ |
void ipc_answer(answerbox_t *box, call_t *call)
{
	/* Take the call out of the box it was dispatched to... */
	spinlock_lock(&box->lock);
	list_remove(&call->list);
	spinlock_unlock(&box->lock);

	/* ...and route the answer back to the caller. */
	_ipc_answer_free_call(call);
}
/** Simulate sending back a message |
* |
* Most errors are better handled by forming a normal backward |
* message and sending it as a normal answer. |
*/ |
void ipc_backsend_err(phone_t *phone, call_t *call, __native err)
{
	call->data.phone = phone;
	atomic_inc(&phone->active_calls);
	/*
	 * Use the error code the caller supplied.  The previous code ignored
	 * the err parameter and recomputed EHANGUP/ENOENT from phone->busy,
	 * so callers passing other codes (e.g. ELIMIT from sysipc.c) had
	 * their error silently replaced.
	 */
	IPC_SET_RETVAL(call->data, err);
	_ipc_answer_free_call(call);
}
/* Unsafe, unchecked variant of ipc_call() — caller must hold phone->lock */
static void _ipc_call(phone_t *phone, answerbox_t *box, call_t *call)
{
	/* Forwarded calls already carry their originating phone. */
	if (! (call->flags & IPC_CALL_FORWARDED)) {
		atomic_inc(&phone->active_calls);
		call->data.phone = phone;
	}

	/* Enqueue the request and wake up a receiver waiting on the box. */
	spinlock_lock(&box->lock);
	list_append(&call->list, &box->calls);
	spinlock_unlock(&box->lock);
	waitq_wakeup(&box->wq, 0);
}
/** Send a asynchronous request using phone to answerbox |
* |
* @param phone Phone connected to answerbox |
* @param request Request to be sent |
*/ |
int ipc_call(phone_t *phone, call_t *call)
{
	answerbox_t *box;

	spinlock_lock(&phone->lock);
	box = phone->callee;
	if (!box) {
		/* Trying to send over disconnected phone */
		spinlock_unlock(&phone->lock);
		if (call->flags & IPC_CALL_FORWARDED) {
			IPC_SET_RETVAL(call->data, EFORWARD);
			_ipc_answer_free_call(call);
		} else { /* Simulate sending back a message */
			/*
			 * NOTE(review): phone->busy is read after the phone
			 * lock was dropped — verify this race is benign.
			 */
			if (phone->busy == IPC_BUSY_CONNECTED)
				ipc_backsend_err(phone, call, EHANGUP);
			else
				ipc_backsend_err(phone, call, ENOENT);
		}
		return ENOENT;
	}
	_ipc_call(phone, box, call);
	spinlock_unlock(&phone->lock);
	return 0;
}
/** Disconnect phone from answerbox |
* |
* It is allowed to call disconnect on already disconnected phone |
* |
* @return 0 - phone disconnected, -1 - the phone was already disconnected |
*/ |
int ipc_phone_hangup(phone_t *phone)
{
	answerbox_t *box;
	call_t *call;

	spinlock_lock(&phone->lock);
	box = phone->callee;
	if (!box) {
		/* A connection attempt is still in progress; refuse to hang up. */
		if (phone->busy == IPC_BUSY_CONNECTING) {
			spinlock_unlock(&phone->lock);
			return -1;
		}
		/* Already disconnected phone */
		phone->busy = IPC_BUSY_FREE;
		spinlock_unlock(&phone->lock);
		return 0;
	}

	/* Unlink the phone from the callee's answerbox. */
	spinlock_lock(&box->lock);
	list_remove(&phone->list);
	phone->callee = NULL;
	spinlock_unlock(&box->lock);

	/* Tell the server side we hung up; the answer is not interesting. */
	call = ipc_call_alloc(0);
	IPC_SET_METHOD(call->data, IPC_M_PHONE_HUNGUP);
	call->flags |= IPC_CALL_DISCARD_ANSWER;
	_ipc_call(phone, box, call);

	phone->busy = IPC_BUSY_FREE;
	spinlock_unlock(&phone->lock);

	return 0;
}
/** Forwards call from one answerbox to a new one |
* |
 * @param call Request to be forwarded
 * @param newphone Phone to the target answerbox
 * @param oldbox Old answerbox
* @return 0 on forward ok, error code, if there was error |
* |
* - the return value serves only as an information for the forwarder, |
* the original caller is notified automatically with EFORWARD |
*/ |
int ipc_forward(call_t *call, phone_t *newphone, answerbox_t *oldbox)
{
	/* Detach the request from the old box, then resend it over newphone. */
	spinlock_lock(&oldbox->lock);
	list_remove(&call->list);
	spinlock_unlock(&oldbox->lock);

	return ipc_call(newphone, call);
}
/** Wait for phone call |
* |
* @return Recived message address |
* - to distinguish between call and answer, look at call->flags |
*/ |
call_t * ipc_wait_for_call(answerbox_t *box, int flags)
{
	call_t *request;
	ipl_t ipl;

restart:
	if (flags & IPC_WAIT_NONBLOCKING) {
		if (waitq_sleep_timeout(&box->wq,0,1) == ESYNCH_WOULD_BLOCK)
			return NULL;
	} else
		waitq_sleep(&box->wq);

	spinlock_lock(&box->lock);
	if (!list_empty(&box->irq_notifs)) {
		/* Handle interrupt notifications first */
		ipl = interrupts_disable();
		spinlock_lock(&box->irq_lock);

		/*
		 * BUGFIX: dequeue from irq_notifs — the original code read
		 * box->answers.next here, taking a call from the wrong queue
		 * (and possibly from an empty one).
		 */
		request = list_get_instance(box->irq_notifs.next, call_t, list);
		list_remove(&request->list);

		spinlock_unlock(&box->irq_lock);
		interrupts_restore(ipl);
	} else if (!list_empty(&box->answers)) {
		/* Handle asynchronous answers */
		request = list_get_instance(box->answers.next, call_t, list);
		list_remove(&request->list);
		atomic_dec(&request->data.phone->active_calls);
	} else if (!list_empty(&box->calls)) {
		/* Handle requests */
		request = list_get_instance(box->calls.next, call_t, list);
		list_remove(&request->list);
		/* Append request to dispatch queue */
		list_append(&request->list, &box->dispatched_calls);
	} else {
		/* This can happen regularly after ipc_cleanup, remove
		 * the warning in the future when the IPC is
		 * more debugged */
		printf("WARNING: Spurious IPC wakeup.\n");
		spinlock_unlock(&box->lock);
		goto restart;
	}
	spinlock_unlock(&box->lock);
	return request;
}
/** Answer all calls from list with EHANGUP msg */ |
static void ipc_cleanup_call_list(link_t *lst)
{
	call_t *call;

	/* Drain the queue, answering every pending call with EHANGUP. */
	while (!list_empty(lst)) {
		call = list_get_instance(lst->next, call_t, list);
		list_remove(&call->list);

		IPC_SET_RETVAL(call->data, EHANGUP);
		_ipc_answer_free_call(call);
	}
}
/** Disconnect all irq's notifications |
* |
* TODO: It may be better to do some linked list, so that |
* we wouldn't need to go through whole array every cleanup |
*/ |
static void ipc_irq_cleanup(answerbox_t *box)
{
	int irq;
	ipl_t ipl;

	/* Clear every connection slot that still points at this answerbox. */
	for (irq = 0; irq < irq_conns_size; irq++) {
		ipl = interrupts_disable();
		spinlock_lock(&irq_conns[irq].lock);
		if (irq_conns[irq].box == box)
			irq_conns[irq].box = NULL;
		spinlock_unlock(&irq_conns[irq].lock);
		interrupts_restore(ipl);
	}
}
/** Cleans up all IPC communication of the given task |
* |
* |
*/ |
void ipc_cleanup(task_t *task)
{
	int i;
	call_t *call;
	phone_t *phone;

	/* Disconnect all our phones ('ipc_phone_hangup') */
	for (i = 0; i < IPC_MAX_PHONES; i++)
		ipc_phone_hangup(&task->phones[i]);

	/* Disconnect all connected irqs */
	ipc_irq_cleanup(&task->answerbox);

	/* Disconnect all phones connected to our answerbox */
restart_phones:
	spinlock_lock(&task->answerbox.lock);
	while (!list_empty(&task->answerbox.connected_phones)) {
		phone = list_get_instance(task->answerbox.connected_phones.next,
					  phone_t,
					  list);
		if (! spinlock_trylock(&phone->lock)) {
			spinlock_unlock(&task->answerbox.lock);
			goto restart_phones;
		}

		/* Disconnect phone */
		phone->callee = NULL;
		list_remove(&phone->list);

		spinlock_unlock(&phone->lock);
	}

	/*
	 * Answer all messages in 'calls' and 'dispatched_calls' queues.
	 * The answerbox lock is still held from the loop above; the original
	 * code acquired it a second time here, which would self-deadlock on
	 * SMP builds.
	 */
	ipc_cleanup_call_list(&task->answerbox.dispatched_calls);
	ipc_cleanup_call_list(&task->answerbox.calls);
	spinlock_unlock(&task->answerbox.lock);

	/* Wait for all async answers to arrive */
	while (atomic_get(&task->active_calls)) {
		call = ipc_wait_for_call(&task->answerbox, 0);
		ASSERT((call->flags & IPC_CALL_ANSWERED) || (call->flags & IPC_CALL_NOTIF));
		ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC));
		atomic_dec(&task->active_calls);
		ipc_call_free(call);
	}
}
/** Initialize table of interrupt handlers */ |
static void ipc_irq_make_table(int irqcount) |
{ |
int i; |
irq_conns_size = irqcount; |
irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0); |
for (i=0; i < irqcount; i++) { |
spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock"); |
irq_conns[i].box = NULL; |
} |
} |
void ipc_irq_unregister(answerbox_t *box, int irq)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&irq_conns[irq].lock);
	/* Only detach the slot if this box is still its owner. */
	if (irq_conns[irq].box == box)
		irq_conns[irq].box = NULL;
	spinlock_unlock(&irq_conns[irq].lock);
	interrupts_restore(ipl);
}
/** Register an answerbox as a receiving end of interrupts notifications */ |
int ipc_irq_register(answerbox_t *box, int irq)
{
	ipl_t ipl;
	int rc = 0;

	ASSERT(irq_conns);

	ipl = interrupts_disable();
	spinlock_lock(&irq_conns[irq].lock);
	if (irq_conns[irq].box)
		rc = EEXISTS;	/* the irq already has a registered receiver */
	else
		irq_conns[irq].box = box;
	spinlock_unlock(&irq_conns[irq].lock);
	interrupts_restore(ipl);

	return rc;
}
/** Notify process that an irq has happened
* |
* We expect interrupts to be disabled |
*/ |
void ipc_irq_send_notif(int irq) |
{ |
call_t *call; |
ASSERT(irq_conns); |
spinlock_lock(&irq_conns[irq].lock); |
if (irq_conns[irq].box) { |
call = ipc_call_alloc(FRAME_ATOMIC); |
call->flags |= IPC_CALL_NOTIF; |
IPC_SET_METHOD(call->data, IPC_M_INTERRUPT); |
IPC_SET_ARG1(call->data, irq); |
spinlock_lock(&irq_conns[irq].box->irq_lock); |
list_append(&call->list, &irq_conns[irq].box->irq_notifs); |
spinlock_unlock(&irq_conns[irq].box->irq_lock); |
waitq_wakeup(&irq_conns[irq].box->wq, 0); |
} |
spinlock_unlock(&irq_conns[irq].lock); |
} |
/** Initialize IPC subsystem */
void ipc_init(void) |
{ |
ipc_call_slab = slab_cache_create("ipc_call", |
sizeof(call_t), |
0, |
NULL, NULL, 0); |
ipc_irq_make_table(IRQ_COUNT); |
} |
//kernel/trunk/generic/src/ipc/sysipc.c |
---|
0,0 → 1,498 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <arch.h> |
#include <proc/task.h> |
#include <errno.h> |
#include <mm/page.h> |
#include <memstr.h> |
#include <debug.h> |
#include <ipc/ipc.h> |
#include <ipc/sysipc.h> |
#include <ipc/ipcrsc.h> |
#include <arch/interrupt.h> |
#include <print.h> |
#include <arch.h> |
#include <proc/thread.h> |
/* Validate phoneid before indexing TASK->phones.
 * The original test used '>', which let phoneid == IPC_MAX_PHONES slip
 * through and index one element past the end of the phones array; the
 * unsigned cast additionally rejects negative ids passed as int.
 */
#define GET_CHECK_PHONE(phone,phoneid,err) { \
      if ((__native)(phoneid) >= IPC_MAX_PHONES) { err; } \
      phone = &TASK->phones[phoneid]; \
}

/* Copy a whole kernel structure to a userspace buffer of the same type. */
#define STRUCT_TO_USPACE(dst,src) copy_to_uspace(dst,src,sizeof(*(src)))
/** Return true if the method is a system method */ |
static inline int is_system_method(__native method)
{
	/* System methods occupy the low range up to IPC_M_LAST_SYSTEM. */
	return (method <= IPC_M_LAST_SYSTEM) ? 1 : 0;
}
/** Return true if the message with this method is forwardable |
* |
* - some system messages may be forwarded, for some of them |
* it is useless |
*/ |
static inline int is_forwardable(__native method)
{
	/* IPC_M_PHONE_HUNGUP is meant only for the original receiver. */
	return (method == IPC_M_PHONE_HUNGUP) ? 0 : 1;
}
/****************************************************/ |
/* Functions that preprocess answer before sending |
* it to the recepient |
*/ |
/** Return true if the caller (ipc_answer) should save |
* the old call contents for answer_preprocess |
*/ |
static inline int answer_need_old(call_t *call)
{
	__native method = IPC_GET_METHOD(call->data);

	/* Connection-establishing messages need the original request later. */
	return (method == IPC_M_CONNECT_TO_ME) ||
	       (method == IPC_M_CONNECT_ME_TO);
}
/** Interpret process answer as control information */ |
static inline void answer_preprocess(call_t *answer, ipc_data_t *olddata)
{
	int phoneid;

	if (IPC_GET_RETVAL(answer->data) == EHANGUP) {
		/* In case of forward, hangup the forwarded phone,
		 * not the originator
		 */
		/*
		 * NOTE(review): phone->lock is taken before
		 * TASK->answerbox.lock here, while the ipc.c header comment
		 * states "first the answerbox, then the phone" — confirm the
		 * intended lock ordering.
		 */
		spinlock_lock(&answer->data.phone->lock);
		spinlock_lock(&TASK->answerbox.lock);
		if (answer->data.phone->callee) {
			list_remove(&answer->data.phone->list);
			answer->data.phone->callee = 0;
		}
		spinlock_unlock(&TASK->answerbox.lock);
		spinlock_unlock(&answer->data.phone->lock);
	}

	if (!olddata)
		return;

	if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECT_TO_ME) {
		phoneid = IPC_GET_ARG3(*olddata);
		if (IPC_GET_RETVAL(answer->data)) {
			/* The connection was not accepted */
			phone_dealloc(phoneid);
		} else {
			/* The connection was accepted */
			phone_connect(phoneid,&answer->sender->answerbox);
			/* Set 'phone identification' as arg3 of response */
			IPC_SET_ARG3(answer->data, (__native)&TASK->phones[phoneid]);
		}
	} else if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECT_ME_TO) {
		/* If the users accepted call, connect */
		if (!IPC_GET_RETVAL(answer->data)) {
			ipc_phone_connect((phone_t *)IPC_GET_ARG3(*olddata),
					  &TASK->answerbox);
		}
	}
}
/** Called before the request is sent |
* |
* @return 0 - no error, -1 - report error to user |
*/ |
static int request_preprocess(call_t *call)
{
	int newphid;

	/* Only IPC_M_CONNECT_ME_TO needs kernel preprocessing. */
	if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_ME_TO) {
		newphid = phone_alloc();
		if (newphid < 0)
			return ELIMIT;
		/* Tell the server which phone slot wants to be connected. */
		IPC_SET_ARG3(call->data, (__native)&TASK->phones[newphid]);
		call->flags |= IPC_CALL_CONN_ME_TO;
		call->private = newphid;
	}

	return 0;
}
/****************************************************/ |
/* Functions called to process received call/answer |
* before passing to uspace |
*/ |
/** Do basic kernel processing of received call answer */ |
static void process_answer(call_t *call)
{
	/* A hangup reported on a forwarded call means the forward failed. */
	if ((IPC_GET_RETVAL(call->data) == EHANGUP) &&
	    (call->flags & IPC_CALL_FORWARDED))
		IPC_SET_RETVAL(call->data, EFORWARD);

	if (call->flags & IPC_CALL_CONN_ME_TO) {
		if (IPC_GET_RETVAL(call->data))
			phone_dealloc(call->private);	/* connection refused */
		else
			IPC_SET_ARG3(call->data, call->private);
	}
}
/** Do basic kernel processing of received call request |
* |
* @return 0 - the call should be passed to userspace, 1 - ignore call |
*/ |
static int process_request(answerbox_t *box,call_t *call)
{
	int phoneid;

	if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_TO_ME) {
		phoneid = phone_alloc();
		if (phoneid < 0) {
			/* No free phone slot: refuse the connection. */
			IPC_SET_RETVAL(call->data, ELIMIT);
			ipc_answer(box,call);
			return -1;
		}
		IPC_SET_ARG3(call->data, phoneid);
	}

	return 0;
}
/** Send a call over IPC, wait for reply, return to user |
* |
* @return Call identification, returns -1 on fatal error, |
-2 on 'Too many async request, handle answers first |
*/ |
__native sys_ipc_call_sync_fast(__native phoneid, __native method,
				__native arg1, ipc_data_t *data)
{
	call_t call;
	phone_t *phone;
	int res;

	GET_CHECK_PHONE(phone, phoneid, return ENOENT);

	/* The call lives on this kernel stack — marked static-alloc. */
	ipc_call_static_init(&call);
	IPC_SET_METHOD(call.data, method);
	IPC_SET_ARG1(call.data, arg1);

	if (!(res=request_preprocess(&call))) {
		ipc_call_sync(phone, &call);
		process_answer(&call);
	} else
		IPC_SET_RETVAL(call.data, res);
	/* Copy only the message arguments back to userspace. */
	STRUCT_TO_USPACE(&data->args, &call.data.args);

	return 0;
}
/** Synchronous IPC call allowing to send whole message */ |
__native sys_ipc_call_sync(__native phoneid, ipc_data_t *question,
			   ipc_data_t *reply)
{
	call_t call;
	phone_t *phone;
	int res;

	ipc_call_static_init(&call);
	/* NOTE(review): return value of copy_from_uspace is not checked. */
	copy_from_uspace(&call.data.args, &question->args, sizeof(call.data.args));

	GET_CHECK_PHONE(phone, phoneid, return ENOENT);

	if (!(res=request_preprocess(&call))) {
		ipc_call_sync(phone, &call);
		process_answer(&call);
	} else
		IPC_SET_RETVAL(call.data, res);

	STRUCT_TO_USPACE(&reply->args, &call.data.args);

	return 0;
}
/** Check that the task did not exceed allowed limit |
* |
* @return 0 - Limit OK, -1 - limit exceeded |
*/ |
static int check_call_limit(void) |
{ |
if (atomic_preinc(&TASK->active_calls) > IPC_MAX_ASYNC_CALLS) { |
atomic_dec(&TASK->active_calls); |
return -1; |
} |
return 0; |
} |
/** Send an asynchronous call over ipc |
* |
* @return Call identification, returns -1 on fatal error, |
-2 on 'Too many async request, handle answers first |
*/ |
__native sys_ipc_call_async_fast(__native phoneid, __native method,
				 __native arg1, __native arg2)
{
	call_t *call;
	phone_t *phone;
	int res;

	/* Enforce the per-task limit on outstanding async calls. */
	if (check_call_limit())
		return IPC_CALLRET_TEMPORARY;

	GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL);

	call = ipc_call_alloc(0);
	IPC_SET_METHOD(call->data, method);
	IPC_SET_ARG1(call->data, arg1);
	IPC_SET_ARG2(call->data, arg2);

	if (!(res=request_preprocess(call)))
		ipc_call(phone, call);
	else
		ipc_backsend_err(phone, call, res);

	/* The call's kernel address doubles as the userspace call id. */
	return (__native) call;
}
/** Asynchronous IPC call allowing to send whole message
* |
* @return The same as sys_ipc_call_async |
*/ |
__native sys_ipc_call_async(__native phoneid, ipc_data_t *data)
{
	call_t *call;
	phone_t *phone;
	int res;

	/* Enforce the per-task limit on outstanding async calls. */
	if (check_call_limit())
		return IPC_CALLRET_TEMPORARY;

	GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL);

	call = ipc_call_alloc(0);
	/* NOTE(review): return value of copy_from_uspace is not checked. */
	copy_from_uspace(&call->data.args, &data->args, sizeof(call->data.args));

	if (!(res=request_preprocess(call)))
		ipc_call(phone, call);
	else
		ipc_backsend_err(phone, call, res);

	/* The call's kernel address doubles as the userspace call id. */
	return (__native) call;
}
/** Forward received call to another destination |
* |
* The arg1 and arg2 are changed in the forwarded message |
* |
* Warning: If implementing non-fast version, make sure that |
* arg3 is not rewritten for certain system IPC |
*/ |
__native sys_ipc_forward_fast(__native callid, __native phoneid,
			      __native method, __native arg1)
{
	call_t *call;
	phone_t *phone;

	call = get_call(callid);
	if (!call)
		return ENOENT;

	call->flags |= IPC_CALL_FORWARDED;

	/* If the target phone is invalid, answer the caller with EFORWARD. */
	GET_CHECK_PHONE(phone, phoneid, {
		IPC_SET_RETVAL(call->data, EFORWARD);
		ipc_answer(&TASK->answerbox, call);
		return ENOENT;
	});

	if (!is_forwardable(IPC_GET_METHOD(call->data))) {
		IPC_SET_RETVAL(call->data, EFORWARD);
		ipc_answer(&TASK->answerbox, call);
		return EPERM;
	}

	/* Userspace is not allowed to change method of system methods
	 * on forward, allow changing ARG1 and ARG2 by means of method and arg1
	 */
	if (is_system_method(IPC_GET_METHOD(call->data))) {
		/* A pending CONNECT_TO_ME phone slot is no longer ours. */
		if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_TO_ME)
			phone_dealloc(IPC_GET_ARG3(call->data));

		IPC_SET_ARG1(call->data, method);
		IPC_SET_ARG2(call->data, arg1);
	} else {
		IPC_SET_METHOD(call->data, method);
		IPC_SET_ARG1(call->data, arg1);
	}

	return ipc_forward(call, phone, &TASK->answerbox);
}
/** Send IPC answer */ |
__native sys_ipc_answer_fast(__native callid, __native retval,
			     __native arg1, __native arg2)
{
	call_t *call;
	ipc_data_t saved_data;
	int saveddata = 0;

	call = get_call(callid);
	if (!call)
		return ENOENT;

	/* Some system messages need the original request for postprocessing. */
	if (answer_need_old(call)) {
		memcpy(&saved_data, &call->data, sizeof(call->data));
		saveddata = 1;
	}

	IPC_SET_RETVAL(call->data, retval);
	IPC_SET_ARG1(call->data, arg1);
	IPC_SET_ARG2(call->data, arg2);

	answer_preprocess(call, saveddata ? &saved_data : NULL);

	ipc_answer(&TASK->answerbox, call);
	return 0;
}
/** Send IPC answer */ |
__native sys_ipc_answer(__native callid, ipc_data_t *data)
{
	call_t *call;
	ipc_data_t saved_data;
	int saveddata = 0;

	call = get_call(callid);
	if (!call)
		return ENOENT;

	/* Some system messages need the original request for postprocessing. */
	if (answer_need_old(call)) {
		memcpy(&saved_data, &call->data, sizeof(call->data));
		saveddata = 1;
	}
	/* NOTE(review): return value of copy_from_uspace is not checked. */
	copy_from_uspace(&call->data.args, &data->args,
			 sizeof(call->data.args));

	answer_preprocess(call, saveddata ? &saved_data : NULL);

	ipc_answer(&TASK->answerbox, call);

	return 0;
}
/** Hang up the phone |
* |
*/ |
__native sys_ipc_hangup(int phoneid) |
{ |
phone_t *phone; |
GET_CHECK_PHONE(phone, phoneid, return ENOENT); |
if (ipc_phone_hangup(phone)) |
return -1; |
return 0; |
} |
/** Wait for incoming ipc call or answer
 *
 * @param calldata	Pointer to userspace buffer where the call/answer
 *			data is stored.
 * @param flags		Passed through to ipc_wait_for_call().
 * @return Callid; if callid & IPC_CALLID_ANSWERED, the event is an
 *	   answer, if callid & IPC_CALLID_NOTIFICATION, a notification.
 */
__native sys_ipc_wait_for_call(ipc_data_t *calldata, __native flags)
{
	call_t *call;

restart:
	call = ipc_wait_for_call(&TASK->answerbox, flags);
	if (!call)
		return 0;

	if (call->flags & IPC_CALL_NOTIF) {
		/* Notifications are always dynamically allocated. */
		ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC));
		STRUCT_TO_USPACE(&calldata->args, &call->data.args);
		ipc_call_free(call);

		/* The (freed) call's address still serves as the id. */
		return ((__native)call) | IPC_CALLID_NOTIFICATION;
	}

	if (call->flags & IPC_CALL_ANSWERED) {
		process_answer(call);

		ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC));

		atomic_dec(&TASK->active_calls);

		/* Answers marked for discarding (e.g. after a hangup) are
		 * freed without being copied out; wait for the next event. */
		if (call->flags & IPC_CALL_DISCARD_ANSWER) {
			ipc_call_free(call);
			goto restart;
		}

		STRUCT_TO_USPACE(&calldata->args, &call->data.args);
		ipc_call_free(call);

		return ((__native)call) | IPC_CALLID_ANSWERED;
	}

	/* Requests fully serviced in kernel never reach userspace. */
	if (process_request(&TASK->answerbox, call))
		goto restart;

	/* Include phone address('id') of the caller in the request,
	 * copy whole call->data, not only call->data.args */
	STRUCT_TO_USPACE(calldata, &call->data);
	return (__native)call;
}
/** Connect irq handler to task */ |
__native sys_ipc_register_irq(__native irq) |
{ |
if (irq >= IRQ_COUNT) |
return -ELIMIT; |
irq_ipc_bind_arch(irq); |
return ipc_irq_register(&TASK->answerbox, irq); |
} |
/* Disconnect irq handler from task */ |
__native sys_ipc_unregister_irq(__native irq) |
{ |
if (irq >= IRQ_COUNT) |
return -ELIMIT; |
ipc_irq_unregister(&TASK->answerbox, irq); |
return 0; |
} |
//kernel/trunk/generic/src/ipc/ipcrsc.c |
---|
0,0 → 1,205 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/* IPC resources management |
* |
* The goal of this source code is to properly manage IPC resources |
* and allow straight and clean clean-up procedure upon task termination. |
* |
* The pattern of usage of the resources is: |
* - allocate empty phone slot, connect | deallocate slot |
* - disconnect connected phone (some messages might be on the fly) |
* - find phone in slot and send a message using phone |
* - answer message to phone |
* - hangup phone (the caller has hung up) |
* - hangup phone (the answerbox is exiting) |
* |
* Locking strategy |
* |
* - To use a phone, disconnect a phone etc., the phone must be |
* first locked and then checked that it is connected |
* - To connect an allocated phone it need not be locked (assigning |
* pointer is atomic on all platforms) |
* |
* - To find an empty phone slot, the TASK must be locked |
* - To answer a message, the answerbox must be locked |
* - The locking of phone and answerbox is done at the ipc_ level. |
* It is perfectly correct to pass unconnected phone to these functions |
* and proper reply will be generated. |
* |
* Locking order |
* |
* - first phone, then answerbox |
* + Easy locking on calls |
* - Very hard traversing list of phones when disconnecting because |
* the phones may disconnect during traversal of list of connected phones. |
* The only possibility is try_lock with restart of list traversal. |
* |
* Destroying is less frequent, this approach is taken. |
* |
* Phone call |
* |
* *** Connect_me_to *** |
* The caller sends IPC_M_CONNECT_ME_TO to an answerbox. The server |
* receives 'phoneid' of the connecting phone as an ARG3. If it answers |
* with RETVAL=0, the phonecall is accepted, otherwise it is refused. |
* |
* *** Connect_to_me *** |
 * The caller sends IPC_M_CONNECT_TO_ME. The server receives an |
 * automatically opened phoneid. If it accepts (RETVAL=0), it can use |
 * the phoneid immediately. |
* Possible race condition can arise, when the client receives messages |
* from new connection before getting response for connect_to_me message. |
* Userspace should implement handshake protocol that would control it. |
* |
* Phone hangup |
* |
* *** The caller hangs up (sys_ipc_hangup) *** |
* - The phone is disconnected (no more messages can be sent over this phone), |
 * all in-progress messages are correctly handled. The answerbox receives |
* IPC_M_PHONE_HUNGUP call from the phone that hung up. When all async |
* calls are answered, the phone is deallocated. |
* |
* *** The answerbox hangs up (ipc_answer(EHANGUP)) |
* - The phone is disconnected. EHANGUP response code is sent |
* to the calling process. All new calls through this phone |
 * get an EHUNGUP error code, the task is expected to |
 * send a sys_ipc_hangup after cleaning up its internal structures. |
* |
* Call forwarding |
* |
* The call can be forwarded, so that the answer to call is passed directly |
* to the original sender. However, this poses special problems regarding |
* routing of hangup messages. |
* |
* sys_ipc_hangup -> IPC_M_PHONE_HUNGUP |
* - this message CANNOT be forwarded |
* |
* EHANGUP during forward |
* - The *forwarding* phone will be closed, EFORWARD is sent to receiver. |
* |
* EHANGUP, ENOENT during forward |
* - EFORWARD is sent to the receiver, ipc_forward returns error code EFORWARD |
* |
* Cleanup strategy |
* |
* 1) Disconnect all our phones ('ipc_phone_hangup'). |
* |
* 2) Disconnect all phones connected to answerbox. |
* |
* 3) Answer all messages in 'calls' and 'dispatched_calls' queues with |
* appropriate error code (EHANGUP, EFORWARD). |
* |
* 4) Wait for all async answers to arrive and dispose of them. |
* |
*/ |
#include <synch/spinlock.h> |
#include <ipc/ipc.h> |
#include <arch.h> |
#include <proc/task.h> |
#include <ipc/ipcrsc.h> |
#include <debug.h> |
/** Find call_t * in call table according to callid |
* |
* TODO: Some speedup (hash table?) |
* @return NULL on not found, otherwise pointer to call structure |
*/ |
call_t * get_call(__native callid) |
{ |
link_t *lst; |
call_t *call, *result = NULL; |
spinlock_lock(&TASK->answerbox.lock); |
for (lst = TASK->answerbox.dispatched_calls.next; |
lst != &TASK->answerbox.dispatched_calls; lst = lst->next) { |
call = list_get_instance(lst, call_t, list); |
if ((__native)call == callid) { |
result = call; |
break; |
} |
} |
spinlock_unlock(&TASK->answerbox.lock); |
return result; |
} |
/** Allocate new phone slot in current TASK structure */ |
int phone_alloc(void) |
{ |
int i; |
spinlock_lock(&TASK->lock); |
for (i=0; i < IPC_MAX_PHONES; i++) { |
if (TASK->phones[i].busy==IPC_BUSY_FREE && !atomic_get(&TASK->phones[i].active_calls)) { |
TASK->phones[i].busy = IPC_BUSY_CONNECTING; |
break; |
} |
} |
spinlock_unlock(&TASK->lock); |
if (i >= IPC_MAX_PHONES) |
return -1; |
return i; |
} |
/** Mark a connecting phone slot free again.
 *
 * The slot must be in the IPC_BUSY_CONNECTING state and must not yet be
 * connected to any answerbox.
 */
static void phone_deallocp(phone_t *phone)
{
	ASSERT(phone->busy == IPC_BUSY_CONNECTING);
	ASSERT(! phone->callee);

	/* atomic operation */
	phone->busy = IPC_BUSY_FREE;
}
/** Free slot from a disconnected phone
 *
 * All already sent messages will be correctly processed
 *
 * @param phoneid	Index of the phone slot in the current TASK.
 */
void phone_dealloc(int phoneid)
{
	phone_deallocp(&TASK->phones[phoneid]);
}
/** Connect phone to a given answerbox
 *
 * @param phoneid	The slot that will be connected.
 * @param box		Answerbox the phone connects to.
 *
 * The procedure _enforces_ that the user first marks the phone
 * busy (e.g. via phone_alloc) and then connects the phone, otherwise
 * race condition may appear.
 */
void phone_connect(int phoneid, answerbox_t *box)
{
	phone_t *phone = &TASK->phones[phoneid];

	/* Guards against connecting a slot that was never allocated. */
	ASSERT(phone->busy == IPC_BUSY_CONNECTING);
	ipc_phone_connect(phone, box);
}
//kernel/trunk/generic/src/syscall/syscall.c |
---|
0,0 → 1,105 |
/* |
* Copyright (C) 2005 Martin Decky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <syscall/syscall.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <mm/as.h> |
#include <print.h> |
#include <putchar.h> |
#include <errno.h> |
#include <arch.h> |
#include <debug.h> |
#include <ipc/sysipc.h> |
#include <synch/futex.h> |
#include <ddi/ddi.h> |
/** Write count bytes from buf to kernel console output.
 *
 * @param fd	File descriptor; currently ignored.
 * @param buf	Userspace buffer with characters to print.
 * @param count	Number of characters.
 * @return count.
 */
static __native sys_io(int fd, const void * buf, size_t count) {
	// TODO: buf sanity checks and a lot of other stuff ...
	const char *chars = (const char *) buf;
	size_t pos;

	for (pos = 0; pos < count; pos++)
		putchar(chars[pos]);

	return count;
}
/** Enable/disable interrupts syscall stub.
 *
 * @param enable	Currently unused.
 *
 * Not implemented yet; always panics.
 */
static __native sys_int_control(int enable)
{
	panic("Not implemented.");
}
/** Dispatch system call */ |
__native syscall_handler(__native a1, __native a2, __native a3, |
__native a4, __native id) |
{ |
if (id < SYSCALL_END) |
return syscall_table[id](a1,a2,a3,a4); |
else |
panic("Undefined syscall %d", id); |
} |
/** Table of syscall handlers, indexed by syscall number.
 *
 * NOTE(review): the order of entries presumably must match the syscall
 * numbering (SYS_* constants) declared in syscall.h - verify against
 * that header before reordering or inserting entries.
 */
syshandler_t syscall_table[SYSCALL_END] = {
	sys_io,
	sys_tls_set,
	sys_int_control,

	/* Thread and task related syscalls. */
	sys_thread_create,
	sys_thread_exit,
	sys_task_get_id,

	/* Synchronization related syscalls. */
	sys_futex_sleep_timeout,
	sys_futex_wakeup,

	/* Address space related syscalls. */
	sys_as_area_create,
	sys_as_area_resize,
	sys_as_area_accept,
	sys_as_area_send,

	/* IPC related syscalls. */
	sys_ipc_call_sync_fast,
	sys_ipc_call_sync,
	sys_ipc_call_async_fast,
	sys_ipc_call_async,
	sys_ipc_answer_fast,
	sys_ipc_answer,
	sys_ipc_forward_fast,
	sys_ipc_wait_for_call,
	sys_ipc_hangup,
	sys_ipc_register_irq,
	sys_ipc_unregister_irq,

	/* DDI related syscalls. */
	sys_physmem_map,
	sys_iospace_enable
};
//kernel/trunk/generic/src/debug/print.c |
---|
0,0 → 1,694 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* Copyright (C) 2006 Josef Cejka |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <putchar.h> |
#include <print.h> |
#include <synch/spinlock.h> |
#include <arch/arg.h> |
#include <arch/asm.h> |
#include <arch.h> |
SPINLOCK_INITIALIZE(printflock); /**< printf spinlock */ |
#define __PRINTF_FLAG_PREFIX 0x00000001 /* show prefixes 0x or 0 */ |
#define __PRINTF_FLAG_SIGNED 0x00000002 /* signed / unsigned number */ |
#define __PRINTF_FLAG_ZEROPADDED 0x00000004 /* print leading zeroes */ |
#define __PRINTF_FLAG_LEFTALIGNED 0x00000010 /* align to left */ |
#define __PRINTF_FLAG_SHOWPLUS 0x00000020 /* always show + sign */ |
#define __PRINTF_FLAG_SPACESIGN 0x00000040 /* print space instead of plus */ |
#define __PRINTF_FLAG_BIGCHARS 0x00000080 /* show big characters */ |
#define __PRINTF_FLAG_NEGATIVE 0x00000100 /* number has - sign */ |
#define PRINT_NUMBER_BUFFER_SIZE (64+5) /* Buffer big enought for 64 bit number |
* printed in base 2, sign, prefix and |
* 0 to terminate string.. (last one is only for better testing |
* end of buffer by zero-filling subroutine) |
*/ |
/** Size qualifiers recognized in a printf conversion specification. */
typedef enum {
	PrintfQualifierByte = 0,	/* "hh" */
	PrintfQualifierShort,		/* "h" */
	PrintfQualifierInt,		/* no qualifier (default) */
	PrintfQualifierLong,		/* "l" */
	PrintfQualifierLongLong,	/* "ll" */
	PrintfQualifierNative,		/* "z", __native */
	PrintfQualifierPointer		/* %p / %P conversions */
} qualifier_t;
static char digits_small[] = "0123456789abcdef"; /* Small hexadecimal characters */ |
static char digits_big[] = "0123456789ABCDEF"; /* Big hexadecimal characters */ |
/** Return nonzero when c is an ASCII decimal digit ('0'..'9'). */
static inline int isdigit(int c)
{
	return (c >= '0') && (c <= '9');
}
/** Return the length of a NUL-terminated string (local helper). */
static __native strlen(const char *str)
{
	const char *p = str;

	while (*p)
		++p;

	return p - str;
}
/** Print one string without appending a newline.
 *
 * Do not use this function directly - printflock is not locked here.
 *
 * @param str	String to print; NULL prints "(NULL)".
 * @return Number of characters printed.
 */
static int putstr(const char *str)
{
	int printed = 0;

	if (str == NULL)
		str = "(NULL)";

	while (str[printed]) {
		putchar(str[printed]);
		printed++;
	}

	return printed;
}
/** Print count characters from buffer to output. |
* |
* @param buffer Address of the buffer with charaters to be printed. |
* @param count Number of characters to be printed. |
* |
* @return Number of characters printed. |
*/ |
static int putnchars(const char *buffer, __native count) |
{ |
int i; |
if (buffer == NULL) { |
buffer = "(NULL)"; |
count = 6; |
} |
for (i = 0; i < count; i++) { |
putchar(buffer[i]); |
} |
return count; |
} |
/** Print one formatted character |
* |
* @param c Character to print. |
* @param width |
* @param flags |
* @return Number of printed characters or EOF. |
*/ |
static int print_char(char c, int width, __u64 flags) |
{ |
int counter = 0; |
if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) { |
while (--width > 0) { /* one space is consumed by character itself hence the predecrement */ |
/* FIXME: painfully slow */ |
putchar(' '); |
++counter; |
} |
} |
putchar(c); |
++counter; |
while (--width > 0) { /* one space is consumed by character itself hence the predecrement */ |
putchar(' '); |
++counter; |
} |
return counter; |
} |
/** Print one string |
* @param s string |
* @param width |
* @param precision |
* @param flags |
* @return number of printed characters or EOF |
*/ |
static int print_string(char *s, int width, int precision, __u64 flags) |
{ |
int counter = 0; |
__native size; |
if (s == NULL) { |
return putstr("(NULL)"); |
} |
size = strlen(s); |
/* print leading spaces */ |
if (precision == 0) |
precision = size; |
width -= precision; |
if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) { |
while (width-- > 0) { |
putchar(' '); |
counter++; |
} |
} |
while (precision > size) { |
precision--; |
putchar(' '); |
++counter; |
} |
if (putnchars(s, precision) == EOF) { |
return EOF; |
} |
counter += precision; |
while (width-- > 0) { |
putchar(' '); |
++counter; |
} |
return ++counter; |
} |
/** Print number in given base
 *
 * Print significant digits of a number in given
 * base, honoring width/precision and the prefix, sign, alignment and
 * zero-padding flags.
 *
 * @param num Number to print.
 * @param width Minimum field width.
 * @param precision Minimum number of digits; smaller numbers get
 *	leading zeroes.
 * @param base Base to print the number in (should
 * 	be in range 2 .. 16).
 * @param flags __PRINTF_FLAG_* output modifiers
 * @return number of written characters or EOF.
 */
static int print_number(__u64 num, int width, int precision, int base , __u64 flags)
{
	char *digits = digits_small;
	char d[PRINT_NUMBER_BUFFER_SIZE]; /* this is good enough even for base == 2, prefix and sign */
	char *ptr = &d[PRINT_NUMBER_BUFFER_SIZE - 1];
	int size = 0;		/* characters incl. sign and prefix */
	int number_size;	/* size of plain number */
	int written = 0;
	char sgn;

	if (flags & __PRINTF_FLAG_BIGCHARS)
		digits = digits_big;

	*ptr-- = 0; /* Put zero at end of string */

	/* Convert the number to digits, least significant digit first,
	 * filling the buffer from its end backwards. */
	if (num == 0) {
		*ptr-- = '0';
		size++;
	} else {
		do {
			*ptr-- = digits[num % base];
			size++;
		} while (num /= base);
	}

	number_size = size;

	/* Collect sum of all prefixes/signs/... to calculate padding and leading zeroes */
	if (flags & __PRINTF_FLAG_PREFIX) {
		switch(base) {
		case 2:	/* Binary formating is not standard, but usefull */
			size += 2;
			break;
		case 8:
			size++;
			break;
		case 16:
			size += 2;
			break;
		}
	}

	sgn = 0;
	if (flags & __PRINTF_FLAG_SIGNED) {
		if (flags & __PRINTF_FLAG_NEGATIVE) {
			sgn = '-';
			size++;
		} else if (flags & __PRINTF_FLAG_SHOWPLUS) {
			sgn = '+';
			size++;
		} else if (flags & __PRINTF_FLAG_SPACESIGN) {
			sgn = ' ';
			size++;
		}
	}

	if (flags & __PRINTF_FLAG_LEFTALIGNED) {
		flags &= ~__PRINTF_FLAG_ZEROPADDED;
	}

	/* if number is leftaligned or precision is specified then zeropadding is ignored */
	if (flags & __PRINTF_FLAG_ZEROPADDED) {
		if ((precision == 0) && (width > size)) {
			precision = width - size + number_size;
		}
	}

	/* print leading spaces */
	if (number_size > precision) /* We must print whole number not only a part */
		precision = number_size;

	width -= precision + size - number_size;

	if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) {
		while (width-- > 0) {
			putchar(' ');
			written++;
		}
	}

	/* print sign */
	if (sgn) {
		putchar(sgn);
		written++;
	}

	/* print prefix */
	if (flags & __PRINTF_FLAG_PREFIX) {
		switch(base) {
		case 2:	/* Binary formating is not standard, but usefull */
			putchar('0');
			if (flags & __PRINTF_FLAG_BIGCHARS) {
				putchar('B');
			} else {
				putchar('b');
			}
			written += 2;
			break;
		case 8:
			/* NOTE(review): octal prefix prints 'o' rather than
			 * the standard leading '0' - confirm intended. */
			putchar('o');
			written++;
			break;
		case 16:
			putchar('0');
			if (flags & __PRINTF_FLAG_BIGCHARS) {
				putchar('X');
			} else {
				putchar('x');
			}
			written += 2;
			break;
		}
	}

	/* print leading zeroes */
	precision -= number_size;
	while (precision-- > 0) {
		putchar('0');
		written++;
	}

	/* print number itself */
	written += putstr(++ptr);

	/* print ending spaces */
	while (width-- > 0) {
		putchar(' ');
		written++;
	}

	return written;
}
/** Print formatted string. |
* |
* Print string formatted according to the fmt parameter |
* and variadic arguments. Each formatting directive |
* must have the following form: |
* |
* \% [ FLAGS ] [ WIDTH ] [ .PRECISION ] [ TYPE ] CONVERSION |
* |
* FLAGS:@n |
* "#" Force to print prefix. |
* For conversion \%o the prefix is 0, for %x and \%X prefixes are 0x and 0X |
* and for conversion \%b the prefix is 0b. |
* |
* "-" Align to left. |
* |
* "+" Print positive sign just as negative. |
* |
* " " If the printed number is positive and "+" flag is not set, print space in |
* place of sign. |
* |
* "0" Print 0 as padding instead of spaces. Zeroes are placed between sign and the |
* rest of the number. This flag is ignored if "-" flag is specified. |
* |
* WIDTH:@n |
* Specify minimal width of printed argument. If it is bigger, width is ignored. |
* If width is specified with a "*" character instead of number, width is taken from |
* parameter list. And integer parameter is expected before parameter for processed |
* conversion specification. If this value is negative its absolute value is taken |
* and the "-" flag is set. |
* |
* PRECISION:@n |
* Value precision. For numbers it specifies minimum valid numbers. |
* Smaller numbers are printed with leading zeroes. Bigger numbers are not affected. |
* Strings with more than precision characters are cut off. |
* Just as with width, an "*" can be used used instead of a number. |
* An integer value is then expected in parameters. When both width and precision |
* are specified using "*", the first parameter is used for width and the second one |
* for precision. |
* |
* TYPE:@n |
* "hh" Signed or unsigned char.@n |
* "h" Signed or usigned short.@n |
* "" Signed or usigned int (default value).@n |
* "l" Signed or usigned long int.@n |
* "ll" Signed or usigned long long int.@n |
* "z" __native (non-standard extension).@n |
* |
* |
* CONVERSION:@n |
* % Print percentile character itself. |
* |
* c Print single character. |
* |
* s Print zero terminated string. If a NULL value is passed as value, "(NULL)" is printed instead. |
* |
* P, p Print value of a pointer. Void * value is expected and it is printed in hexadecimal notation with prefix |
* (as with \%#X or \%#x for 32bit or \%#X / \%#x for 64bit long pointers). |
* |
* b Print value as unsigned binary number. Prefix is not printed by default. (Nonstandard extension.) |
* |
* o Print value as unsigned octal number. Prefix is not printed by default. |
* |
* d,i Print signed decimal number. There is no difference between d and i conversion. |
* |
* u Print unsigned decimal number. |
* |
* X, x Print hexadecimal number with upper- or lower-case. Prefix is not printed by default. |
* |
* All other characters from fmt except the formatting directives |
* are printed in verbatim. |
* |
* @param fmt Formatting NULL terminated string. |
* @return Number of printed characters or negative value on failure. |
*/ |
int printf(const char *fmt, ...) |
{ |
int irqpri; |
int i = 0, j = 0; /* i is index of currently processed char from fmt, j is index to the first not printed nonformating character */ |
int end; |
int counter; /* counter of printed characters */ |
int retval; /* used to store return values from called functions */ |
va_list ap; |
char c; |
qualifier_t qualifier; /* type of argument */ |
int base; /* base in which will be parameter (numbers only) printed */ |
__u64 number; /* argument value */ |
__native size; /* byte size of integer parameter */ |
int width, precision; |
__u64 flags; |
counter = 0; |
va_start(ap, fmt); |
irqpri = interrupts_disable(); |
spinlock_lock(&printflock); |
while ((c = fmt[i])) { |
/* control character */ |
if (c == '%' ) { |
/* print common characters if any processed */ |
if (i > j) { |
if ((retval = putnchars(&fmt[j], (__native)(i - j))) == EOF) { /* error */ |
counter = -counter; |
goto out; |
} |
counter += retval; |
} |
j = i; |
/* parse modifiers */ |
flags = 0; |
end = 0; |
do { |
++i; |
switch (c = fmt[i]) { |
case '#': flags |= __PRINTF_FLAG_PREFIX; break; |
case '-': flags |= __PRINTF_FLAG_LEFTALIGNED; break; |
case '+': flags |= __PRINTF_FLAG_SHOWPLUS; break; |
case ' ': flags |= __PRINTF_FLAG_SPACESIGN; break; |
case '0': flags |= __PRINTF_FLAG_ZEROPADDED; break; |
default: end = 1; |
}; |
} while (end == 0); |
/* width & '*' operator */ |
width = 0; |
if (isdigit(fmt[i])) { |
while (isdigit(fmt[i])) { |
width *= 10; |
width += fmt[i++] - '0'; |
} |
} else if (fmt[i] == '*') { |
/* get width value from argument list*/ |
i++; |
width = (int)va_arg(ap, int); |
if (width < 0) { |
/* negative width means to set '-' flag */ |
width *= -1; |
flags |= __PRINTF_FLAG_LEFTALIGNED; |
} |
} |
/* precision and '*' operator */ |
precision = 0; |
if (fmt[i] == '.') { |
++i; |
if (isdigit(fmt[i])) { |
while (isdigit(fmt[i])) { |
precision *= 10; |
precision += fmt[i++] - '0'; |
} |
} else if (fmt[i] == '*') { |
/* get precision value from argument list*/ |
i++; |
precision = (int)va_arg(ap, int); |
if (precision < 0) { |
/* negative precision means to ignore it */ |
precision = 0; |
} |
} |
} |
switch (fmt[i++]) { |
/** TODO: unimplemented qualifiers: |
* t ptrdiff_t - ISO C 99 |
*/ |
case 'h': /* char or short */ |
qualifier = PrintfQualifierShort; |
if (fmt[i] == 'h') { |
i++; |
qualifier = PrintfQualifierByte; |
} |
break; |
case 'l': /* long or long long*/ |
qualifier = PrintfQualifierLong; |
if (fmt[i] == 'l') { |
i++; |
qualifier = PrintfQualifierLongLong; |
} |
break; |
case 'z': /* __native */ |
qualifier = PrintfQualifierNative; |
break; |
default: |
qualifier = PrintfQualifierInt; /* default type */ |
--i; |
} |
base = 10; |
switch (c = fmt[i]) { |
/* |
* String and character conversions. |
*/ |
case 's': |
if ((retval = print_string(va_arg(ap, char*), width, precision, flags)) == EOF) { |
counter = -counter; |
goto out; |
}; |
counter += retval; |
j = i + 1; |
goto next_char; |
case 'c': |
c = va_arg(ap, unsigned int); |
if ((retval = print_char(c, width, flags )) == EOF) { |
counter = -counter; |
goto out; |
}; |
counter += retval; |
j = i + 1; |
goto next_char; |
/* |
* Integer values |
*/ |
case 'P': /* pointer */ |
flags |= __PRINTF_FLAG_BIGCHARS; |
case 'p': |
flags |= __PRINTF_FLAG_PREFIX; |
base = 16; |
qualifier = PrintfQualifierPointer; |
break; |
case 'b': |
base = 2; |
break; |
case 'o': |
base = 8; |
break; |
case 'd': |
case 'i': |
flags |= __PRINTF_FLAG_SIGNED; |
case 'u': |
break; |
case 'X': |
flags |= __PRINTF_FLAG_BIGCHARS; |
case 'x': |
base = 16; |
break; |
/* percentile itself */ |
case '%': |
j = i; |
goto next_char; |
/* |
* Bad formatting. |
*/ |
default: |
/* Unknown format |
* now, the j is index of '%' so we will |
* print whole bad format sequence |
*/ |
goto next_char; |
} |
/* Print integers */ |
/* print number */ |
switch (qualifier) { |
case PrintfQualifierByte: |
size = sizeof(unsigned char); |
number = (__u64)va_arg(ap, unsigned int); |
break; |
case PrintfQualifierShort: |
size = sizeof(unsigned short); |
number = (__u64)va_arg(ap, unsigned int); |
break; |
case PrintfQualifierInt: |
size = sizeof(unsigned int); |
number = (__u64)va_arg(ap, unsigned int); |
break; |
case PrintfQualifierLong: |
size = sizeof(unsigned long); |
number = (__u64)va_arg(ap, unsigned long); |
break; |
case PrintfQualifierLongLong: |
size = sizeof(unsigned long long); |
number = (__u64)va_arg(ap, unsigned long long); |
break; |
case PrintfQualifierPointer: |
size = sizeof(void *); |
number = (__u64)(unsigned long)va_arg(ap, void *); |
break; |
case PrintfQualifierNative: |
size = sizeof(__native); |
number = (__u64)va_arg(ap, __native); |
break; |
default: /* Unknown qualifier */ |
counter = -counter; |
goto out; |
} |
if (flags & __PRINTF_FLAG_SIGNED) { |
if (number & (0x1 << (size*8 - 1))) { |
flags |= __PRINTF_FLAG_NEGATIVE; |
if (size == sizeof(__u64)) { |
number = -((__s64)number); |
} else { |
number = ~number; |
number &= (~((0xFFFFFFFFFFFFFFFFll) << (size * 8))); |
number++; |
} |
} |
} |
if ((retval = print_number(number, width, precision, base, flags)) == EOF ) { |
counter = -counter; |
goto out; |
}; |
counter += retval; |
j = i + 1; |
} |
next_char: |
++i; |
} |
if (i > j) { |
if ((retval = putnchars(&fmt[j], (__native)(i - j))) == EOF) { /* error */ |
counter = -counter; |
goto out; |
} |
counter += retval; |
} |
out: |
spinlock_unlock(&printflock); |
interrupts_restore(irqpri); |
va_end(ap); |
return counter; |
} |
//kernel/trunk/generic/src/debug/symtab.c |
---|
0,0 → 1,190 |
/* |
* Copyright (C) 2005 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <symtab.h> |
#include <typedefs.h> |
#include <arch/byteorder.h> |
#include <func.h> |
#include <print.h> |
/** Return entry that seems most likely to correspond to address |
* |
* Return entry that seems most likely to correspond |
* to address passed in the argument. |
* |
* @param addr Address. |
* |
* @return Pointer to respective symbol string on success, NULL otherwise. |
*/ |
char * get_symtab_entry(__native addr) |
{ |
count_t i; |
for (i=1;symbol_table[i].address_le;++i) { |
if (addr < __u64_le2host(symbol_table[i].address_le)) |
break; |
} |
if (addr >= __u64_le2host(symbol_table[i-1].address_le)) |
return symbol_table[i-1].symbol_name; |
return NULL; |
} |
/** Find the next symbol that matches the search string
 *
 * Scans symbol_table starting at *startpos. The search string may
 * contain a ':' (e.g. "file.c:func"); the symbol name is then aligned
 * so that both colons coincide before comparing.
 *
 * @param name - search string
 * @param startpos - starting position, changes to found position
 * @return Pointer to the part of string that should be completed or NULL
 */
static char * symtab_search_one(const char *name, int *startpos)
{
	int namelen = strlen(name);
	char *curname;
	int i,j;
	int colonoffset = -1;	/* offset of ':' in name, -1 when absent */

	for (i=0;name[i];i++)
		if (name[i] == ':') {
			colonoffset = i;
			break;
		}

	for (i=*startpos;symbol_table[i].address_le;++i) {
		/* Find a ':' in name */
		curname = symbol_table[i].symbol_name;
		for (j=0; curname[j] && curname[j] != ':'; j++)
			;
		/* symbols without a ':' in their name are skipped */
		if (!curname[j])
			continue;
		/* shift so the ':' in curname lines up with the one in
		 * name (or with its start when name has no ':') */
		j -= colonoffset;
		curname += j;

		/* NOTE(review): strlen() returns unsigned __native, so
		 * this comparison mixes signedness - assumed benign for
		 * realistic symbol lengths. */
		if (strlen(curname) < namelen)
			continue;

		if (strncmp(curname, name, namelen) == 0) {
			*startpos = i;
			return curname+namelen;
		}
	}
	return NULL;
}
/** Return address that corresponds to the entry |
* |
* Search symbol table, and if there is one match, return it |
* |
* @param name Name of the symbol |
* @return 0 - Not found, -1 - Duplicate symbol, other - address of symbol |
*/ |
__address get_symbol_addr(const char *name) |
{ |
count_t found = 0; |
__address addr = NULL; |
char *hint; |
int i; |
i = 0; |
while ((hint=symtab_search_one(name, &i))) { |
if (!strlen(hint)) { |
addr = __u64_le2host(symbol_table[i].address_le); |
found++; |
} |
i++; |
} |
if (found > 1) |
return ((__address) -1); |
return addr; |
} |
/** Find symbols that match parameter and prints them */ |
void symtab_print_search(const char *name) |
{ |
int i; |
__address addr; |
char *realname; |
i = 0; |
while (symtab_search_one(name, &i)) { |
addr = __u64_le2host(symbol_table[i].address_le); |
realname = symbol_table[i].symbol_name; |
printf("%.*p: %s\n", sizeof(__address) * 2, addr, realname); |
i++; |
} |
} |
/** Symtab completion |
* |
* @param input - Search string, completes to symbol name |
* @returns - 0 - nothing found, 1 - success, >1 print duplicates |
*/ |
int symtab_compl(char *input) |
{ |
char output[MAX_SYMBOL_NAME+1]; |
int startpos = 0; |
char *foundtxt; |
int found = 0; |
int i; |
char *name = input; |
/* Allow completion of pointers */ |
if (name[0] == '*' || name[0] == '&') |
name++; |
/* Do not print everything */ |
if (!strlen(name)) |
return 0; |
output[0] = '\0'; |
while ((foundtxt = symtab_search_one(name, &startpos))) { |
startpos++; |
if (!found) |
strncpy(output, foundtxt, strlen(foundtxt)+1); |
else { |
for (i=0; output[i] && foundtxt[i] && output[i]==foundtxt[i]; i++) |
; |
output[i] = '\0'; |
} |
found++; |
} |
if (!found) |
return 0; |
if (found > 1 && !strlen(output)) { |
printf("\n"); |
startpos = 0; |
while ((foundtxt = symtab_search_one(name, &startpos))) { |
printf("%s\n", symbol_table[startpos].symbol_name); |
startpos++; |
} |
} |
strncpy(input, output, MAX_SYMBOL_NAME); |
return found; |
} |
//kernel/trunk/generic/src/synch/rwlock.c |
---|
0,0 → 1,380 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file rwlock.c |
* @brief Reader/Writer locks. |
* |
* A reader/writer lock can be held by multiple readers at a time. |
* Or it can be exclusively held by a sole writer at a time. |
* |
* These locks are not recursive. |
* Because technique called direct hand-off is used, neither readers |
* nor writers will suffer starvation. |
* |
* If there is a writer followed by a reader waiting for the rwlock |
* and the writer times out, all leading readers are automatically woken up |
* and allowed in. |
*/ |
/* |
* NOTE ON rwlock_holder_type |
* This field is set on an attempt to acquire the exclusive mutex |
* to the respective value depending whether the caller is a reader |
* or a writer. The field is examined only if the thread had been |
* previously blocked on the exclusive mutex. Thus it is save |
* to store the rwlock type in the thread structure, because |
* each thread can block on only one rwlock at a time. |
*/ |
#include <synch/rwlock.h> |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <synch/waitq.h> |
#include <synch/synch.h> |
#include <adt/list.h> |
#include <typedefs.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <proc/thread.h> |
#include <panic.h> |
#define ALLOW_ALL 0 |
#define ALLOW_READERS_ONLY 1 |
static void let_others_in(rwlock_t *rwl, int readers_only); |
static void release_spinlock(void *arg); |
/** Initialize reader/writer lock |
* |
* Initialize reader/writer lock. |
* |
* @param rwl Reader/Writer lock. |
*/ |
void rwlock_initialize(rwlock_t *rwl) { |
spinlock_initialize(&rwl->lock, "rwlock_t"); |
mutex_initialize(&rwl->exclusive); |
rwl->readers_in = 0; |
} |
/** Acquire reader/writer lock for writing
 *
 * Acquire reader/writer lock for writing.
 * Timeout and willingness to block may be specified.
 *
 * (The original header said "for reading" -- it was swapped with the
 * header of _rwlock_read_lock_timeout().)
 *
 * @param rwl Reader/Writer lock.
 * @param usec Timeout in microseconds.
 * @param trylock Switches between blocking and non-blocking mode.
 *
 * For exact description of possible combinations of
 * usec and trylock, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
	ipl_t ipl;
	int rc;

	/*
	 * Record that this thread would block as a writer, in case it
	 * goes to sleep on the exclusive mutex (see NOTE ON
	 * rwlock_holder_type above).
	 */
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->rwlock_holder_type = RWLOCK_WRITER;
	spinlock_unlock(&THREAD->lock);
	interrupts_restore(ipl);

	/*
	 * Writers take the easy part.
	 * They just need to acquire the exclusive mutex.
	 */
	rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
	if (SYNCH_FAILED(rc)) {
		/*
		 * Lock operation timed out.
		 * The state of rwl is UNKNOWN at this point.
		 * No claims about its holder can be made.
		 */
		ipl = interrupts_disable();
		spinlock_lock(&rwl->lock);
		/*
		 * Now when rwl is locked, we can inspect it again.
		 * If it is held by some readers already, we can let
		 * readers from the head of the wait queue in.
		 */
		if (rwl->readers_in)
			let_others_in(rwl, ALLOW_READERS_ONLY);
		spinlock_unlock(&rwl->lock);
		interrupts_restore(ipl);
	}

	return rc;
}
/** Acquire reader/writer lock for reading
 *
 * Acquire reader/writer lock for reading.
 * Timeout and willingness to block may be specified.
 *
 * (The original header said "for writing" -- it was swapped with the
 * header of _rwlock_write_lock_timeout().)
 *
 * @param rwl Reader/Writer lock.
 * @param usec Timeout in microseconds.
 * @param trylock Switches between blocking and non-blocking mode.
 *
 * For exact description of possible combinations of
 * usec and trylock, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout();
 *         ESYNCH_OK_ATOMIC when the lock was obtained without sleeping.
 */
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
	int rc;
	ipl_t ipl;

	/*
	 * Record that this thread would block as a reader, in case it
	 * goes to sleep on the exclusive mutex.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->rwlock_holder_type = RWLOCK_READER;
	spinlock_unlock(&THREAD->lock);

	spinlock_lock(&rwl->lock);

	/*
	 * Find out whether we can get what we want without blocking.
	 */
	rc = mutex_trylock(&rwl->exclusive);
	if (SYNCH_FAILED(rc)) {
		/*
		 * 'exclusive' mutex is being held by someone else.
		 * If the holder is a reader and there is no one
		 * else waiting for it, we can enter the critical
		 * section.
		 */
		if (rwl->readers_in) {
			spinlock_lock(&rwl->exclusive.sem.wq.lock);
			if (list_empty(&rwl->exclusive.sem.wq.head)) {
				/*
				 * We can enter.
				 */
				spinlock_unlock(&rwl->exclusive.sem.wq.lock);
				goto shortcut;
			}
			spinlock_unlock(&rwl->exclusive.sem.wq.lock);
		}

		/*
		 * In order to prevent a race condition when a reader
		 * could block another reader at the head of the waitq,
		 * we register a function to unlock rwl->lock
		 * after this thread is put asleep.
		 */
#ifdef CONFIG_SMP
		thread_register_call_me(release_spinlock, &rwl->lock);
#else
		thread_register_call_me(release_spinlock, NULL);
#endif

		rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
		switch (rc) {
		case ESYNCH_WOULD_BLOCK:
			/*
			 * release_spinlock() wasn't called
			 */
			thread_register_call_me(NULL, NULL);
			spinlock_unlock(&rwl->lock);
			/* fallthrough */
		case ESYNCH_TIMEOUT:
			/*
			 * The sleep timeouted.
			 * We just restore interrupt priority level.
			 */
			/* fallthrough */
		case ESYNCH_OK_BLOCKED:
			/*
			 * We were woken with rwl->readers_in already incremented.
			 * Note that this arrangement avoids race condition between
			 * two concurrent readers. (Race is avoided if 'exclusive' is
			 * locked at the same time as 'readers_in' is incremented.
			 * Same time means both events happen atomically when
			 * rwl->lock is held.)
			 */
			interrupts_restore(ipl);
			break;
		case ESYNCH_OK_ATOMIC:
			panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n");
			break;
		default:
			panic("invalid ESYNCH\n");
			break;
		}
		return rc;
	}

shortcut:
	/*
	 * We can increment readers_in only if we didn't go to sleep.
	 * For sleepers, rwlock_let_others_in() will do the job.
	 */
	rwl->readers_in++;
	spinlock_unlock(&rwl->lock);
	interrupts_restore(ipl);

	return ESYNCH_OK_ATOMIC;
}
/** Release reader/writer lock held by writer |
* |
* Release reader/writer lock held by writer. |
* Handoff reader/writer lock ownership directly |
* to waiting readers or a writer. |
* |
* @param rwl Reader/Writer lock. |
*/ |
void rwlock_write_unlock(rwlock_t *rwl) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&rwl->lock); |
let_others_in(rwl, ALLOW_ALL); |
spinlock_unlock(&rwl->lock); |
interrupts_restore(ipl); |
} |
/** Release reader/writer lock held by reader |
* |
* Release reader/writer lock held by reader. |
* Handoff reader/writer lock ownership directly |
* to a waiting writer or don't do anything if more |
* readers poses the lock. |
* |
* @param rwl Reader/Writer lock. |
*/ |
void rwlock_read_unlock(rwlock_t *rwl) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&rwl->lock); |
if (!--rwl->readers_in) |
let_others_in(rwl, ALLOW_ALL); |
spinlock_unlock(&rwl->lock); |
interrupts_restore(ipl); |
} |
/** Direct handoff of reader/writer lock ownership. |
* |
* Direct handoff of reader/writer lock ownership |
* to waiting readers or a writer. |
* |
* Must be called with rwl->lock locked. |
* Must be called with interrupts_disable()'d. |
* |
* @param rwl Reader/Writer lock. |
* @param readers_only See the description below. |
* |
* If readers_only is false: (unlock scenario) |
* Let the first sleeper on 'exclusive' mutex in, no matter |
* whether it is a reader or a writer. If there are more leading |
* readers in line, let each of them in. |
* |
* Otherwise: (timeout scenario) |
* Let all leading readers in. |
*/ |
void let_others_in(rwlock_t *rwl, int readers_only) |
{ |
rwlock_type_t type = RWLOCK_NONE; |
thread_t *t = NULL; |
bool one_more = true; |
spinlock_lock(&rwl->exclusive.sem.wq.lock); |
if (!list_empty(&rwl->exclusive.sem.wq.head)) |
t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link); |
do { |
if (t) { |
spinlock_lock(&t->lock); |
type = t->rwlock_holder_type; |
spinlock_unlock(&t->lock); |
} |
/* |
* If readers_only is true, we wake all leading readers |
* if and only if rwl is locked by another reader. |
* Assumption: readers_only ==> rwl->readers_in |
*/ |
if (readers_only && (type != RWLOCK_READER)) |
break; |
if (type == RWLOCK_READER) { |
/* |
* Waking up a reader. |
* We are responsible for incrementing rwl->readers_in for it. |
*/ |
rwl->readers_in++; |
} |
/* |
* Only the last iteration through this loop can increment |
* rwl->exclusive.sem.wq.missed_wakeup's. All preceeding |
* iterations will wake up a thread. |
*/ |
/* We call the internal version of waitq_wakeup, which |
* relies on the fact that the waitq is already locked. |
*/ |
_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST); |
t = NULL; |
if (!list_empty(&rwl->exclusive.sem.wq.head)) { |
t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link); |
if (t) { |
spinlock_lock(&t->lock); |
if (t->rwlock_holder_type != RWLOCK_READER) |
one_more = false; |
spinlock_unlock(&t->lock); |
} |
} |
} while ((type == RWLOCK_READER) && t && one_more); |
spinlock_unlock(&rwl->exclusive.sem.wq.lock); |
} |
/** Release spinlock callback |
* |
* This is a callback function invoked from the scheduler. |
* The callback is registered in _rwlock_read_lock_timeout(). |
* |
* @param arg Spinlock. |
*/ |
void release_spinlock(void *arg) |
{ |
spinlock_unlock((spinlock_t *) arg); |
} |
//kernel/trunk/generic/src/synch/mutex.c |
---|
0,0 → 1,72 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <synch/mutex.h> |
#include <synch/semaphore.h> |
#include <synch/synch.h> |
/** Initialize mutex
 *
 * Initialize mutex. A mutex is implemented as a binary semaphore:
 * the underlying semaphore counter starts at 1 (unlocked).
 *
 * @param mtx Mutex.
 */
void mutex_initialize(mutex_t *mtx)
{
	semaphore_initialize(&mtx->sem, 1);
}
/** Acquire mutex
 *
 * Acquire mutex.
 * Timeout mode and non-blocking mode can be requested.
 * Thin wrapper: delegates directly to the semaphore down operation.
 *
 * @param mtx Mutex.
 * @param usec Timeout in microseconds.
 * @param trylock Switches between blocking and non-blocking mode.
 *
 * For exact description of possible combinations of
 * usec and trylock, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock)
{
	return _semaphore_down_timeout(&mtx->sem, usec, trylock);
}
/** Release mutex
 *
 * Release mutex. Wakes up the first waiter, if any, by raising the
 * underlying semaphore.
 *
 * @param mtx Mutex.
 */
void mutex_unlock(mutex_t *mtx)
{
	semaphore_up(&mtx->sem);
}
//kernel/trunk/generic/src/synch/semaphore.c |
---|
0,0 → 1,86 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <synch/semaphore.h> |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
#include <synch/synch.h> |
#include <arch/asm.h> |
#include <arch.h> |
/** Initialize semaphore
 *
 * Initialize semaphore.
 * The counter is represented by the wait queue's missed_wakeups count:
 * each pre-recorded missed wakeup lets one down() operation pass
 * without blocking.
 *
 * @param s Semaphore.
 * @param val Maximal number of threads allowed to enter critical section.
 */
void semaphore_initialize(semaphore_t *s, int val)
{
	ipl_t ipl;

	waitq_initialize(&s->wq);

	/* The wait queue lock is IRQ-safe; take it with interrupts off. */
	ipl = interrupts_disable();
	spinlock_lock(&s->wq.lock);
	s->wq.missed_wakeups = val;
	spinlock_unlock(&s->wq.lock);
	interrupts_restore(ipl);
}
/** Semaphore down
 *
 * Semaphore down.
 * Conditional mode and mode with timeout can be requested.
 * Thin wrapper: delegates directly to waitq_sleep_timeout().
 *
 * @param s Semaphore.
 * @param usec Timeout in microseconds.
 * @param trydown Switches between blocking and non-blocking mode.
 *
 * For exact description of possible combinations of
 * usec and trydown, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown)
{
	return waitq_sleep_timeout(&s->wq, usec, trydown);
}
/** Semaphore up
 *
 * Semaphore up. Wakes up the first sleeper in the wait queue; if
 * nobody sleeps, a missed wakeup is recorded, raising the counter.
 *
 * @param s Semaphore.
 */
void semaphore_up(semaphore_t *s)
{
	waitq_wakeup(&s->wq, WAKEUP_FIRST);
}
//kernel/trunk/generic/src/synch/waitq.c |
---|
0,0 → 1,348 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file waitq.c |
* @brief Wait queue. |
* |
* Wait queue is the basic synchronization primitive upon all |
* other synchronization primitives build. |
* |
* It allows threads to wait for an event in first-come, first-served |
* fashion. Conditional operation as well as timeouts and interruptions |
* are supported. |
*/ |
#include <synch/waitq.h> |
#include <synch/synch.h> |
#include <synch/spinlock.h> |
#include <proc/thread.h> |
#include <proc/scheduler.h> |
#include <arch/asm.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <time/timeout.h> |
#include <arch.h> |
#include <context.h> |
#include <adt/list.h> |
static void waitq_timeouted_sleep(void *data); |
/** Initialize wait queue |
* |
* Initialize wait queue. |
* |
* @param wq Pointer to wait queue to be initialized. |
*/ |
void waitq_initialize(waitq_t *wq) |
{ |
spinlock_initialize(&wq->lock, "waitq_lock"); |
list_initialize(&wq->head); |
wq->missed_wakeups = 0; |
} |
/** Handle timeout during waitq_sleep_timeout() call |
* |
* This routine is called when waitq_sleep_timeout() timeouts. |
* Interrupts are disabled. |
* |
* It is supposed to try to remove 'its' thread from the wait queue; |
* it can eventually fail to achieve this goal when these two events |
* overlap. In that case it behaves just as though there was no |
* timeout at all. |
* |
* @param data Pointer to the thread that called waitq_sleep_timeout(). |
*/ |
void waitq_timeouted_sleep(void *data) |
{ |
thread_t *t = (thread_t *) data; |
waitq_t *wq; |
bool do_wakeup = false; |
spinlock_lock(&threads_lock); |
if (!thread_exists(t)) |
goto out; |
grab_locks: |
spinlock_lock(&t->lock); |
if ((wq = t->sleep_queue)) { /* assignment */ |
if (!spinlock_trylock(&wq->lock)) { |
spinlock_unlock(&t->lock); |
goto grab_locks; /* avoid deadlock */ |
} |
list_remove(&t->wq_link); |
t->saved_context = t->sleep_timeout_context; |
do_wakeup = true; |
spinlock_unlock(&wq->lock); |
t->sleep_queue = NULL; |
} |
t->timeout_pending = false; |
spinlock_unlock(&t->lock); |
if (do_wakeup) |
thread_ready(t); |
out: |
spinlock_unlock(&threads_lock); |
} |
/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a waitqueue.
 * If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
	waitq_t *wq;
	bool do_wakeup = false;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	/* The thread may have ceased to exist in the meantime. */
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		/*
		 * Lock order here is thread, then waitq; use trylock and
		 * retry to avoid deadlock with a concurrent wakeup that
		 * takes them in the opposite order.
		 */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}
		list_remove(&t->wq_link);
		/* Make the sleeper resume in its interruption context. */
		t->saved_context = t->sleep_interruption_context;
		do_wakeup = true;
		spinlock_unlock(&wq->lock);
		t->sleep_queue = NULL;
	}
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}
/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in FIFO fashion in a structure called wait queue.
 *
 * This function is really basic in that other functions as waitq_sleep()
 * and all the *_timeout() functions use it.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param nonblocking Blocking vs. non-blocking operation mode switch.
 *
 * If usec is greater than zero, regardless of the value of nonblocking,
 * the call will not return until either timeout or wakeup comes.
 *
 * If usec is zero and nonblocking is zero (false), the call
 * will not return until wakeup comes.
 *
 * If usec is zero and nonblocking is non-zero (true), the call will
 * immediately return, reporting either success or failure.
 *
 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 *         ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
 *
 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
 * of the call there was no pending wakeup.
 *
 * @li ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
{
	volatile ipl_t ipl; /* must be live after context_restore() */

restart:
	ipl = interrupts_disable();

	/*
	 * Busy waiting for a delayed timeout.
	 * This is an important fix for the race condition between
	 * a delayed timeout and a next call to waitq_sleep_timeout().
	 * Simply, the thread is not allowed to go to sleep if
	 * there are timeouts in progress.
	 */
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) {
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	spinlock_unlock(&THREAD->lock);

	spinlock_lock(&wq->lock);

	/* checks whether to go to sleep at all */
	if (wq->missed_wakeups) {
		/* Consume a pending wakeup; no sleep needed. */
		wq->missed_wakeups--;
		spinlock_unlock(&wq->lock);
		interrupts_restore(ipl);
		return ESYNCH_OK_ATOMIC;
	}
	else {
		if (nonblocking && (usec == 0)) {
			/* return immediatelly instead of going to sleep */
			spinlock_unlock(&wq->lock);
			interrupts_restore(ipl);
			return ESYNCH_WOULD_BLOCK;
		}
	}

	/*
	 * Now we are firmly decided to go to sleep.
	 */
	spinlock_lock(&THREAD->lock);

	/*
	 * Set context that will be restored if the sleep
	 * of this thread is ever interrupted.
	 */
	if (!context_save(&THREAD->sleep_interruption_context)) {
		/* Short emulation of scheduler() return code. */
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		return ESYNCH_INTERRUPTED;
	}

	if (usec) {
		/* We use the timeout variant. */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/* Short emulation of scheduler() return code. */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(ipl);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = true;
		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_timeouted_sleep, THREAD);
	}

	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Suspend execution.
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	scheduler(); /* wq->lock is released in scheduler_separated_stack() */
	interrupts_restore(ipl);

	return ESYNCH_OK_BLOCKED;
}
/** Wake up first thread sleeping in a wait queue
 *
 * Wake up first thread sleeping in a wait queue.
 * This is the SMP- and IRQ-safe wrapper meant for
 * general use: it disables interrupts and takes wq->lock
 * before delegating to _waitq_wakeup_unsafe().
 *
 * Besides its 'normal' wakeup operation, it attempts
 * to unregister possible timeout.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 *        will be woken up and missed count will be zeroed.
 */
void waitq_wakeup(waitq_t *wq, bool all)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&wq->lock);

	_waitq_wakeup_unsafe(wq, all);

	spinlock_unlock(&wq->lock);
	interrupts_restore(ipl);
}
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup() |
* |
* This is the internal SMP- and IRQ-unsafe version |
* of waitq_wakeup(). It assumes wq->lock is already |
* locked and interrupts are already disabled. |
* |
* @param wq Pointer to wait queue. |
* @param all If this is non-zero, all sleeping threads |
* will be woken up and missed count will be zeroed. |
*/ |
void _waitq_wakeup_unsafe(waitq_t *wq, bool all) |
{ |
thread_t *t; |
loop: |
if (list_empty(&wq->head)) { |
wq->missed_wakeups++; |
if (all) |
wq->missed_wakeups = 0; |
return; |
} |
t = list_get_instance(wq->head.next, thread_t, wq_link); |
list_remove(&t->wq_link); |
spinlock_lock(&t->lock); |
if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) |
t->timeout_pending = false; |
t->sleep_queue = NULL; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
if (all) |
goto loop; |
} |
//kernel/trunk/generic/src/synch/futex.c |
---|
0,0 → 1,256 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* Kernel backend for futexes. |
* Deallocation of orphaned kernel-side futex structures is not currently implemented. |
*/ |
#include <synch/futex.h> |
#include <synch/rwlock.h> |
#include <synch/spinlock.h> |
#include <synch/synch.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/slab.h> |
#include <proc/thread.h> |
#include <genarch/mm/page_pt.h> |
#include <genarch/mm/page_ht.h> |
#include <adt/hash_table.h> |
#include <adt/list.h> |
#include <arch.h> |
#include <align.h> |
#include <panic.h> |
#include <errno.h> |
#include <print.h> |
#define FUTEX_HT_SIZE	1024	/* keep it a power of 2 */

/* Forward declarations of this file's internal helpers. */
static void futex_initialize(futex_t *futex);
static futex_t *futex_find(__address paddr);

/* Hash table operation callbacks (defined later in this file). */
static index_t futex_ht_hash(__native *key);
static bool futex_ht_compare(__native *key, count_t keys, link_t *item);
static void futex_ht_remove_callback(link_t *item);

/** Read-write lock protecting global futex hash table. */
static rwlock_t futex_ht_lock;

/** Futex hash table. */
static hash_table_t futex_ht;

/** Futex hash table operations. */
static hash_table_operations_t futex_ht_ops = {
	.hash = futex_ht_hash,
	.compare = futex_ht_compare,
	.remove_callback = futex_ht_remove_callback
};
/** Initialize futex subsystem.
 *
 * Create the global hash table that maps physical addresses of futex
 * counters to kernel futex structures, and initialize the rwlock
 * protecting it.
 */
void futex_init(void)
{
	rwlock_initialize(&futex_ht_lock);
	hash_table_create(&futex_ht, FUTEX_HT_SIZE, 1, &futex_ht_ops);
}
/** Initialize kernel futex structure.
 *
 * Prepares the wait queue and hash table link of a freshly allocated
 * futex structure. The physical address is reset to 0; the caller is
 * expected to fill it in afterwards (see futex_find()).
 *
 * @param futex Kernel futex structure.
 */
void futex_initialize(futex_t *futex)
{
	waitq_initialize(&futex->wq);
	link_initialize(&futex->ht_link);
	futex->paddr = 0;
}
/** Sleep in futex wait queue. |
* |
* @param uaddr Userspace address of the futex counter. |
* @param usec If non-zero, number of microseconds this thread is willing to sleep. |
* @param trydown If usec is zero and trydown is non-zero, conditional operation will be attempted. |
* |
* @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h. |
* If there is no physical mapping for uaddr ENOENT is returned. |
*/ |
__native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int trydown) |
{ |
futex_t *futex; |
__address paddr; |
pte_t *t; |
ipl_t ipl; |
ipl = interrupts_disable(); |
/* |
* Find physical address of futex counter. |
*/ |
page_table_lock(AS, true); |
t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE)); |
if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { |
page_table_unlock(AS, true); |
interrupts_restore(ipl); |
return (__native) ENOENT; |
} |
paddr = PFN2ADDR(PTE_GET_FRAME(t)) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); |
page_table_unlock(AS, true); |
interrupts_restore(ipl); |
futex = futex_find(paddr); |
return (__native) waitq_sleep_timeout(&futex->wq, usec, trydown); |
} |
/** Wakeup one thread waiting in futex wait queue.
 *
 * @param uaddr Userspace address of the futex counter.
 *
 * @return ENOENT if there is no physical mapping for uaddr; zero otherwise.
 *         Note that a missing kernel futex structure is not an error:
 *         futex_find() creates one on demand, and waking an empty wait
 *         queue is harmless.
 */
__native sys_futex_wakeup(__address uaddr)
{
	futex_t *futex;
	__address paddr;
	pte_t *t;
	ipl_t ipl;

	ipl = interrupts_disable();

	/*
	 * Find physical address of futex counter.
	 * page_table_lock() serializes this walk against concurrent
	 * modifications of the address space.
	 */
	page_table_lock(AS, true);
	t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE));
	if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
		/* The counter page is not mapped. */
		page_table_unlock(AS, true);
		interrupts_restore(ipl);
		return (__native) ENOENT;
	}
	/* Physical address = frame base plus offset within the page. */
	paddr = PFN2ADDR(PTE_GET_FRAME(t)) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE));
	page_table_unlock(AS, true);
	interrupts_restore(ipl);

	futex = futex_find(paddr);

	waitq_wakeup(&futex->wq, WAKEUP_FIRST);

	return 0;
}
/** Find kernel address of the futex structure corresponding to paddr.
 *
 * If the structure does not exist already, a new one is created.
 *
 * @param paddr Physical address of the userspace futex counter.
 *
 * @return Address of the kernel futex structure.
 */
futex_t *futex_find(__address paddr)
{
	link_t *item;
	futex_t *futex;

	/*
	 * Find the respective futex structure
	 * or allocate new one if it does not exist already.
	 */
	rwlock_read_lock(&futex_ht_lock);
	item = hash_table_find(&futex_ht, &paddr);
	if (item) {
		futex = hash_table_get_instance(item, futex_t, ht_link);
		rwlock_read_unlock(&futex_ht_lock);
	} else {
		/*
		 * Upgrade to writer is not currently supported,
		 * Therefore, it is necessary to release the read lock
		 * and reacquire it as a writer.
		 */
		rwlock_read_unlock(&futex_ht_lock);
		rwlock_write_lock(&futex_ht_lock);
		/*
		 * Avoid possible race condition by searching
		 * the hash table once again with write access.
		 * Another thread may have inserted the futex for
		 * the same paddr while we held no lock at all.
		 */
		item = hash_table_find(&futex_ht, &paddr);
		if (item) {
			/* Somebody else beat us to it; reuse their futex. */
			futex = hash_table_get_instance(item, futex_t, ht_link);
			rwlock_write_unlock(&futex_ht_lock);
		} else {
			/* Still missing: allocate and publish a new futex. */
			futex = (futex_t *) malloc(sizeof(futex_t), 0);
			futex_initialize(futex);
			futex->paddr = paddr;
			hash_table_insert(&futex_ht, &paddr, &futex->ht_link);
			rwlock_write_unlock(&futex_ht_lock);
		}
	}

	return futex;
}
/** Compute hash index into futex hash table. |
* |
* @param key Address where the key (i.e. physical address of futex counter) is stored. |
* |
* @return Index into futex hash table. |
*/ |
index_t futex_ht_hash(__native *key) |
{ |
return *key & (FUTEX_HT_SIZE-1); |
} |
/** Compare futex hash table item with a key. |
* |
* @param key Address where the key (i.e. physical address of futex counter) is stored. |
* |
* @return True if the item matches the key. False otherwise. |
*/ |
bool futex_ht_compare(__native *key, count_t keys, link_t *item) |
{ |
futex_t *futex; |
ASSERT(keys == 1); |
futex = hash_table_get_instance(item, futex_t, ht_link); |
return *key == futex->paddr; |
} |
/** Callback for removal items from futex hash table. |
* |
* @param item Item removed from the hash table. |
*/ |
void futex_ht_remove_callback(link_t *item) |
{ |
futex_t *futex; |
futex = hash_table_get_instance(item, futex_t, ht_link); |
free(futex); |
} |
//kernel/trunk/generic/src/synch/condvar.c |
---|
0,0 → 1,91 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <synch/condvar.h> |
#include <synch/mutex.h> |
#include <synch/waitq.h> |
#include <synch/synch.h> |
/** Initialize condition variable
 *
 * Initialize condition variable by preparing its wait queue.
 *
 * @param cv Condition variable.
 */
void condvar_initialize(condvar_t *cv)
{
	waitq_initialize(&cv->wq);
}
/** Signal the condition has become true
 *
 * Signal the condition has become true
 * to the first waiting thread by waking it up.
 *
 * @param cv Condition variable.
 */
void condvar_signal(condvar_t *cv)
{
	/* Wake at most one waiter. */
	waitq_wakeup(&cv->wq, WAKEUP_FIRST);
}
/** Signal the condition has become true
 *
 * Signal the condition has become true
 * to all waiting threads by waking them up.
 *
 * @param cv Condition variable.
 */
void condvar_broadcast(condvar_t *cv)
{
	/* Wake every thread currently sleeping on the condition. */
	waitq_wakeup(&cv->wq, WAKEUP_ALL);
}
/** Wait for the condition becoming true |
* |
* Wait for the condition becoming true. |
* |
* @param cv Condition variable. |
* @param mtx Mutex. |
* @param usec Timeout value in microseconds. |
* @param trywait Blocking versus non-blocking operation mode switch. |
* |
* For exact description of possible combinations of |
* usec and trywait, see comment for waitq_sleep_timeout(). |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int trywait) |
{ |
int rc; |
mutex_unlock(mtx); |
rc = waitq_sleep_timeout(&cv->wq, usec, trywait); |
mutex_lock(mtx); |
return rc; |
} |
//kernel/trunk/generic/src/synch/spinlock.c |
---|
0,0 → 1,121 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <synch/spinlock.h> |
#include <atomic.h> |
#include <arch/barrier.h> |
#include <arch.h> |
#include <preemption.h> |
#include <print.h> |
#include <debug.h> |
#include <symtab.h> |
#ifdef CONFIG_SMP |
/** Initialize spinlock
 *
 * Initialize spinlock by marking it unlocked.
 *
 * @param sl Pointer to spinlock_t structure.
 * @param name Symbolic name of the lock; recorded only when
 *             CONFIG_DEBUG_SPINLOCK is enabled.
 */
void spinlock_initialize(spinlock_t *sl, char *name)
{
	atomic_set(&sl->val, 0);
#ifdef CONFIG_DEBUG_SPINLOCK
	sl->name = name;
#endif
}
/** Lock spinlock
 *
 * Lock spinlock.
 * This version has limited ability to report
 * possible occurrence of deadlock.
 *
 * @param sl Pointer to spinlock_t structure.
 */
#ifdef CONFIG_DEBUG_SPINLOCK
void spinlock_lock_debug(spinlock_t *sl)
{
	count_t i = 0;
	char *symbol;
	bool deadlock_reported = false;

	preemption_disable();
	while (test_and_set(&sl->val)) {
		/*
		 * Report a suspected deadlock after many failed attempts.
		 * printflock itself is excluded because the report goes
		 * through printf() and would recurse on that very lock.
		 */
		if (i++ > 300000 && sl != &printflock) {
			printf("cpu%d: looping on spinlock %.*p:%s, caller=%.*p",
			       CPU->id, sizeof(__address) * 2, sl, sl->name, sizeof(__address) * 2, CALLER);
			symbol = get_symtab_entry(CALLER);
			if (symbol)
				printf("(%s)", symbol);
			printf("\n");
			/* Restart the counter so the report repeats periodically. */
			i = 0;
			deadlock_reported = true;
		}
	}

	if (deadlock_reported)
		printf("cpu%d: not deadlocked\n", CPU->id);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
#endif
/** Lock spinlock conditionally |
* |
* Lock spinlock conditionally. |
* If the spinlock is not available at the moment, |
* signal failure. |
* |
* @param sl Pointer to spinlock_t structure. |
* |
* @return Zero on failure, non-zero otherwise. |
*/ |
int spinlock_trylock(spinlock_t *sl) |
{ |
int rc; |
preemption_disable(); |
rc = !test_and_set(&sl->val); |
/* |
* Prevent critical section code from bleeding out this way up. |
*/ |
CS_ENTER_BARRIER(); |
if (!rc) |
preemption_enable(); |
return rc; |
} |
#endif |
//kernel/trunk/generic/src/main/kinit.c |
---|
0,0 → 1,181 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file kinit.c |
* @brief Kernel initialization thread. |
* |
* This file contains kinit kernel thread which carries out |
* high level system initialization. |
* |
* This file is responsible for finishing SMP configuration |
* and creation of userspace init tasks. |
*/ |
#include <main/kinit.h> |
#include <config.h> |
#include <arch.h> |
#include <proc/scheduler.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <panic.h> |
#include <func.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <mm/page.h> |
#include <arch/mm/page.h> |
#include <mm/as.h> |
#include <mm/frame.h> |
#include <print.h> |
#include <memstr.h> |
#include <console/console.h> |
#include <interrupt.h> |
#include <console/kconsole.h> |
#include <security/cap.h> |
#ifdef CONFIG_SMP |
#include <arch/smp/mps.h> |
#endif /* CONFIG_SMP */ |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
#ifdef CONFIG_TEST |
#include <test.h> |
#endif /* CONFIG_TEST */ |
/** Kernel initialization thread. |
* |
* kinit takes care of higher level kernel |
* initialization (i.e. thread creation, |
* userspace initialization etc.). |
* |
* @param arg Not used. |
*/ |
void kinit(void *arg) |
{ |
thread_t *t; |
task_t *utask; |
interrupts_disable(); |
#ifdef CONFIG_SMP |
if (config.cpu_count > 1) { |
/* |
* Create the kmp thread and wait for its completion. |
* cpu1 through cpuN-1 will come up consecutively and |
* not mess together with kcpulb threads. |
* Just a beautification. |
*/ |
if ((t = thread_create(kmp, NULL, TASK, 0, "kmp"))) { |
spinlock_lock(&t->lock); |
t->flags |= X_WIRED; |
t->cpu = &cpus[0]; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
waitq_sleep(&kmp_completion_wq); |
} |
else panic("thread_create/kmp\n"); |
} |
#endif /* CONFIG_SMP */ |
/* |
* Now that all CPUs are up, we can report what we've found. |
*/ |
cpu_list(); |
#ifdef CONFIG_SMP |
if (config.cpu_count > 1) { |
int i; |
/* |
* For each CPU, create its load balancing thread. |
*/ |
for (i = 0; i < config.cpu_count; i++) { |
if ((t = thread_create(kcpulb, NULL, TASK, 0, "kcpulb"))) { |
spinlock_lock(&t->lock); |
t->flags |= X_WIRED; |
t->cpu = &cpus[i]; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
} |
else panic("thread_create/kcpulb\n"); |
} |
} |
#endif /* CONFIG_SMP */ |
/* |
* At this point SMP, if present, is configured. |
*/ |
arch_post_smp_init(); |
/* |
* Create kernel console. |
*/ |
if ((t = thread_create(kconsole, "kconsole", TASK, 0, "kconsole"))) |
thread_ready(t); |
else |
panic("thread_create/kconsole\n"); |
interrupts_enable(); |
count_t i; |
for (i = 0; i < init.cnt; i++) { |
/* |
* Run user tasks. |
*/ |
if (init.tasks[i].addr % FRAME_SIZE) |
panic("init[%d].addr is not frame aligned", i); |
utask = task_run_program((void *) init.tasks[i].addr, "USPACE"); |
if (utask) { |
/* |
* Set capabilities to init userspace tasks. |
*/ |
cap_set(utask, CAP_CAP | CAP_MEM_MANAGER | CAP_IO_MANAGER); |
if (!ipc_phone_0) |
ipc_phone_0 = &utask->answerbox; |
} else |
printf("Init task %zd not started.\n", i); |
} |
#ifdef CONFIG_TEST |
test(); |
#endif /* CONFIG_TEST */ |
if (!stdin) { |
while (1) { |
thread_sleep(1); |
printf("kinit... "); |
} |
} |
} |
//kernel/trunk/generic/src/main/main.c |
---|
0,0 → 1,310 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file main.c |
* @brief Main initialization kernel function for all processors. |
* |
* During kernel boot, all processors, after architecture dependent |
* initialization, start executing code found in this file. After |
* bringing up all subsystems, control is passed to scheduler(). |
* |
* The bootstrap processor starts executing main_bsp() while |
* the application processors start executing main_ap(). |
* |
* @see scheduler() |
* @see main_bsp() |
* @see main_ap() |
*/ |
#include <arch/asm.h> |
#include <context.h> |
#include <print.h> |
#include <panic.h> |
#include <debug.h> |
#include <config.h> |
#include <time/clock.h> |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <main/kinit.h> |
#include <main/version.h> |
#include <console/kconsole.h> |
#include <cpu.h> |
#include <align.h> |
#include <interrupt.h> |
#include <arch/mm/memory_init.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <genarch/mm/page_pt.h> |
#include <mm/tlb.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <synch/waitq.h> |
#include <synch/futex.h> |
#include <arch/arch.h> |
#include <arch.h> |
#include <arch/faddr.h> |
#include <typedefs.h> |
#include <ipc/ipc.h> |
#include <macros.h> |
#include <adt/btree.h> |
#ifdef CONFIG_SMP |
#include <arch/smp/apic.h> |
#include <arch/smp/mps.h> |
#endif /* CONFIG_SMP */ |
#include <smp/smp.h> |
config_t config; /**< Global configuration structure. */ |
init_t init = {0}; /**< Initial user-space tasks */ |
context_t ctx; |
/** |
 * These 'hardcoded' variables will be initialized by
* the linker or the low level assembler code with |
* appropriate sizes and addresses. |
*/ |
__address hardcoded_load_address = 0; |
size_t hardcoded_ktext_size = 0; |
size_t hardcoded_kdata_size = 0; |
void main_bsp(void); |
void main_ap(void); |
/* |
* These two functions prevent stack from underflowing during the |
* kernel boot phase when SP is set to the very top of the reserved |
* space. The stack could get corrupted by a fooled compiler-generated |
* pop sequence otherwise. |
*/ |
static void main_bsp_separated_stack(void); |
#ifdef CONFIG_SMP |
static void main_ap_separated_stack(void); |
#endif |
#define CONFIG_STACK_SIZE ((1<<STACK_FRAMES)*STACK_SIZE) |
/** Main kernel routine for bootstrap CPU.
 *
 * Initializes the kernel by bootstrap CPU.
 * This function passes control directly to
 * main_bsp_separated_stack().
 *
 * Assuming interrupts_disable().
 *
 */
void main_bsp(void)
{
	__address stackaddr;

	config.cpu_count = 1;
	config.cpu_active = 1;

	config.base = hardcoded_load_address;
	config.memory_size = get_memory_size();
	/* Kernel image size rounded up to whole pages. */
	config.kernel_size = ALIGN_UP(hardcoded_ktext_size + hardcoded_kdata_size, PAGE_SIZE);
	stackaddr = config.base + config.kernel_size;

	/* Avoid placing kernel on top of init */
	count_t i;
	bool overlap = false;
	for (i = 0; i < init.cnt; i++)
		if (PA_overlaps(stackaddr, CONFIG_STACK_SIZE, init.tasks[i].addr, init.tasks[i].size)) {
			/* Relocate the stack past the overlapping init image and
			 * grow that image's accounted size accordingly. */
			stackaddr = ALIGN_UP(init.tasks[i].addr + init.tasks[i].size, CONFIG_STACK_SIZE);
			init.tasks[i].size = ALIGN_UP(init.tasks[i].size, CONFIG_STACK_SIZE) + CONFIG_STACK_SIZE;
			overlap = true;
		}

	if (!overlap)
		config.kernel_size += CONFIG_STACK_SIZE;

	/*
	 * Switch to the freshly reserved stack and continue in
	 * main_bsp_separated_stack(); control never returns here.
	 */
	context_save(&ctx);
	context_set(&ctx, FADDR(main_bsp_separated_stack), stackaddr, THREAD_STACK_SIZE);
	context_restore(&ctx);
	/* not reached */
}
/** Main kernel routine for bootstrap CPU using new stack.
 *
 * Second part of main_bsp(). Brings up all kernel subsystems in
 * dependency order and finally enters the scheduler.
 *
 */
void main_bsp_separated_stack(void)
{
	task_t *k;
	thread_t *t;
	count_t i;

	the_initialize(THE);

	/*
	 * kconsole data structures must be initialized very early
	 * because other subsystems will register their respective
	 * commands.
	 */
	kconsole_init();

	/*
	 * Exception handler initialization, before architecture
	 * starts adding its own handlers
	 */
	exc_init();

	/*
	 * Memory management subsystems initialization.
	 */
	arch_pre_mm_init();
	frame_init();		/* Initialize at least 1 memory segment big enough for slab to work */
	slab_cache_init();
	btree_init();
	as_init();
	page_init();
	tlb_init();
	arch_post_mm_init();

	version_print();
	printf("%.*p: hardcoded_ktext_size=%zdK, hardcoded_kdata_size=%zdK\n", sizeof(__address) * 2, config.base, hardcoded_ktext_size / 1024, hardcoded_kdata_size / 1024);

	arch_pre_smp_init();
	smp_init();
	slab_enable_cpucache();	/* Slab must be initialized AFTER we know the number of processors */

	printf("config.memory_size=%zdM\n", config.memory_size / (1024 * 1024));
	printf("config.cpu_count=%zd\n", config.cpu_count);

	cpu_init();

	calibrate_delay_loop();
	timeout_init();
	scheduler_init();
	task_init();
	thread_init();
	futex_init();

	/* Report where the loader placed the initial userspace images. */
	for (i = 0; i < init.cnt; i++)
		printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, sizeof(__address) * 2, init.tasks[i].addr, i, init.tasks[i].size);

	ipc_init();

	/*
	 * Create kernel task.
	 */
	k = task_create(AS_KERNEL, "KERNEL");
	if (!k)
		panic("can't create kernel task\n");

	/*
	 * Create the first thread.
	 */
	t = thread_create(kinit, NULL, k, 0, "kinit");
	if (!t)
		panic("can't create kinit thread\n");
	thread_ready(t);

	/*
	 * This call to scheduler() will return to kinit,
	 * starting the thread of kernel threads.
	 */
	scheduler();
	/* not reached */
}
#ifdef CONFIG_SMP |
/** Main kernel routine for application CPUs.
 *
 * Executed by application processors, temporary stack
 * is at ctx.sp which was set during BP boot.
 * This function passes control directly to
 * main_ap_separated_stack().
 *
 * Assuming interrupts_disable()'d.
 *
 */
void main_ap(void)
{
	/*
	 * Incrementing the active CPU counter will guarantee that the
	 * pm_init() will not attempt to build GDT and IDT tables again.
	 * Neither frame_init() will do the complete thing. Neither cpu_init()
	 * will do.
	 */
	config.cpu_active++;

	/*
	 * The THE structure is well defined because ctx.sp is used as stack.
	 */
	the_initialize(THE);

	/* Per-CPU repetition of the MM bring-up done by the BSP. */
	arch_pre_mm_init();
	frame_init();
	page_init();
	tlb_init();
	arch_post_mm_init();

	cpu_init();
	calibrate_delay_loop();

	l_apic_init();
	l_apic_debug();

	/* Seed the private stack with this CPU's THE structure. */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * If we woke kmp up before we left the kernel stack, we could
	 * collide with another CPU coming up. To prevent this, we
	 * switch to this cpu's private stack prior to waking kmp up.
	 */
	context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}
/** Main kernel routine for application CPUs using new stack.
 *
 * Second part of main_ap().
 *
 */
void main_ap_separated_stack(void)
{
	/*
	 * Configure timeouts for this cpu.
	 */
	timeout_init();

	/*
	 * Announce this CPU is up — presumably the kmp thread sleeps
	 * on ap_completion_wq while bringing APs up one by one;
	 * confirm against kmp's implementation.
	 */
	waitq_wakeup(&ap_completion_wq, WAKEUP_FIRST);
	scheduler();
	/* not reached */
}
#endif /* CONFIG_SMP */ |
//kernel/trunk/generic/src/main/uinit.c |
---|
0,0 → 1,62 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file uinit.c |
* @brief Userspace bootstrap thread. |
* |
 * This file contains uinit kernel thread which is used to start every
* userspace thread including threads created by SYS_THREAD_CREATE syscall. |
* |
* @see SYS_THREAD_CREATE |
*/ |
#include <main/uinit.h> |
#include <arch/types.h> |
#include <proc/thread.h> |
#include <userspace.h> |
#include <mm/slab.h> |
/** Thread used to bring up userspace thread. |
* |
* @param arg Pointer to structure containing userspace entry and stack addresses. |
*/ |
void uinit(void *arg) |
{ |
uspace_arg_t uarg; |
uarg.uspace_entry = ((uspace_arg_t *) arg)->uspace_entry; |
uarg.uspace_stack = ((uspace_arg_t *) arg)->uspace_stack; |
uarg.uspace_uarg = ((uspace_arg_t *) arg)->uspace_uarg; |
uarg.uspace_thread_function = NULL; |
uarg.uspace_thread_arg = NULL; |
free((uspace_arg_t *) arg); |
userspace(&uarg); |
} |
//kernel/trunk/generic/src/main/version.c |
---|
0,0 → 1,54 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <main/version.h> |
#include <print.h> |
char *project = "SPARTAN kernel"; |
char *copyright = "Copyright (C) 2001-2006 HelenOS project"; |
char *release = RELEASE; |
char *name = NAME; |
char *arch = ARCH; |
#ifdef REVISION |
char *revision = ", revision " REVISION; |
#else |
char *revision = ""; |
#endif |
#ifdef TIMESTAMP |
char *timestamp = " on " TIMESTAMP; |
#else |
char *timestamp = ""; |
#endif |
/** Print version information.
 *
 * Prints the kernel banner: project name, release, configuration
 * name, optional revision and build timestamp, target architecture
 * and the copyright notice.
 */
void version_print(void)
{
	printf("%s, release %s (%s)%s\nBuilt%s for %s\n%s\n", project, release, name, revision, timestamp, arch, copyright);
}
//kernel/trunk/generic/src/ddi/ddi.c |
---|
0,0 → 1,212 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file ddi.c |
* @brief Device Driver Interface functions. |
* |
* This file contains functions that comprise the Device Driver Interface. |
* These are the functions for mapping physical memory and enabling I/O |
* space to tasks. |
*/ |
#include <ddi/ddi.h> |
#include <ddi/ddi_arg.h> |
#include <proc/task.h> |
#include <security/cap.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/as.h> |
#include <synch/spinlock.h> |
#include <arch.h> |
#include <align.h> |
#include <errno.h> |
/** Map piece of physical memory into virtual address space of specified task.
 *
 * @param id Task ID of the destination task.
 * @param pf Physical frame address of the starting frame.
 * @param vp Virtual page address of the starting page.
 * @param pages Number of pages to map.
 * @param writable If true, the mapping will be created writable.
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this syscall,
 *         ENOENT if there is no task matching the specified ID and ENOMEM if
 *         there was a problem in creating address space area.
 */
static int ddi_physmem_map(task_id_t id, __address pf, __address vp, count_t pages, bool writable)
{
	ipl_t ipl;
	cap_t caps;
	task_t *t;
	int flags;
	count_t i;

	/*
	 * Make sure the caller is authorised to make this syscall.
	 */
	caps = cap_get(TASK);
	if (!(caps & CAP_MEM_MANAGER))
		return EPERM;

	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	t = task_find_by_id(id);

	if (!t) {
		/*
		 * There is no task with the specified ID.
		 */
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	/*
	 * TODO: We are currently lacking support for task destroying.
	 * Once it is added to the kernel, we must take care to
	 * synchronize in a way that prevents race conditions here.
	 */

	/* Lock the task and release the lock protecting tasks_btree. */
	spinlock_lock(&t->lock);
	spinlock_unlock(&tasks_lock);

	/* Device mappings are read-only unless writability was requested. */
	flags = AS_AREA_DEVICE | AS_AREA_READ;
	if (writable)
		flags |= AS_AREA_WRITE;
	if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE)) {
		/*
		 * The address space area could not have been created.
		 * We report it using ENOMEM.
		 */
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		return ENOMEM;
	}

	/* Initialize page tables: map each page onto its matching frame. */
	for (i = 0; i < pages; i++)
		as_set_mapping(t->as, vp + i * PAGE_SIZE, pf + i * FRAME_SIZE);

	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);

	return 0;
}
/** Enable range of I/O space for task.
 *
 * @param id Task ID of the destination task.
 * @param ioaddr Starting I/O address.
 * @param size Size of the enabled I/O space.
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this syscall,
 *	   ENOENT if there is no task matching the specified ID.
 */
static int ddi_iospace_enable(task_id_t id, __address ioaddr, size_t size)
{
	ipl_t ipl;
	cap_t caps;
	task_t *t;
	int rc;

	/*
	 * Make sure the caller is authorised to make this syscall.
	 */
	caps = cap_get(TASK);
	if (!(caps & CAP_IO_MANAGER))
		return EPERM;

	/* Interrupts stay disabled while any of the locks below are held. */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	t = task_find_by_id(id);
	if (!t) {
		/*
		 * There is no task with the specified ID.
		 */
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	/*
	 * TODO: We are currently lacking support for task destroying.
	 * Once it is added to the kernel, we must take care to
	 * synchronize in a way that prevents race conditions here.
	 */

	/* Lock the task and release the lock protecting tasks_btree. */
	spinlock_lock(&t->lock);
	spinlock_unlock(&tasks_lock);

	/* The actual enabling of the I/O range is architecture-specific. */
	rc = ddi_iospace_enable_arch(t, ioaddr, size);

	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);
	return rc;
}
/** Wrapper for SYS_MAP_PHYSMEM syscall. |
* |
* @param User space address of memory DDI argument structure. |
* |
* @return 0 on success, otherwise it returns error code found in errno.h |
*/ |
__native sys_physmem_map(ddi_memarg_t *uspace_mem_arg) |
{ |
ddi_memarg_t arg; |
copy_from_uspace(&arg, uspace_mem_arg, sizeof(ddi_memarg_t)); |
return (__native) ddi_physmem_map((task_id_t) arg.task_id, ALIGN_DOWN((__address) arg.phys_base, FRAME_SIZE), |
ALIGN_DOWN((__address) arg.virt_base, PAGE_SIZE), (count_t) arg.pages, |
(bool) arg.writable); |
} |
/** Wrapper for SYS_ENABLE_IOSPACE syscall. |
* |
* @param User space address of DDI argument structure. |
* |
* @return 0 on success, otherwise it returns error code found in errno.h |
*/ |
__native sys_iospace_enable(ddi_ioarg_t *uspace_io_arg) |
{ |
ddi_ioarg_t arg; |
copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t)); |
return (__native) ddi_iospace_enable((task_id_t) arg.task_id, (__address) arg.ioaddr, (size_t) arg.size); |
} |
/** Enable or disable interrupts on behalf of the calling task.
 *
 * @param enable Non-zero to enable, zero to disable (interpreted by the
 *               architecture-specific handler).
 * @param flags Flags argument passed through to ddi_int_control_arch().
 *
 * @return EPERM if the caller lacks the CAP_INT_CONTROL capability,
 *         otherwise the return value of ddi_int_control_arch().
 */
__native ddi_int_control(__native enable, __native *flags)
{
	/*
	 * Bug fix: the original test was "! cap_get(TASK) & CAP_INT_CONTROL".
	 * Unary '!' binds tighter than binary '&', so it evaluated
	 * (!cap_get(TASK)) & CAP_INT_CONTROL — i.e. it almost never rejected
	 * an unauthorised caller. Parenthesize to test the capability bit.
	 */
	if (!(cap_get(TASK) & CAP_INT_CONTROL))
		return EPERM;

	return ddi_int_control_arch(enable, flags);
}
//kernel/trunk/generic/src/proc/scheduler.c |
---|
0,0 → 1,667 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file scheduler.c |
* @brief Scheduler and load balancing. |
* |
 * This file contains the scheduler and kcpulb kernel thread which
* performs load-balancing of per-CPU run queues. |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/as.h> |
#include <arch/asm.h> |
#include <arch/faddr.h> |
#include <atomic.h> |
#include <synch/spinlock.h> |
#include <config.h> |
#include <context.h> |
#include <func.h> |
#include <arch.h> |
#include <adt/list.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <cpu.h> |
#include <print.h> |
#include <debug.h> |
/* Forward declarations of scheduler-internal hooks defined below. */
static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */
/** Carry out actions before new task runs.
 *
 * Delegates to the architecture-specific hook.
 */
void before_task_runs(void)
{
	before_task_runs_arch();
}
/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
	/*
	 * Lazy FPU switching: keep the FPU enabled only if this CPU already
	 * owns THREAD's FPU context; otherwise disable it so that the first
	 * FPU instruction traps and the context can be loaded on demand
	 * (see scheduler_fpu_lazy_request()).
	 */
	if(THREAD==CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	/* Eager FPU switching: restore (or initialize) the context now. */
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		/* First run of this thread: start from a fresh FPU state. */
		fpu_init();
		THREAD->fpu_context_exists=1;
	}
#endif
}
/** Take actions after THREAD had run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * had been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	/* Architecture-specific bookkeeping for the preempted thread. */
	after_thread_ran_arch();
}
#ifdef CONFIG_FPU_LAZY |
void scheduler_fpu_lazy_request(void) |
{ |
restart: |
fpu_enable(); |
spinlock_lock(&CPU->lock); |
/* Save old context */ |
if (CPU->fpu_owner != NULL) { |
spinlock_lock(&CPU->fpu_owner->lock); |
fpu_context_save(CPU->fpu_owner->saved_fpu_context); |
/* don't prevent migration */ |
CPU->fpu_owner->fpu_context_engaged=0; |
spinlock_unlock(&CPU->fpu_owner->lock); |
CPU->fpu_owner = NULL; |
} |
spinlock_lock(&THREAD->lock); |
if (THREAD->fpu_context_exists) { |
fpu_context_restore(THREAD->saved_fpu_context); |
} else { |
/* Allocate FPU context */ |
if (!THREAD->saved_fpu_context) { |
/* Might sleep */ |
spinlock_unlock(&THREAD->lock); |
spinlock_unlock(&CPU->lock); |
THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, |
0); |
/* We may have switched CPUs during slab_alloc */ |
goto restart; |
} |
fpu_init(); |
THREAD->fpu_context_exists=1; |
} |
CPU->fpu_owner=THREAD; |
THREAD->fpu_context_engaged = 1; |
spinlock_unlock(&THREAD->lock); |
spinlock_unlock(&CPU->lock); |
} |
#endif |
/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 * Intentionally empty: no global scheduler state needs setup here.
 */
void scheduler_init(void)
{
}
/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy. Blocks (sleeping the CPU) until a
 * runnable thread appears on this CPU.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * For there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	/* Scan run queues from highest priority (index 0) downwards. */
	for (i = 0; i<RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

		/* The time slice grows with the queue index. */
		t->ticks = us2ticks((i+1)*10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	/* Raced away between the nrdy check and the queue scan; retry. */
	goto loop;

}
/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that in result threads with 'pri'
 * greater or equal @start are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	/* Relink only once the per-CPU counter has built up past the limit. */
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		/* Shift every queue below 'start' up by one priority level. */
		for (i = start; i<RQ_COUNT-1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}
/** The scheduler
 *
 * The thread scheduling procedure.
 * Saves the current thread's context (if any) and
 * passes control, on the CPU's private stack, to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	/* If the kernel is shutting down, stop this CPU for good. */
	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		/* Eager FPU switching: save the outgoing thread's context now. */
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 * context_save() returned zero because the saved
			 * context was just restored for this thread.
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);
			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded here
		 * to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}
/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handling the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		/* Dispose of the outgoing thread according to its state. */
		switch (THREAD->state) {
		case Running:
			/* Still runnable: put it back on a ready queue. */
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			thread_destroy(THREAD);
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	/* Periodically promote starving low-priority threads. */
	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}
#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising thread supplies
 * for the CPU it's wired to. Wakes up once per second,
 * computes the average ready-thread count per active CPU
 * and steals threads from overloaded CPUs until this CPU
 * reaches the average.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that situation can have changed between two
	 * passes. Each time get the most up to date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Searching least priority queues on all CPU's first and most priority queues on all CPU's last.
	 */
	for (j=RQ_COUNT-1; j >= 0; j--) {
		for (i=0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling for kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads neither threads already stolen.
				 * The latter prevents threads from migrating between CPU's without ever being run.
				 * We don't want to steal threads whose FPU context is still in CPU.
				 */
				spinlock_lock(&t->lock);
				if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				/*
				 * Bug fix: the format string used the invalid
				 * conversion "%nd"; "%n" writes through its
				 * argument and is dangerous/UB here. Print the
				 * computed average with "%ld" instead.
				 */
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%ld\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}
#endif /* CONFIG_SMP */
/** Print information about threads & scheduler queues */ |
void sched_print_list(void) |
{ |
ipl_t ipl; |
int cpu,i; |
runq_t *r; |
thread_t *t; |
link_t *cur; |
/* We are going to mess with scheduler structures, |
* let's not be interrupted */ |
ipl = interrupts_disable(); |
for (cpu=0;cpu < config.cpu_count; cpu++) { |
if (!cpus[cpu].active) |
continue; |
spinlock_lock(&cpus[cpu].lock); |
printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n", |
cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink); |
for (i=0; i<RQ_COUNT; i++) { |
r = &cpus[cpu].rq[i]; |
spinlock_lock(&r->lock); |
if (!r->n) { |
spinlock_unlock(&r->lock); |
continue; |
} |
printf("\trq[%d]: ", i); |
for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) { |
t = list_get_instance(cur, thread_t, rq_link); |
printf("%d(%s) ", t->tid, |
thread_states[t->state]); |
} |
printf("\n"); |
spinlock_unlock(&r->lock); |
} |
spinlock_unlock(&cpus[cpu].lock); |
} |
interrupts_restore(ipl); |
} |
//kernel/trunk/generic/src/proc/the.c |
---|
0,0 → 1,69 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file the.c |
* @brief THE structure functions. |
* |
* This file contains functions to manage the THE structure. |
* The THE structure exists at the base address of every kernel |
* stack and carries information about current settings |
* (e.g. current CPU, current thread, task and address space |
* and current preemption counter). |
*/ |
#include <arch.h> |
#include <typedefs.h> |
/** Initialize THE structure |
* |
* Initialize THE structure passed as argument. |
* |
* @param the THE structure to be initialized. |
*/ |
void the_initialize(the_t *the) |
{ |
the->preemption_disabled = 0; |
the->cpu = NULL; |
the->thread = NULL; |
the->task = NULL; |
the->as = NULL; |
} |
/** Copy THE structure |
* |
* Copy the source THE structure to the destination THE structure. |
* |
* @param src The source THE structure. |
* @param dst The destination THE structure. |
*/ |
void the_copy(the_t *src, the_t *dst) |
{ |
*dst = *src; |
} |
//kernel/trunk/generic/src/proc/task.c |
---|
0,0 → 1,237 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file task.c |
* @brief Task management. |
*/ |
#include <main/uinit.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/uarg.h> |
#include <mm/as.h> |
#include <mm/as_arg.h> |
#include <mm/slab.h> |
#include <synch/spinlock.h> |
#include <arch.h> |
#include <panic.h> |
#include <adt/btree.h> |
#include <adt/list.h> |
#include <ipc/ipc.h> |
#include <security/cap.h> |
#include <memstr.h> |
#include <print.h> |
#include <elf.h> |
#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1	/* default user stack size, in pages */
#endif

/** Spinlock protecting tasks_btree and task_counter. */
SPINLOCK_INITIALIZE(tasks_lock);
btree_t tasks_btree;			/**< B+tree of all tasks, keyed by task ID. */

/* Monotonic generator of task IDs; incremented under tasks_lock. */
static task_id_t task_counter = 0;
/** Initialize tasks
 *
 * Initialize kernel tasks support.
 *
 */
void task_init(void)
{
	/* No task is active on this processor yet. */
	TASK = NULL;
	btree_create(&tasks_btree);
}
/** Create new task
 *
 * Create new task with no threads.
 *
 * @param as Task's address space.
 * @param name Symbolic name. NOTE(review): only the pointer is stored, no
 *             copy is made — the caller must keep the string alive for the
 *             task's lifetime; confirm at call sites.
 *
 * @return New task's structure
 *
 */
task_t *task_create(as_t *as, char *name)
{
	ipl_t ipl;
	task_t *ta;
	int i;

	ta = (task_t *) malloc(sizeof(task_t), 0);

	task_create_arch(ta);

	spinlock_initialize(&ta->lock, "task_ta_lock");
	list_initialize(&ta->th_head);
	ta->as = as;
	ta->name = name;

	/* New tasks start with no capabilities. */
	ta->capabilities = 0;

	/* Set up IPC: answerbox, phones and a connection to phone 0 if available. */
	ipc_answerbox_init(&ta->answerbox);
	for (i=0; i < IPC_MAX_PHONES;i++)
		ipc_phone_init(&ta->phones[i]);
	if (ipc_phone_0)
		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
	atomic_set(&ta->active_calls, 0);
	memsetb((__address) &ta->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);

	/* Assign a unique ID and register the task in the global B+tree. */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	ta->taskid = ++task_counter;
	btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return ta;
}
/** Create new task with 1 thread and run it
 *
 * @param program_addr Address of program executable image.
 * @param name Program name.
 *
 * @return Task of the running program or NULL on error.
 */
task_t * task_run_program(void *program_addr, char *name)
{
	as_t *as;
	as_area_t *a;
	int rc;
	thread_t *t;
	task_t *task;
	uspace_arg_t *kernel_uarg;

	as = as_create(0);
	ASSERT(as);

	/* Load the ELF image into the fresh address space. */
	rc = elf_load((elf_header_t *) program_addr, as);
	if (rc != EE_OK) {
		as_free(as);
		return NULL;
	}

	/* Arguments for uinit(): user-space entry point and initial stack. */
	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	kernel_uarg->uspace_entry = (void *) ((elf_header_t *) program_addr)->e_entry;
	kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
	kernel_uarg->uspace_thread_function = NULL;
	kernel_uarg->uspace_thread_arg = NULL;
	kernel_uarg->uspace_uarg = NULL;

	task = task_create(as, name);
	ASSERT(task);

	/*
	 * Create the data as_area.
	 * NOTE(review): the result 'a' is never checked, so a failed stack
	 * area creation goes unnoticed here — confirm whether that is
	 * acceptable.
	 */
	a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
		USTACK_ADDRESS, AS_AREA_ATTR_NONE);

	t = thread_create(uinit, kernel_uarg, task, 0, "uinit");
	ASSERT(t);
	thread_ready(t);

	return task;
}
/** Syscall for reading task ID from userspace.
 *
 * @param uspace_task_id Userspace address of 8-byte buffer where to store current task ID.
 *
 * @return Always returns 0.
 */
__native sys_task_get_id(task_id_t *uspace_task_id)
{
	/*
	 * No need to acquire lock on TASK because taskid
	 * remains constant for the lifespan of the task.
	 */
	/*
	 * NOTE(review): the copy_to_uspace() result is ignored, consistent
	 * with the documented "always returns 0" contract — confirm this is
	 * intentional.
	 */
	copy_to_uspace(uspace_task_id, &TASK->taskid, sizeof(TASK->taskid));

	return 0;
}
/** Find task structure corresponding to task ID. |
* |
* The tasks_lock must be already held by the caller of this function |
* and interrupts must be disabled. |
* |
* @param id Task ID. |
* |
* @return Task structure address or NULL if there is no such task ID. |
*/ |
task_t *task_find_by_id(task_id_t id) |
{ |
btree_node_t *leaf; |
return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf); |
} |
/** Print task list */ |
void task_print_list(void) |
{ |
link_t *cur; |
ipl_t ipl; |
/* Messing with thread structures, avoid deadlock */ |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
task_t *t; |
int j; |
t = (task_t *) node->value[i]; |
spinlock_lock(&t->lock); |
printf("%s: address=%#zX, taskid=%#llX, as=%#zX, ActiveCalls: %zd", |
t->name, t, t->taskid, t->as, atomic_get(&t->active_calls)); |
for (j=0; j < IPC_MAX_PHONES; j++) { |
if (t->phones[j].callee) |
printf(" Ph(%zd): %#zX ", j, t->phones[j].callee); |
} |
printf("\n"); |
spinlock_unlock(&t->lock); |
} |
} |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
} |
//kernel/trunk/generic/src/proc/thread.c |
---|
0,0 → 1,490 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file thread.c |
* @brief Thread management functions. |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/uarg.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <synch/synch.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <synch/rwlock.h> |
#include <cpu.h> |
#include <func.h> |
#include <context.h> |
#include <adt/btree.h> |
#include <adt/list.h> |
#include <typedefs.h> |
#include <time/clock.h> |
#include <config.h> |
#include <arch/interrupt.h> |
#include <smp/ipi.h> |
#include <arch/faddr.h> |
#include <atomic.h> |
#include <memstr.h> |
#include <print.h> |
#include <mm/slab.h> |
#include <debug.h> |
#include <main/uinit.h> |
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */

/** Lock protecting threads_head list. For locking rules, see declaration thereof. */
SPINLOCK_INITIALIZE(threads_lock);
btree_t threads_btree;			/**< B+tree of all threads. */

/* Generator of unique thread IDs; protected by tidlock. */
SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;

static slab_cache_t *thread_slab;	/* allocator cache for thread_t objects */
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;		/* allocator cache for saved FPU contexts */
#endif
/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;

	/* this is where each thread wakes up after its creation */
	spinlock_unlock(&THREAD->lock);
	interrupts_enable();

	f(arg);
	/* Ensure proper teardown even when f() returns normally. */
	thread_exit();
	/* not reached */
}
/** Initialization and allocation for thread_t structure
 *
 * Slab constructor: prepares the lock and list links, allocates the
 * kernel stack and — for eager-FPU builds — the saved FPU context.
 *
 * @param obj Raw thread_t object being constructed.
 * @param kmflags Allocation flags forwarded to nested allocators.
 *
 * @return 0 on success, -1 if an allocation failed.
 */
static int thr_constructor(void *obj, int kmflags)
{
	thread_t *t = (thread_t *)obj;
	pfn_t pfn;
	int status;

	spinlock_initialize(&t->lock, "thread_t_lock");
	link_initialize(&t->rq_link);
	link_initialize(&t->wq_link);
	link_initialize(&t->th_link);

#ifdef ARCH_HAS_FPU
# ifdef CONFIG_FPU_LAZY
	/* Lazy FPU: the context is allocated on first FPU use. */
	t->saved_fpu_context = NULL;
# else
	t->saved_fpu_context = slab_alloc(fpu_context_slab,kmflags);
	if (!t->saved_fpu_context)
		return -1;
# endif
#endif

	pfn = frame_alloc_rc(STACK_FRAMES, FRAME_KA | kmflags,&status);
	if (status) {
		/* Roll back the FPU context allocation before failing. */
#ifdef ARCH_HAS_FPU
		if (t->saved_fpu_context)
			slab_free(fpu_context_slab,t->saved_fpu_context);
#endif
		return -1;
	}
	t->kstack = (__u8 *)PA2KA(PFN2ADDR(pfn));

	return 0;
}
/** Destruction of thread_t object
 *
 * Slab destructor: releases the kernel stack and any saved FPU context.
 *
 * @param obj The thread_t object being destroyed.
 *
 * @return Number of frames released back to the frame allocator.
 */
static int thr_destructor(void *obj)
{
	thread_t *t = (thread_t *)obj;

	frame_free(ADDR2PFN(KA2PA(t->kstack)));
#ifdef ARCH_HAS_FPU
	if (t->saved_fpu_context)
		slab_free(fpu_context_slab,t->saved_fpu_context);
#endif
	return 1; /* One page freed */
}
/** Initialize threads
 *
 * Initialize kernel threads support.
 * Creates the slab caches for thread_t (and, on architectures with an
 * FPU, for saved FPU contexts) and the system-wide B+tree of threads.
 */
void thread_init(void)
{
	THREAD = NULL;
	atomic_set(&nrdy,0);
	thread_slab = slab_cache_create("thread_slab",
					sizeof(thread_t),0,
					thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
	fpu_context_slab = slab_cache_create("fpu_slab",
					     sizeof(fpu_context_t),
					     FPU_CONTEXT_ALIGN,
					     NULL, NULL, 0);
#endif

	btree_create(&threads_btree);
}
/** Make thread ready |
* |
* Switch thread t to the ready state. |
* |
* @param t Thread to make ready. |
* |
*/ |
void thread_ready(thread_t *t) |
{ |
cpu_t *cpu; |
runq_t *r; |
ipl_t ipl; |
int i, avg; |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
ASSERT(! (t->state == Ready)); |
i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority; |
cpu = CPU; |
if (t->flags & X_WIRED) { |
cpu = t->cpu; |
} |
t->state = Ready; |
spinlock_unlock(&t->lock); |
/* |
* Append t to respective ready queue on respective processor. |
*/ |
r = &cpu->rq[i]; |
spinlock_lock(&r->lock); |
list_append(&t->rq_link, &r->rq_head); |
r->n++; |
spinlock_unlock(&r->lock); |
atomic_inc(&nrdy); |
avg = atomic_get(&nrdy) / config.cpu_active; |
atomic_inc(&cpu->nrdy); |
interrupts_restore(ipl); |
} |
/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * Assume thread->lock is held!! The lock is released here and the
 * thread_t is returned to thread_slab.
 *
 * @param t Thread to destroy; must be in the Exiting state.
 */
void thread_destroy(thread_t *t)
{
	ASSERT(t->state == Exiting);
	ASSERT(t->task);
	ASSERT(t->cpu);

	/* Clear the CPU's FPU-owner reference if it points at this thread. */
	spinlock_lock(&t->cpu->lock);
	if(t->cpu->fpu_owner==t)
		t->cpu->fpu_owner=NULL;
	spinlock_unlock(&t->cpu->lock);

	/*
	 * Detach from the containing task.
	 */
	spinlock_lock(&t->task->lock);
	list_remove(&t->th_link);
	spinlock_unlock(&t->task->lock);

	spinlock_unlock(&t->lock);

	/* Remove the thread from the system-wide B+tree of threads. */
	spinlock_lock(&threads_lock);
	btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL);
	spinlock_unlock(&threads_lock);

	slab_free(thread_slab, t);
}
/** Create new thread
 *
 * Create a new thread.
 *
 * The thread is created in the Entering state and starts executing in
 * cushion(); activate it with thread_ready().
 *
 * @param func Thread's implementing function.
 * @param arg Thread's implementing function argument.
 * @param task Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name Symbolic name.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
{
	thread_t *t;
	ipl_t ipl;

	t = (thread_t *) slab_alloc(thread_slab, 0);
	if (!t)
		return NULL;

	thread_create_arch(t);

	/* Not needed, but good for debugging */
	/* (Note: '*' binds tighter than '<<', so the size expression equals
	 * THREAD_STACK_SIZE << STACK_FRAMES, i.e. the whole stack area.) */
	memsetb((__address)t->kstack, THREAD_STACK_SIZE * 1<<STACK_FRAMES, 0);

	/* Assign a unique thread ID under tidlock. */
	ipl = interrupts_disable();
	spinlock_lock(&tidlock);
	t->tid = ++last_tid;
	spinlock_unlock(&tidlock);
	interrupts_restore(ipl);

	/* Prepare the initial context so that the thread starts in cushion(). */
	context_save(&t->saved_context);
	context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);

	the_initialize((the_t *) t->kstack);

	/* The new thread inherits the current interrupt priority level. */
	ipl = interrupts_disable();
	t->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	memcpy(t->name, name, THREAD_NAME_BUFLEN);

	t->thread_code = func;
	t->thread_arg = arg;
	t->ticks = -1;
	t->priority = -1; /* start in rq[0] */
	t->cpu = NULL;
	t->flags = 0;
	t->state = Entering;
	t->call_me = NULL;
	t->call_me_with = NULL;

	timeout_initialize(&t->sleep_timeout);
	t->sleep_queue = NULL;
	t->timeout_pending = 0;

	t->rwlock_holder_type = RWLOCK_NONE;

	t->task = task;

	t->fpu_context_exists = 0;
	t->fpu_context_engaged = 0;

	/*
	 * Register this thread in the system-wide list.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
	spinlock_unlock(&threads_lock);

	/*
	 * Attach to the containing task.
	 */
	spinlock_lock(&task->lock);
	list_append(&t->th_link, &task->th_head);
	spinlock_unlock(&task->lock);

	interrupts_restore(ipl);

	return t;
}
/** Make thread exiting
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 * Does not return; the final switch away from this thread is done by
 * scheduler().
 */
void thread_exit(void)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	THREAD->state = Exiting;
	spinlock_unlock(&THREAD->lock);
	scheduler();
}
/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(__u32 sec)
{
	/* NOTE(review): sec * 1000000 overflows __u32 for sec >= 4295, which
	 * would silently shorten the sleep — confirm callers never pass
	 * such values. */
	thread_usleep(sec*1000000);
}
/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * Sleeps on a private, never-published wait queue, so the sleep can
 * only end when the timeout expires.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(__u32 usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	(void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}
/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * Both fields are updated together under THREAD->lock with interrupts
 * disabled.
 *
 * @param call_me Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->call_me = call_me;
	THREAD->call_me_with = call_me_with;
	spinlock_unlock(&THREAD->lock);
	interrupts_restore(ipl);
}
/** Print list of threads debug info
 *
 * Walks the leaf level of threads_btree and prints one line per thread.
 * Interrupts stay disabled and threads_lock stays held for the whole walk.
 */
void thread_print_list(void)
{
	link_t *cur;
	ipl_t ipl;

	/* Messing with thread structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);

	/* Iterate over all leaf nodes of the B+tree of threads. */
	for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		/* Each leaf node holds node->keys thread entries. */
		for (i = 0; i < node->keys; i++) {
			thread_t *t;

			t = (thread_t *) node->value[i];
			printf("%s: address=%#zX, tid=%zd, state=%s, task=%#zX, code=%#zX, stack=%#zX, cpu=",
				t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack);
			if (t->cpu)
				printf("cpu%zd ", t->cpu->id);
			else
				printf("none");
			printf("\n");
		}
	}

	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}
/** Check whether thread exists. |
* |
* Note that threads_lock must be already held and |
* interrupts must be already disabled. |
* |
* @param t Pointer to thread. |
* |
* @return True if thread t is known to the system, false otherwise. |
*/ |
bool thread_exists(thread_t *t) |
{ |
btree_node_t *leaf; |
return btree_search(&threads_btree, (btree_key_t) ((__address) t), &leaf) != NULL; |
} |
/** Process syscall to create new thread.
 *
 * Copies the thread name and the uspace_arg_t descriptor from
 * userspace, creates a kernel thread running uinit() in the current
 * task and makes it ready.
 *
 * @param uspace_uarg Userspace address of the uspace_arg_t descriptor.
 * @param uspace_name Userspace address of the thread name.
 *
 * @return Thread ID of the new thread, or (__native) -1 on failure.
 */
__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
	thread_t *t;
	char namebuf[THREAD_NAME_BUFLEN];
	uspace_arg_t *kernel_uarg;
	__u32 tid;

	/* NOTE(review): the return values of copy_from_uspace() are ignored
	 * and malloc() is not checked for NULL — confirm both are safe in
	 * this configuration. */
	copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);

	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));

	if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
		tid = t->tid;
		thread_ready(t);
		return (__native) tid;
	} else {
		/* Creation failed; nobody owns kernel_uarg anymore. */
		free(kernel_uarg);
	}

	return (__native) -1;
}
/** Process syscall to terminate thread.
 *
 * @param uspace_status Exit status supplied by userspace (currently unused).
 *
 * @return Never returns; thread_exit() does not come back.
 */
__native sys_thread_exit(int uspace_status)
{
	thread_exit();

	/* Unreachable */
	return 0;
}
//kernel/trunk/generic/src/lib/elf.c |
---|
0,0 → 1,216 |
/* |
* Copyright (C) 2006 Sergey Bondari |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <elf.h> |
#include <debug.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <mm/as.h> |
#include <mm/frame.h> |
#include <mm/slab.h> |
#include <align.h> |
#include <memstr.h> |
#include <macros.h> |
/** Textual descriptions of elf_load() return codes, indexed by the
 * EE_* error code; translated by elf_error(). */
static char *error_codes[] = {
	"no error",
	"invalid image",
	"address space error",
	"incompatible image",
	"unsupported image type",
	"irrecoverable error"
};
static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as); |
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as); |
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as); |
/** ELF loader
 *
 * Validates the ELF identification and compatibility fields, then
 * processes every program (segment) header and every section header.
 *
 * @param header Pointer to ELF header in memory
 * @param as Created and properly mapped address space
 * @return EE_OK on success, other EE_* code on failure.
 */
int elf_load(elf_header_t *header, as_t * as)
{
	int i, rc;

	/* Identify ELF: the four magic bytes must match. */
	if (header->e_ident[EI_MAG0] != ELFMAG0 || header->e_ident[EI_MAG1] != ELFMAG1 ||
	    header->e_ident[EI_MAG2] != ELFMAG2 || header->e_ident[EI_MAG3] != ELFMAG3) {
		return EE_INVALID;
	}

	/* Identify ELF compatibility: data encoding, machine, version and
	 * class must match what this kernel was built for. */
	if (header->e_ident[EI_DATA] != ELF_DATA_ENCODING || header->e_machine != ELF_MACHINE ||
	    header->e_ident[EI_VERSION] != EV_CURRENT || header->e_version != EV_CURRENT ||
	    header->e_ident[EI_CLASS] != ELF_CLASS) {
		return EE_INCOMPATIBLE;
	}

	/* Header entry sizes must match our structure definitions. */
	if (header->e_phentsize != sizeof(elf_segment_header_t))
		return EE_INCOMPATIBLE;

	if (header->e_shentsize != sizeof(elf_section_header_t))
		return EE_INCOMPATIBLE;

	/* Check if the object type is supported. */
	if (header->e_type != ET_EXEC)
		return EE_UNSUPPORTED;

	/* Walk through all segment headers and process them. */
	for (i = 0; i < header->e_phnum; i++) {
		rc = segment_header(&((elf_segment_header_t *)(((__u8 *) header) + header->e_phoff))[i], header, as);
		if (rc != EE_OK)
			return rc;
	}

	/* Inspect all section headers and process them. */
	for (i = 0; i < header->e_shnum; i++) {
		rc = section_header(&((elf_section_header_t *)(((__u8 *) header) + header->e_shoff))[i], header, as);
		if (rc != EE_OK)
			return rc;
	}

	return EE_OK;
}
/** Print error message according to error code. |
* |
* @param rc Return code returned by elf_load(). |
* |
* @return NULL terminated description of error. |
*/ |
char *elf_error(int rc) |
{ |
ASSERT(rc < sizeof(error_codes)/sizeof(char *)); |
return error_codes[rc]; |
} |
/** Process segment header. |
* |
* @param entry Segment header. |
* @param elf ELF header. |
* @param as Address space into wich the ELF is being loaded. |
* |
* @return EE_OK on success, error code otherwise. |
*/ |
static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as) |
{ |
switch (entry->p_type) { |
case PT_NULL: |
case PT_PHDR: |
break; |
case PT_LOAD: |
return load_segment(entry, elf, as); |
break; |
case PT_DYNAMIC: |
case PT_INTERP: |
case PT_SHLIB: |
case PT_NOTE: |
case PT_LOPROC: |
case PT_HIPROC: |
default: |
return EE_UNSUPPORTED; |
break; |
} |
return EE_OK; |
} |
/** Load segment described by program header entry. |
* |
* @param entry Program header entry describing segment to be loaded. |
* @param elf ELF header. |
* @param as Address space into wich the ELF is being loaded. |
* |
* @return EE_OK on success, error code otherwise. |
*/ |
int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as) |
{ |
as_area_t *a; |
int i, flags = 0; |
size_t segment_size; |
__u8 *segment; |
if (entry->p_align > 1) { |
if ((entry->p_offset % entry->p_align) != (entry->p_vaddr % entry->p_align)) { |
return EE_INVALID; |
} |
} |
if (entry->p_flags & PF_X) |
flags |= AS_AREA_EXEC; |
if (entry->p_flags & PF_W) |
flags |= AS_AREA_WRITE; |
if (entry->p_flags & PF_R) |
flags |= AS_AREA_READ; |
/* |
* Check if the virtual address starts on page boundary. |
*/ |
if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr) |
return EE_UNSUPPORTED; |
segment_size = ALIGN_UP(max(entry->p_filesz, entry->p_memsz), PAGE_SIZE); |
if ((entry->p_flags & PF_W)) { |
/* If writable, copy data (should be COW in the future) */ |
segment = malloc(segment_size, 0); |
memsetb((__address) (segment + entry->p_filesz), segment_size - entry->p_filesz, 0); |
memcpy(segment, (void *) (((__address) elf) + entry->p_offset), entry->p_filesz); |
} else /* Map identically original data */ |
segment = ((void *) elf) + entry->p_offset; |
a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE); |
if (!a) |
return EE_MEMORY; |
for (i = 0; i < SIZE2FRAMES(entry->p_filesz); i++) { |
as_set_mapping(as, entry->p_vaddr + i*PAGE_SIZE, KA2PA(((__address) segment) + i*PAGE_SIZE)); |
} |
return EE_OK; |
} |
/** Process section header. |
* |
* @param entry Segment header. |
* @param elf ELF header. |
* @param as Address space into wich the ELF is being loaded. |
* |
* @return EE_OK on success, error code otherwise. |
*/ |
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as) |
{ |
switch (entry->sh_type) { |
default: |
break; |
} |
return EE_OK; |
} |
//kernel/trunk/generic/src/lib/func.c |
---|
0,0 → 1,184 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <func.h> |
#include <print.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <typedefs.h> |
#include <console/kconsole.h> |
atomic_t haltstate = {0}; /**< Halt flag */

/** Halt wrapper
 *
 * Set halt flag and halt the cpu.
 *
 * In debug builds, the first caller additionally drops into the kernel
 * console ("panic") before halting, as a last chance to inspect state.
 */
void halt()
{
#ifdef CONFIG_DEBUG
	bool rundebugger = false;

	// TODO test_and_set not defined on all arches
	// if (!test_and_set(&haltstate))
	/* NOTE(review): this get-then-set pair is racy on SMP (see TODO
	 * above) — two CPUs could both decide to run the debugger. */
	if (!atomic_get(&haltstate)) {
		atomic_set(&haltstate, 1);
		rundebugger = true;
	}
#else
	atomic_set(&haltstate, 1);
#endif

	interrupts_disable();
#ifdef CONFIG_DEBUG
	if (rundebugger) {
		printf("\n");
		kconsole("panic"); /* Run kconsole as a last resort to user */
	}
#endif
	if (CPU)
		printf("cpu%d: halted\n", CPU->id);
	else
		printf("cpu: halted\n");
	cpu_halt();
}
/** Return number of characters in a string.
 *
 * @param str NULL terminated string.
 *
 * @return Number of characters in str.
 */
size_t strlen(const char *str)
{
	size_t len;

	/* Use size_t for the counter: the old 'int' could overflow and
	 * truncate the size_t result for pathologically long strings. */
	for (len = 0; str[len] != '\0'; len++)
		;

	return len;
}
/** Compare two NULL terminated strings
 *
 * Do a char-by-char comparison of two NULL terminated strings.
 * The strings are considered equal iff they consist of the same
 * characters on the minimum of their lengths and specified maximal
 * length.
 *
 * @param src First string to compare.
 * @param dst Second string to compare.
 * @param len Maximal length for comparison.
 *
 * @return 0 if the strings are equal, -1 if first is smaller, 1 if second smaller.
 *
 */
int strncmp(const char *src, const char *dst, size_t len)
{
	size_t pos = 0;

	/* Scan while both strings continue and the limit is not reached. */
	while (pos < len && *src && *dst) {
		if (*src != *dst)
			return (*src < *dst) ? -1 : 1;
		src++;
		dst++;
		pos++;
	}

	/* Limit reached, or both strings ended at the same character. */
	if (pos == len || *src == *dst)
		return 0;

	/* Exactly one string ended; the shorter one is the smaller one. */
	return (!*src) ? -1 : 1;
}
/** Copy NULL terminated string.
 *
 * Copy at most 'len' characters from string 'src' to 'dest'.
 * If 'src' is shorter than 'len', '\0' is inserted behind the
 * last copied character; otherwise the copy is truncated and the
 * last byte of the buffer is overwritten with '\0'.
 *
 * @param src Source string.
 * @param dest Destination buffer.
 * @param len Size of destination buffer.
 */
void strncpy(char *dest, const char *src, size_t len)
{
	size_t i;

	/* Fixed: with len == 0 the old code fell through to
	 * 'dest[i-1] = 0' with i == 0, writing dest[-1] out of bounds. */
	if (len == 0)
		return;

	for (i = 0; i < len; i++) {
		if (!(dest[i] = src[i]))
			return;
	}

	/* Source did not fit; force NUL-termination of the buffer. */
	dest[len - 1] = '\0';
}
/** Convert ascii representation to __native |
* |
* Supports 0x for hexa & 0 for octal notation. |
* Does not check for overflows, does not support negative numbers |
* |
* @param text Textual representation of number |
* @return Converted number or 0 if no valid number ofund |
*/ |
__native atoi(const char *text) |
{ |
int base = 10; |
__native result = 0; |
if (text[0] == '0' && text[1] == 'x') { |
base = 16; |
text += 2; |
} else if (text[0] == '0') |
base = 8; |
while (*text) { |
if (base != 16 && \ |
((*text >= 'A' && *text <= 'F' ) |
|| (*text >='a' && *text <='f'))) |
break; |
if (base == 8 && *text >='8') |
break; |
if (*text >= '0' && *text <= '9') { |
result *= base; |
result += *text - '0'; |
} else if (*text >= 'A' && *text <= 'F') { |
result *= base; |
result += *text - 'A' + 10; |
} else if (*text >= 'a' && *text <= 'f') { |
result *= base; |
result += *text - 'a' + 10; |
} else |
break; |
text++; |
} |
return result; |
} |
//kernel/trunk/generic/src/lib/memstr.c |
---|
0,0 → 1,92 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <memstr.h> |
#include <arch/types.h> |
/** Copy block of memory |
* |
* Copy cnt bytes from src address to dst address. |
* The copying is done word-by-word and then byte-by-byte. |
* The source and destination memory areas cannot overlap. |
* |
* @param src Origin address to copy from. |
* @param dst Origin address to copy to. |
* @param cnt Number of bytes to copy. |
* |
*/ |
void *_memcpy(void * dst, const void *src, size_t cnt) |
{ |
int i, j; |
for (i = 0; i < cnt/sizeof(__native); i++) |
((__native *) dst)[i] = ((__native *) src)[i]; |
for (j = 0; j < cnt%sizeof(__native); j++) |
((__u8 *)(((__native *) dst) + i))[j] = ((__u8 *)(((__native *) src) + i))[j]; |
return (char *)src; |
} |
/** Fill block of memory |
* |
* Fill cnt bytes at dst address with the value x. |
* The filling is done byte-by-byte. |
* |
* @param dst Origin address to fill. |
* @param cnt Number of bytes to fill. |
* @param x Value to fill. |
* |
*/ |
void _memsetb(__address dst, size_t cnt, __u8 x) |
{ |
int i; |
__u8 *p = (__u8 *) dst; |
for(i=0; i<cnt; i++) |
p[i] = x; |
} |
/** Fill block of memory |
* |
* Fill cnt words at dst address with the value x. |
* The filling is done word-by-word. |
* |
* @param dst Origin address to fill. |
* @param cnt Number of words to fill. |
* @param x Value to fill. |
* |
*/ |
void _memsetw(__address dst, size_t cnt, __u16 x) |
{ |
int i; |
__u16 *p = (__u16 *) dst; |
for(i=0; i<cnt; i++) |
p[i] = x; |
} |
//kernel/trunk/generic/src/lib/sort.c |
---|
0,0 → 1,188 |
/* |
* Copyright (C) 2005 Sergey Bondari |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/slab.h> |
#include <memstr.h> |
#include <sort.h> |
#include <panic.h> |
#define EBUFSIZE 32 |
void _qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot); |
void _bubblesort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *slot); |
/** Quicksort wrapper |
* |
* This is only a wrapper that takes care of memory allocations for storing |
* the pivot and temporary elements for generic quicksort algorithm. |
* |
* This function _can_ sleep |
* |
* @param data Pointer to data to be sorted. |
* @param n Number of elements to be sorted. |
* @param e_size Size of one element. |
* @param cmp Comparator function. |
* |
*/ |
void qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b)) |
{ |
__u8 buf_tmp[EBUFSIZE]; |
__u8 buf_pivot[EBUFSIZE]; |
void * tmp = buf_tmp; |
void * pivot = buf_pivot; |
if (e_size > EBUFSIZE) { |
pivot = (void *) malloc(e_size, 0); |
tmp = (void *) malloc(e_size, 0); |
} |
_qsort(data, n, e_size, cmp, tmp, pivot); |
if (e_size > EBUFSIZE) { |
free(tmp); |
free(pivot); |
} |
} |
/** Quicksort
 *
 * Apply generic quicksort algorithm on supplied data, using pre-allocated buffers.
 *
 * Hoare-style partitioning with a copy of the first element as pivot;
 * arrays of four or fewer elements are handed to _bubblesort().
 *
 * @param data Pointer to data to be sorted.
 * @param n Number of elements to be sorted.
 * @param e_size Size of one element.
 * @param cmp Comparator function.
 * @param tmp Pointer to scratch memory buffer e_size bytes long.
 * @param pivot Pointer to scratch memory buffer e_size bytes long.
 *
 */
void _qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot)
{
	if (n > 4) {
		int i = 0, j = n - 1;

		/* Pivot is a copy of the first element. */
		memcpy(pivot, data, e_size);

		while (1) {
			/* Advance i over elements smaller than the pivot,
			 * retreat j over elements not smaller than it. */
			while ((cmp(data + i * e_size, pivot) < 0) && i < n) i++;
			while ((cmp(data + j * e_size, pivot) >=0) && j > 0) j--;
			if (i<j) {
				/* Swap the out-of-place pair through tmp. */
				memcpy(tmp, data + i * e_size, e_size);
				memcpy(data + i * e_size, data + j * e_size, e_size);
				memcpy(data + j * e_size, tmp, e_size);
			} else {
				break;
			}
		}

		/* Recurse on both partitions; j marks the split point. */
		_qsort(data, j + 1, e_size, cmp, tmp, pivot);
		_qsort(data + (j + 1) * e_size, n - j - 1, e_size, cmp, tmp, pivot);
	} else {
		_bubblesort(data, n, e_size, cmp, tmp);
	}
}
/** Bubblesort wrapper |
* |
* This is only a wrapper that takes care of memory allocation for storing |
* the slot element for generic bubblesort algorithm. |
* |
* @param data Pointer to data to be sorted. |
* @param n Number of elements to be sorted. |
* @param e_size Size of one element. |
* @param cmp Comparator function. |
* |
*/ |
void bubblesort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b)) |
{ |
__u8 buf_slot[EBUFSIZE]; |
void * slot = buf_slot; |
if (e_size > EBUFSIZE) { |
slot = (void *) malloc(e_size, 0); |
} |
_bubblesort(data, n, e_size, cmp, slot); |
if (e_size > EBUFSIZE) { |
free(slot); |
} |
} |
/** Bubblesort |
* |
* Apply generic bubblesort algorithm on supplied data, using pre-allocated buffer. |
* |
* @param data Pointer to data to be sorted. |
* @param n Number of elements to be sorted. |
* @param e_size Size of one element. |
* @param cmp Comparator function. |
* @param slot Pointer to scratch memory buffer e_size bytes long. |
* |
*/ |
void _bubblesort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *slot) |
{ |
bool done = false; |
void * p; |
while (!done) { |
done = true; |
for (p = data; p < data + e_size * (n - 1); p = p + e_size) { |
if (cmp(p, p + e_size) == 1) { |
memcpy(slot, p, e_size); |
memcpy(p, p + e_size, e_size); |
memcpy(p + e_size, slot, e_size); |
done = false; |
} |
} |
} |
} |
/*
 * Comparator returns 1 if a > b, 0 if a == b, -1 if a < b
 */
int int_cmp(void * a, void * b)
{
	int x = *(int *) a;
	int y = *(int *) b;

	if (x > y)
		return 1;
	if (x < y)
		return -1;
	return 0;
}
int __u8_cmp(void * a, void * b) |
{ |
return (* (__u8 *) a > * (__u8 *)b) ? 1 : (*(__u8 *)a < * (__u8 *)b) ? -1 : 0; |
} |
int __u16_cmp(void * a, void * b) |
{ |
return (* (__u16 *) a > * (__u16 *)b) ? 1 : (*(__u16 *)a < * (__u16 *)b) ? -1 : 0; |
} |
int __u32_cmp(void * a, void * b) |
{ |
return (* (__u32 *) a > * (__u32 *)b) ? 1 : (*(__u32 *)a < * (__u32 *)b) ? -1 : 0; |
} |
//kernel/trunk/generic/src/mm/slab.c |
---|
0,0 → 1,901 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file slab.c |
* @brief Slab allocator. |
* |
* The slab allocator is closely modelled after OpenSolaris slab allocator. |
* @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/ |
* |
* with the following exceptions: |
* @li empty slabs are deallocated immediately |
* (in Linux they are kept in linked list, in Solaris ???) |
* @li empty magazines are deallocated when not needed |
* (in Solaris they are held in linked list in slab cache) |
* |
* Following features are not currently supported but would be easy to do: |
* @li cache coloring |
* @li dynamic magazine growing (different magazine sizes are already |
* supported, but we would need to adjust allocation strategy) |
* |
* The slab allocator supports per-CPU caches ('magazines') to facilitate |
* good SMP scaling. |
* |
* When a new object is being allocated, it is first checked, if it is |
* available in CPU-bound magazine. If it is not found there, it is |
* allocated from CPU-shared slab - if partial full is found, it is used, |
* otherwise a new one is allocated. |
* |
* When an object is being deallocated, it is put to CPU-bound magazine. |
* If there is no such magazine, new one is allocated (if it fails, |
* the object is deallocated into slab). If the magazine is full, it is |
* put into cpu-shared list of magazines and new one is allocated. |
* |
* The CPU-bound magazine is actually a pair of magazine to avoid |
* thrashing when somebody is allocating/deallocating 1 item at the magazine |
* size boundary. LIFO order is enforced, which should avoid fragmentation |
* as much as possible. |
* |
* Every cache contains list of full slabs and list of partialy full slabs. |
* Empty slabs are immediately freed (thrashing will be avoided because |
* of magazines). |
* |
* The slab information structure is kept inside the data area, if possible. |
* The cache can be marked that it should not use magazines. This is used |
* only for slab related caches to avoid deadlocks and infinite recursion |
* (the slab allocator uses itself for allocating all it's control structures). |
* |
* The slab allocator allocates lot of space and does not free it. When |
* frame allocator fails to allocate the frame, it calls slab_reclaim(). |
* It tries 'light reclaim' first, then brutal reclaim. The light reclaim |
* releases slabs from cpu-shared magazine-list, until at least 1 slab |
* is deallocated in each cache (this algorithm should probably change). |
* The brutal reclaim removes all cached objects, even from CPU-bound |
* magazines. |
* |
* TODO:@n |
* For better CPU-scaling the magazine allocation strategy should |
* be extended. Currently, if the cache does not have magazine, it asks |
* for non-cpu cached magazine cache to provide one. It might be feasible |
* to add cpu-cached magazine cache (which would allocate it's magazines |
* from non-cpu-cached mag. cache). This would provide a nice per-cpu |
* buffer. The other possibility is to use the per-cache |
* 'empty-magazine-list', which decreases competing for 1 per-system |
* magazine cache. |
* |
* @li it might be good to add granularity of locks even to slab level, |
* we could then try_spinlock over all partial slabs and thus improve |
* scalability even on slab level |
*/ |
#include <synch/spinlock.h> |
#include <mm/slab.h> |
#include <adt/list.h> |
#include <memstr.h> |
#include <align.h> |
#include <mm/frame.h> |
#include <config.h> |
#include <print.h> |
#include <arch.h> |
#include <panic.h> |
#include <debug.h> |
#include <bitops.h> |
/** Guards slab_cache_list. */
SPINLOCK_INITIALIZE(slab_cache_lock);
/** List of all slab caches in the system; protected by slab_cache_lock. */
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want per-cpu cache, so do not make it static
 * - using slab for internal slab structures will not deadlock,
 *   as all slab structures are 'small' - control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
/* Names for malloc_caches, smallest size first (parallel arrays). */
char *malloc_names[] = {
	"malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache; /**< Pointer to parent cache */
	link_t link; /**< Link in list of full/partial slabs */
	void *start; /**< Start address of first available item */
	count_t available; /**< Count of available items in this slab */
	index_t nextavail; /**< The index of next available item */
}slab_t;

#ifdef CONFIG_DEBUG
/* Initialization stage: 1 after slab_cache_init(),
 * 2 after slab_enable_cpucache(). */
static int _slab_initialized = 0;
#endif
/**************************************/ |
/* Slab allocation functions */ |
/** |
* Allocate frames for slab space and initialize |
* |
*/ |
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags) |
{ |
void *data; |
slab_t *slab; |
size_t fsize; |
int i; |
int status; |
pfn_t pfn; |
int zone=0; |
pfn = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone); |
data = (void *) PA2KA(PFN2ADDR(pfn)); |
if (status != FRAME_OK) { |
return NULL; |
} |
if (! (cache->flags & SLAB_CACHE_SLINSIDE)) { |
slab = slab_alloc(slab_extern_cache, flags); |
if (!slab) { |
frame_free(ADDR2PFN(KA2PA(data))); |
return NULL; |
} |
} else { |
fsize = (PAGE_SIZE << cache->order); |
slab = data + fsize - sizeof(*slab); |
} |
/* Fill in slab structures */ |
for (i=0; i < (1 << cache->order); i++) |
frame_set_parent(pfn+i, slab, zone); |
slab->start = data; |
slab->available = cache->objects; |
slab->nextavail = 0; |
slab->cache = cache; |
for (i=0; i<cache->objects;i++) |
*((int *) (slab->start + i*cache->size)) = i+1; |
atomic_inc(&cache->allocated_slabs); |
return slab; |
} |
/** |
* Deallocate space associated with slab |
* |
* @return number of freed frames |
*/ |
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab) |
{ |
frame_free(ADDR2PFN(KA2PA(slab->start))); |
if (! (cache->flags & SLAB_CACHE_SLINSIDE)) |
slab_free(slab_extern_cache, slab); |
atomic_dec(&cache->allocated_slabs); |
return 1 << cache->order; |
} |
/** Map object to slab structure */ |
static slab_t * obj2slab(void *obj) |
{ |
return (slab_t *)frame_get_parent(ADDR2PFN(KA2PA(obj)), 0); |
} |
/**************************************/ |
/* Slab functions */ |
/**
 * Return object to slab and call a destructor
 *
 * Pushes the object back onto its slab's intrusive free list and moves
 * the slab between the cache's full/partial lists as occupancy changes.
 * A slab that becomes completely free is released immediately (magazines
 * are expected to prevent thrashing).
 *
 * @param cache Cache the object belongs to.
 * @param obj Object being freed.
 * @param slab If the caller knows directly slab of the object, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	/* Derive the slab from the object's frame parent if not supplied. */
	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	/* The destructor runs before taking slablock; it may free pages. */
	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

	/* Push obj onto the slab's free list (index of next free object
	 * is stored in the object's first word). */
	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}
/**
 * Take new object from slab or create new if needed
 *
 * @param cache Cache to allocate from.
 * @param flags Frame allocation flags, used if a new slab must be created.
 *
 * @return Object address or null
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the slab control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1-level deep
		 */
		/* slablock is dropped here, so another CPU may race us;
		 * worst case we create a slab that was not needed. */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	/* Pop the next free object off the slab's intrusive free list. */
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;

	/* Requeue the slab on the list matching its new occupancy. */
	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	/* The constructor runs without slablock held. */
	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}
/**************************************/ |
/* CPU-Cache slab functions */ |
/** |
* Finds a full magazine in cache, takes it from list |
* and returns it |
* |
* @param first If true, return first, else last mag |
*/ |
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache, |
int first) |
{ |
slab_magazine_t *mag = NULL; |
link_t *cur; |
spinlock_lock(&cache->maglock); |
if (!list_empty(&cache->magazines)) { |
if (first) |
cur = cache->magazines.next; |
else |
cur = cache->magazines.prev; |
mag = list_get_instance(cur, slab_magazine_t, link); |
list_remove(&mag->link); |
atomic_dec(&cache->magazine_counter); |
} |
spinlock_unlock(&cache->maglock); |
return mag; |
} |
/** Prepend magazine to magazine list in cache */ |
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag) |
{ |
spinlock_lock(&cache->maglock); |
list_prepend(&mag->link, &cache->magazines); |
atomic_inc(&cache->magazine_counter); |
spinlock_unlock(&cache->maglock); |
} |
/** |
* Free all objects in magazine and free memory associated with magazine |
* |
* @return Number of freed pages |
*/ |
static count_t magazine_destroy(slab_cache_t *cache, |
slab_magazine_t *mag) |
{ |
int i; |
count_t frames = 0; |
for (i=0;i < mag->busy; i++) { |
frames += slab_obj_destroy(cache, mag->objs[i], NULL); |
atomic_dec(&cache->cached_objs); |
} |
slab_free(&mag_cache, mag); |
return frames; |
} |
/**
 * Find full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 *
 * Tries the CPU-bound current magazine first, then the last one
 * (swapping the pair if 'last' has objects), and finally imports a
 * full magazine from the cache-wide list.
 *
 * @return Magazine with at least one cached object, or NULL.
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			/* Swap roles: the non-empty 'last' becomes current. */
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	/* Both CPU-bound magazines are empty at this point;
	 * keeping one spare ('last') is enough, destroy the other. */
	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}
/** |
* Try to find object in CPU-cache magazines |
* |
* @return Pointer to object or NULL if not available |
*/ |
static void * magazine_obj_get(slab_cache_t *cache) |
{ |
slab_magazine_t *mag; |
void *obj; |
if (!CPU) |
return NULL; |
spinlock_lock(&cache->mag_cache[CPU->id].lock); |
mag = get_full_current_mag(cache); |
if (!mag) { |
spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
return NULL; |
} |
obj = mag->objs[--mag->busy]; |
spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
atomic_dec(&cache->cached_objs); |
return obj; |
} |
/**
 * Assure that the current magazine is empty, return pointer to it, or NULL if
 * no empty magazine is available and cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to processor.
 * First try the current.
 * If full, try the last.
 * If full, put to magazines list.
 * allocate new, exchange last & current
 *
 * @return Magazine with at least one free slot, or NULL.
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag,*lastmag,*newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			/* Swap roles: the non-full 'last' becomes current. */
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}
/** |
* Put object into CPU-cache magazine |
* |
* @return 0 - success, -1 - could not get memory |
*/ |
static int magazine_obj_put(slab_cache_t *cache, void *obj) |
{ |
slab_magazine_t *mag; |
if (!CPU) |
return -1; |
spinlock_lock(&cache->mag_cache[CPU->id].lock); |
mag = make_empty_current_mag(cache); |
if (!mag) { |
spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
return -1; |
} |
mag->objs[mag->busy++] = obj; |
spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
atomic_inc(&cache->cached_objs); |
return 0; |
} |
/**************************************/ |
/* Slab cache functions */ |
/** Return number of objects that fit in certain cache size */ |
static int comp_objects(slab_cache_t *cache) |
{ |
if (cache->flags & SLAB_CACHE_SLINSIDE) |
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size; |
else |
return (PAGE_SIZE << cache->order) / cache->size; |
} |
/** Return wasted space in slab */ |
static int badness(slab_cache_t *cache) |
{ |
int objects; |
int ssize; |
objects = comp_objects(cache); |
ssize = PAGE_SIZE << cache->order; |
if (cache->flags & SLAB_CACHE_SLINSIDE) |
ssize -= sizeof(slab_t); |
return ssize - objects*cache->size; |
} |
/** |
* Initialize mag_cache structure in slab cache |
*/ |
static void make_magcache(slab_cache_t *cache) |
{ |
int i; |
ASSERT(_slab_initialized >= 2); |
cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0); |
for (i=0; i < config.cpu_count; i++) { |
memsetb((__address)&cache->mag_cache[i], |
sizeof(cache->mag_cache[i]), 0); |
spinlock_initialize(&cache->mag_cache[i].lock, |
"slab_maglock_cpu"); |
} |
} |
/** Initialize allocated memory as a slab cache
 *
 * Computes the slab order (growing it until the wasted tail is within
 * SLAB_MAX_BADNESS), the per-slab object count, and registers the cache
 * on the global cache list.
 *
 * @param cache Preallocated, uninitialized cache descriptor.
 * @param name Human-readable cache name (not copied).
 * @param size Object size; rounded up to @a align.
 * @param align Minimum object alignment (raised to sizeof(__native)).
 * @param constructor Called on each newly created object, or NULL.
 * @param destructor Called on each object being destroyed, or NULL.
 * @param flags SLAB_CACHE_* flags.
 */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   int (*destructor)(void *obj),
		   int flags)
{
	int pages;
	ipl_t ipl;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	/* Objects are at least word-aligned. */
	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);

	/* Grow the slab until wasted space is acceptable. */
	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}
/** Create slab cache */ |
slab_cache_t * slab_cache_create(char *name, |
size_t size, |
size_t align, |
int (*constructor)(void *obj, int kmflag), |
int (*destructor)(void *obj), |
int flags) |
{ |
slab_cache_t *cache; |
cache = slab_alloc(&slab_cache_cache, 0); |
_slab_cache_create(cache, name, size, align, constructor, destructor, |
flags); |
return cache; |
} |
/**
 * Reclaim space occupied by objects that are already free
 *
 * @param cache Cache to reclaim from.
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to original magazine count to avoid
	 * endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);

	/* Destroy magazines from the shared list; light reclaim stops
	 * as soon as anything was freed from this cache. */
	while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
		frames += magazine_destroy(cache,mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Free cpu-bound magazines */
		/* Destroy CPU magazines */
		for (i=0; i<config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}
/** Check that there are no slabs and remove cache from system
 *
 * The caller must guarantee that no one is using the cache anymore;
 * the function panics if objects are still allocated from it.
 */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove cache from link, so that we don't need
	 * to disable interrupts later
	 */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs) \
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}
/** Allocate new object from cache - if no flags given, always returns |
memory */ |
void * slab_alloc(slab_cache_t *cache, int flags) |
{ |
ipl_t ipl; |
void *result = NULL; |
/* Disable interrupts to avoid deadlocks with interrupt handlers */ |
ipl = interrupts_disable(); |
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) { |
result = magazine_obj_get(cache); |
} |
if (!result) |
result = slab_obj_create(cache, flags); |
interrupts_restore(ipl); |
if (result) |
atomic_inc(&cache->allocated_objs); |
return result; |
} |
/** Return object to cache, use slab if known */ |
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \ |
|| magazine_obj_put(cache, obj)) { |
slab_obj_destroy(cache, obj, slab); |
} |
interrupts_restore(ipl); |
atomic_dec(&cache->allocated_objs); |
} |
/** Return slab object to cache
 *
 * Convenience wrapper around _slab_free() for callers that do not
 * know the object's slab.
 */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache,obj,NULL);
}
/* Go through all caches and reclaim what is possible */ |
count_t slab_reclaim(int flags) |
{ |
slab_cache_t *cache; |
link_t *cur; |
count_t frames = 0; |
spinlock_lock(&slab_cache_lock); |
/* TODO: Add assert, that interrupts are disabled, otherwise |
* memory allocation from interrupts can deadlock. |
*/ |
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) { |
cache = list_get_instance(cur, slab_cache_t, link); |
frames += _slab_reclaim(cache, flags); |
} |
spinlock_unlock(&slab_cache_lock); |
return frames; |
} |
/* Print list of slabs */ |
void slab_print_list(void) |
{ |
slab_cache_t *cache; |
link_t *cur; |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&slab_cache_lock); |
printf("slab name\t Osize\t Pages\t Obj/pg\t Slabs\t Cached\tAllocobjs\tCtl\n"); |
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) { |
cache = list_get_instance(cur, slab_cache_t, link); |
printf("%s\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t\t%s\n", cache->name, cache->size, |
(1 << cache->order), cache->objects, |
atomic_get(&cache->allocated_slabs), |
atomic_get(&cache->cached_objs), |
atomic_get(&cache->allocated_objs), |
cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out"); |
} |
spinlock_unlock(&slab_cache_lock); |
interrupts_restore(ipl); |
} |
/** Bootstrap the slab allocator's own caches and the malloc caches.
 *
 * The magazine and cache-descriptor caches must not use magazines
 * themselves (SLAB_CACHE_NOMAGAZINE) to avoid infinite recursion.
 * Per-CPU magazines for the remaining caches are deferred
 * (SLAB_CACHE_MAGDEFERRED) until the CPU count is known; see
 * slab_enable_cpucache().
 */
void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL,NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}
/** Enable cpu_cache |
* |
* Kernel calls this function, when it knows the real number of |
* processors. |
* Allocate slab for cpucache and enable it on all existing |
* slabs that are SLAB_CACHE_MAGDEFERRED |
*/ |
void slab_enable_cpucache(void) |
{ |
link_t *cur; |
slab_cache_t *s; |
#ifdef CONFIG_DEBUG |
_slab_initialized = 2; |
#endif |
spinlock_lock(&slab_cache_lock); |
for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){ |
s = list_get_instance(cur, slab_cache_t, link); |
if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED) |
continue; |
make_magcache(s); |
s->flags &= ~SLAB_CACHE_MAGDEFERRED; |
} |
spinlock_unlock(&slab_cache_lock); |
} |
/**************************************/ |
/* kalloc/kfree functions */ |
void * malloc(unsigned int size, int flags) |
{ |
int idx; |
ASSERT(_slab_initialized); |
ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W)); |
if (size < (1 << SLAB_MIN_MALLOC_W)) |
size = (1 << SLAB_MIN_MALLOC_W); |
idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1; |
return slab_alloc(malloc_caches[idx], flags); |
} |
void free(void *obj) |
{ |
slab_t *slab; |
if (!obj) return; |
slab = obj2slab(obj); |
_slab_free(slab->cache, obj, slab); |
} |
//kernel/trunk/generic/src/mm/tlb.c |
---|
0,0 → 1,178 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file tlb.c |
* @brief Generic TLB shootdown algorithm. |
*/ |
#include <mm/tlb.h> |
#include <mm/asid.h> |
#include <arch/mm/tlb.h> |
#include <smp/ipi.h> |
#include <synch/spinlock.h> |
#include <typedefs.h> |
#include <atomic.h> |
#include <arch/interrupt.h> |
#include <config.h> |
#include <arch.h> |
#include <panic.h> |
#include <debug.h> |
/**
 * This lock is used for synchronisation between sender and
 * recipients of TLB shootdown message. It must be acquired
 * before CPU structure lock.
 *
 * Held by the sender for the whole shootdown sequence; recipients
 * briefly lock/unlock it in the IPI handler to wait for the sender.
 */
SPINLOCK_INITIALIZE(tlblock);
/** Initialize the TLB subsystem by delegating to the architecture hook. */
void tlb_init(void)
{
	tlb_arch_init();
}
#ifdef CONFIG_SMP |
/** Send TLB shootdown message.
 *
 * This function attempts to deliver TLB shootdown message
 * to all other processors.
 *
 * This function must be called with interrupts disabled.
 *
 * tlblock remains locked on return; tlb_shootdown_finalize()
 * releases it once the caller has performed its own invalidation.
 *
 * @param type Type describing scope of shootdown.
 * @param asid Address space, if required by type.
 * @param page Virtual page address, if required by type.
 * @param count Number of pages, if required by type.
 */
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, __address page, count_t count)
{
	int i;

	/* Mark ourselves inactive so a concurrent sender spinning on
	 * tlb_active cannot deadlock with us. */
	CPU->tlb_active = 0;
	spinlock_lock(&tlblock);

	for (i = 0; i < config.cpu_count; i++) {
		cpu_t *cpu;

		if (i == CPU->id)
			continue;

		cpu = &cpus[i];
		spinlock_lock(&cpu->lock);
		if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) {
			/*
			 * The message queue is full.
			 * Erase the queue and store one TLB_INVL_ALL message.
			 */
			cpu->tlb_messages_count = 1;
			cpu->tlb_messages[0].type = TLB_INVL_ALL;
			cpu->tlb_messages[0].asid = ASID_INVALID;
			cpu->tlb_messages[0].page = 0;
			cpu->tlb_messages[0].count = 0;
		} else {
			/*
			 * Enqueue the message.
			 */
			cpu->tlb_messages[cpu->tlb_messages_count].type = type;
			cpu->tlb_messages[cpu->tlb_messages_count].asid = asid;
			cpu->tlb_messages[cpu->tlb_messages_count].page = page;
			cpu->tlb_messages[cpu->tlb_messages_count].count = count;
			cpu->tlb_messages_count++;
		}
		spinlock_unlock(&cpu->lock);
	}

	tlb_shootdown_ipi_send();

	/* Busy wait until all other CPUs have parked in the IPI handler. */
busy_wait:
	for (i = 0; i < config.cpu_count; i++)
		if (cpus[i].tlb_active)
			goto busy_wait;
}
/** Finish TLB shootdown sequence.
 *
 * Releases tlblock (taken in tlb_shootdown_start()), which lets the
 * other processors proceed past their IPI handlers, and marks this
 * CPU active again.
 */
void tlb_shootdown_finalize(void)
{
	spinlock_unlock(&tlblock);
	CPU->tlb_active = 1;
}
/** Broadcast the TLB shootdown IPI to all other processors. */
void tlb_shootdown_ipi_send(void)
{
	ipi_broadcast(VECTOR_TLB_SHOOTDOWN_IPI);
}
/** Receive TLB shootdown message. */ |
void tlb_shootdown_ipi_recv(void) |
{ |
tlb_invalidate_type_t type; |
asid_t asid; |
__address page; |
count_t count; |
int i; |
ASSERT(CPU); |
CPU->tlb_active = 0; |
spinlock_lock(&tlblock); |
spinlock_unlock(&tlblock); |
spinlock_lock(&CPU->lock); |
ASSERT(CPU->tlb_messages_count <= TLB_MESSAGE_QUEUE_LEN); |
for (i = 0; i < CPU->tlb_messages_count; CPU->tlb_messages_count--) { |
type = CPU->tlb_messages[i].type; |
asid = CPU->tlb_messages[i].asid; |
page = CPU->tlb_messages[i].page; |
count = CPU->tlb_messages[i].count; |
switch (type) { |
case TLB_INVL_ALL: |
tlb_invalidate_all(); |
break; |
case TLB_INVL_ASID: |
tlb_invalidate_asid(asid); |
break; |
case TLB_INVL_PAGES: |
ASSERT(count); |
tlb_invalidate_pages(asid, page, count); |
break; |
default: |
panic("unknown type (%d)\n", type); |
break; |
} |
if (type == TLB_INVL_ALL) |
break; |
} |
spinlock_unlock(&CPU->lock); |
CPU->tlb_active = 1; |
} |
#endif /* CONFIG_SMP */ |
//kernel/trunk/generic/src/mm/as.c |
---|
0,0 → 1,923 |
/* |
* Copyright (C) 2001-2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file as.c |
* @brief Address space related functions. |
* |
* This file contains address space manipulation functions. |
* Roughly speaking, this is a higher-level client of |
* Virtual Address Translation (VAT) subsystem. |
* |
* Functionality provided by this file allows one to |
* create address space and create, resize and share |
* address space areas. |
* |
* @see page.c |
* |
*/ |
#include <mm/as.h> |
#include <arch/mm/as.h> |
#include <mm/page.h> |
#include <mm/frame.h> |
#include <mm/slab.h> |
#include <mm/tlb.h> |
#include <arch/mm/page.h> |
#include <genarch/mm/page_pt.h> |
#include <genarch/mm/page_ht.h> |
#include <mm/asid.h> |
#include <arch/mm/asid.h> |
#include <synch/spinlock.h> |
#include <adt/list.h> |
#include <adt/btree.h> |
#include <proc/task.h> |
#include <arch/asm.h> |
#include <panic.h> |
#include <debug.h> |
#include <print.h> |
#include <memstr.h> |
#include <macros.h> |
#include <arch.h> |
#include <errno.h> |
#include <config.h> |
#include <arch/types.h> |
#include <typedefs.h> |
/** Architecture-specific address space operations; set by as_arch_init(). */
as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

/* Forward declarations of file-local helpers. */
static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
/** Initialize address space subsystem. */ |
void as_init(void) |
{ |
as_arch_init(); |
AS_KERNEL = as_create(FLAG_AS_KERNEL); |
if (!AS_KERNEL) |
panic("can't create kernel address space\n"); |
} |
/** Create address space. |
* |
* @param flags Flags that influence way in wich the address space is created. |
*/ |
as_t *as_create(int flags) |
{ |
as_t *as; |
as = (as_t *) malloc(sizeof(as_t), 0); |
link_initialize(&as->inactive_as_with_asid_link); |
spinlock_initialize(&as->lock, "as_lock"); |
btree_create(&as->as_area_btree); |
if (flags & FLAG_AS_KERNEL) |
as->asid = ASID_KERNEL; |
else |
as->asid = ASID_INVALID; |
as->refcount = 0; |
as->page_table = page_table_create(flags); |
return as; |
} |
/** Free address space
 *
 * The address space must no longer be referenced (refcount == 0).
 */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */

	free(as);
}
/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area. Must be page-aligned.
 * @param attrs Attributes of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	/* Refuse areas that would overlap an existing one. */
	if (!check_area_conflicts(as, base, size, NULL)) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	spinlock_initialize(&a->lock, "as_area_lock");

	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;

	/* The area B+tree is keyed by the area's base address. */
	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_area_resize(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area = NULL;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	if (area->flags & AS_AREA_DEVICE) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	/* New page count, measured from the area's base address. */
	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	if (pages < area->pages) {
		int i;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */
		for (i = pages; i < area->pages; i++) {
			pte_t *pte;

			/*
			 * Releasing physical memory.
			 * This depends on the fact that the memory was allocated using frame_alloc().
			 */
			page_table_lock(as, false);
			pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
			if (pte && PTE_VALID(pte)) {
				__address frame;

				ASSERT(PTE_PRESENT(pte));
				frame = PTE_GET_FRAME(pte);
				page_mapping_remove(as, area->base + i*PAGE_SIZE);
				page_table_unlock(as, false);

				frame_free(ADDR2PFN(frame));
			} else {
				page_table_unlock(as, false);
			}
		}
		/*
		 * Invalidate TLB's.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			spinlock_unlock(&area->lock);
			spinlock_unlock(&as->lock);
			interrupts_restore(ipl);
			return (__address) -1;
		}
	}

	area->pages = pages;

	spinlock_unlock(&area->lock);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return address;
}
/** Send address space area to another task. |
* |
* Address space area is sent to the specified task. |
* If the destination task is willing to accept the |
* area, a new area is created according to the |
* source area. Moreover, any existing mapping |
* is copied as well, providing thus a mechanism |
* for sharing group of pages. The source address |
* space area and any associated mapping is preserved. |
* |
* @param dst_id Task ID of the accepting task. |
* @param src_base Base address of the source address space area. |
* |
* @return 0 on success or ENOENT if there is no such task or |
* if there is no such address space area, |
* EPERM if there was a problem in accepting the area or |
* ENOMEM if there was a problem in allocating destination |
* address space area. |
*/ |
int as_area_send(task_id_t dst_id, __address src_base) |
{ |
ipl_t ipl; |
task_t *t; |
count_t i; |
as_t *dst_as; |
__address dst_base; |
int src_flags; |
size_t src_size; |
as_area_t *src_area, *dst_area; |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
t = task_find_by_id(dst_id); |
if (!NULL) { |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return ENOENT; |
} |
spinlock_lock(&t->lock); |
spinlock_unlock(&tasks_lock); |
dst_as = t->as; |
dst_base = (__address) t->accept_arg.base; |
if (dst_as == AS) { |
/* |
* The two tasks share the entire address space. |
* Return error since there is no point in continuing. |
*/ |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
return EPERM; |
} |
spinlock_lock(&AS->lock); |
src_area = find_area_and_lock(AS, src_base); |
if (!src_area) { |
/* |
* Could not find the source address space area. |
*/ |
spinlock_unlock(&t->lock); |
spinlock_unlock(&AS->lock); |
interrupts_restore(ipl); |
return ENOENT; |
} |
src_size = src_area->pages * PAGE_SIZE; |
src_flags = src_area->flags; |
spinlock_unlock(&src_area->lock); |
spinlock_unlock(&AS->lock); |
if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != src_size) || |
(t->accept_arg.flags != src_flags)) { |
/* |
* Discrepancy in either task ID, size or flags. |
*/ |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
return EPERM; |
} |
/* |
* Create copy of the source address space area. |
* The destination area is created with AS_AREA_ATTR_PARTIAL |
* attribute set which prevents race condition with |
* preliminary as_page_fault() calls. |
*/ |
dst_area = as_area_create(dst_as, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL); |
if (!dst_area) { |
/* |
* Destination address space area could not be created. |
*/ |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
return ENOMEM; |
} |
memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0); |
spinlock_unlock(&t->lock); |
/* |
* Avoid deadlock by first locking the address space with lower address. |
*/ |
if (dst_as < AS) { |
spinlock_lock(&dst_as->lock); |
spinlock_lock(&AS->lock); |
} else { |
spinlock_lock(&AS->lock); |
spinlock_lock(&dst_as->lock); |
} |
for (i = 0; i < SIZE2FRAMES(src_size); i++) { |
pte_t *pte; |
__address frame; |
page_table_lock(AS, false); |
pte = page_mapping_find(AS, src_base + i*PAGE_SIZE); |
if (pte && PTE_VALID(pte)) { |
ASSERT(PTE_PRESENT(pte)); |
frame = PTE_GET_FRAME(pte); |
if (!(src_flags & AS_AREA_DEVICE)) |
frame_reference_add(ADDR2PFN(frame)); |
page_table_unlock(AS, false); |
} else { |
page_table_unlock(AS, false); |
continue; |
} |
page_table_lock(dst_as, false); |
page_mapping_insert(dst_as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags)); |
page_table_unlock(dst_as, false); |
} |
/* |
* Now the destination address space area has been |
* fully initialized. Clear the AS_AREA_ATTR_PARTIAL |
* attribute. |
*/ |
spinlock_lock(&dst_area->lock); |
dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; |
spinlock_unlock(&dst_area->lock); |
spinlock_unlock(&AS->lock); |
spinlock_unlock(&dst_as->lock); |
interrupts_restore(ipl); |
return 0; |
} |
/** Initialize mapping for one page of address space.
 *
 * This functions maps 'page' to 'frame' according
 * to attributes of the address space area to
 * which 'page' belongs.
 *
 * Panics if 'page' does not belong to any area of as.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	/* true: also take as->lock, as required by find_area_and_lock(). */
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		/* Caller contract violated: the page must be covered by an area. */
		panic("page not part of any as_area\n");
	}

	page_mapping_insert(as, page, frame, get_area_flags(area));

	spinlock_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * On success a zero-filled anonymous frame is allocated and
 * mapped at the faulting page.
 *
 * @param page Faulting page.
 *
 * @return 0 on page fault, 1 on success.
 */
int as_page_fault(__address page)
{
	pte_t *pte;
	as_area_t *area;
	__address frame;

	ASSERT(AS);
	spinlock_lock(&AS->lock);

	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		spinlock_unlock(&AS->lock);
		return 0;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&AS->lock);
		return 0;
	}

	/* Device areas are mapped eagerly and must never fault here. */
	ASSERT(!(area->flags & AS_AREA_DEVICE));

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			/* Another CPU/thread resolved this fault first. */
			page_table_unlock(AS, false);
			spinlock_unlock(&area->lock);
			spinlock_unlock(&AS->lock);
			return 1;
		}
	}

	/*
	 * In general, there can be several reasons that
	 * can have caused this fault.
	 *
	 * - non-existent mapping: the area is a scratch
	 *   area (e.g. stack) and so far has not been
	 *   allocated a frame for the faulting page
	 *
	 * - non-present mapping: another possibility,
	 *   currently not implemented, would be frame
	 *   reuse; when this becomes a possibility,
	 *   do not forget to distinguish between
	 *   the different causes
	 */
	/* NOTE(review): frame_alloc() with flags 0 is presumably blocking/infallible
	 * here; the result is not checked — confirm against frame allocator policy. */
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, page, frame, get_area_flags(area));
	page_table_unlock(AS, false);

	spinlock_unlock(&area->lock);
	spinlock_unlock(&AS->lock);
	return 1;
}
/** Switch address spaces.
 *
 * Decrements the reference count of the old address space (parking it on
 * the inactive-with-ASID list when it drops to zero), increments that of
 * the new one (allocating an ASID for it if needed), installs the new
 * page table root and finally performs architecture-specific activation.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		spinlock_lock(&old->lock);
		ASSERT(old->refcount);
		if((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		spinlock_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	spinlock_lock(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	spinlock_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		spinlock_lock(&new->lock);
		new->asid = asid;
		spinlock_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}
/** Convert address space area flags to page flags. |
* |
* @param aflags Flags of some address space area. |
* |
* @return Flags to be passed to page_mapping_insert(). |
*/ |
int area_flags_to_page_flags(int aflags) |
{ |
int flags; |
flags = PAGE_USER | PAGE_PRESENT; |
if (aflags & AS_AREA_READ) |
flags |= PAGE_READ; |
if (aflags & AS_AREA_WRITE) |
flags |= PAGE_WRITE; |
if (aflags & AS_AREA_EXEC) |
flags |= PAGE_EXEC; |
if (!(aflags & AS_AREA_DEVICE)) |
flags |= PAGE_CACHEABLE; |
return flags; |
} |
/** Compute flags for virtual address translation subsystem.
 *
 * Convenience wrapper around area_flags_to_page_flags() for a
 * locked address space area.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}
/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * Dispatches to the architecture-selected as_operations.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}
/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * Dispatches to the architecture-selected as_operations.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}
/** Unlock page table.
 *
 * Counterpart of page_table_lock(); dispatches to the
 * architecture-selected as_operations.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		spinlock_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		spinlock_lock(&a->lock);
		/* Hit if va falls inside [a->base, a->base + size). */
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		spinlock_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		spinlock_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		spinlock_unlock(&a->lock);
	}

	return NULL;
}
/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with NULL page.
	 * (NULL is used here as the numeric address 0.)
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	/* An exact base match is a conflict unless it is the avoided area itself. */
	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}
	if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check if it doesn't conflict with kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}
/* |
* Address space related syscalls. |
*/ |
/** Wrapper for as_area_create(). */ |
__native sys_as_area_create(__address address, size_t size, int flags) |
{ |
if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE)) |
return (__native) address; |
else |
return (__native) -1; |
} |
/** Wrapper for as_area_resize.
 *
 * Note: the userspace flags argument is intentionally ignored;
 * 0 is always passed down (flags are currently unused there).
 */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
	return as_area_resize(AS, address, size, 0);
}
/** Prepare task for accepting address space area from another task.
 *
 * Records the accept request in TASK->accept_arg so that a later
 * as_area_send() from the named task can match against it.
 *
 * @param uspace_accept_arg Accept structure passed from userspace.
 *
 * @return EPERM if the task ID encapsulated in @uspace_accept_arg references
 *	   TASK or if the requested size is zero. Otherwise zero is returned.
 */
__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
{
	as_area_acptsnd_arg_t arg;

	/* NOTE(review): the return value of copy_from_uspace() is not checked;
	 * a faulting userspace pointer would leave 'arg' undefined — confirm. */
	copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));

	if (!arg.size)
		return (__native) EPERM;

	if (arg.task_id == TASK->taskid) {
		/*
		 * Accepting from itself is not allowed.
		 */
		return (__native) EPERM;
	}

	memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));

	return 0;
}
/** Wrapper for as_area_send.
 *
 * Validates the userspace argument (non-zero size, destination is not
 * the caller itself) and forwards to as_area_send().
 */
__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
{
	as_area_acptsnd_arg_t arg;

	/* NOTE(review): copy_from_uspace() result unchecked — see sys_as_area_accept(). */
	copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));

	if (!arg.size)
		return (__native) EPERM;

	if (arg.task_id == TASK->taskid) {
		/*
		 * Sending to itself is not allowed.
		 */
		return (__native) EPERM;
	}

	return (__native) as_area_send(arg.task_id, (__address) arg.base);
}
//kernel/trunk/generic/src/mm/buddy.c |
---|
0,0 → 1,318 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file buddy.c |
* @brief Buddy allocator framework. |
* |
* This file contains buddy system allocator framework. |
* Specialized functions are needed for this abstract framework |
* to be useful. |
*/ |
#include <mm/buddy.h> |
#include <mm/frame.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <adt/list.h> |
#include <debug.h> |
#include <print.h> |
/** Return size needed for the buddy configuration data */ |
size_t buddy_conf_size(int max_order) |
{ |
return sizeof(buddy_system_t) + (max_order + 1) * sizeof(link_t); |
} |
/** Create buddy system |
* |
* Allocate memory for and initialize new buddy system. |
* |
* @param b Preallocated buddy system control data. |
* @param max_order The biggest allocable size will be 2^max_order. |
* @param op Operations for new buddy system. |
* @param data Pointer to be used by implementation. |
* |
* @return New buddy system. |
*/ |
void buddy_system_create(buddy_system_t *b, |
__u8 max_order, |
buddy_system_operations_t *op, |
void *data) |
{ |
int i; |
ASSERT(max_order < BUDDY_SYSTEM_INNER_BLOCK); |
ASSERT(op->find_buddy); |
ASSERT(op->set_order); |
ASSERT(op->get_order); |
ASSERT(op->bisect); |
ASSERT(op->coalesce); |
ASSERT(op->mark_busy); |
/* |
* Use memory after our own structure |
*/ |
b->order = (link_t *) (&b[1]); |
for (i = 0; i <= max_order; i++) |
list_initialize(&b->order[i]); |
b->max_order = max_order; |
b->op = op; |
b->data = data; |
} |
/** Check if buddy system can allocate block |
* |
* @param b Buddy system pointer |
* @param i Size of the block (2^i) |
* |
* @return True if block can be allocated |
*/ |
bool buddy_system_can_alloc(buddy_system_t *b, __u8 i) { |
__u8 k; |
/* |
* If requested block is greater then maximal block |
* we know immediatly that we cannot satisfy the request. |
*/ |
if (i > b->max_order) return false; |
/* |
* Check if any bigger or equal order has free elements |
*/ |
for (k=i; k <= b->max_order; k++) { |
if (!list_empty(&b->order[k])) { |
return true; |
} |
} |
return false; |
} |
/** Allocate PARTICULAR block from buddy system
 *
 * Locate the free block that covers the requested block, then keep
 * bisecting it, returning the unused halves to the buddy system,
 * until the order-0 block at the requested position remains.
 *
 * @return Block of data or NULL if no such block was found
 */
link_t *buddy_system_alloc_block(buddy_system_t *b, link_t *block)
{
	link_t *left,*right, *tmp;
	__u8 order;

	/* Find the free ancestor block containing 'block'. */
	left = b->op->find_block(b, block, BUDDY_SYSTEM_INNER_BLOCK);
	ASSERT(left);
	list_remove(left);
	while (1) {
		if (! b->op->get_order(b,left)) {
			/* Reached order 0 — this is the requested block. */
			b->op->mark_busy(b, left);
			return left;
		}

		order = b->op->get_order(b, left);

		right = b->op->bisect(b, left);
		b->op->set_order(b, left, order-1);
		b->op->set_order(b, right, order-1);

		/* Keep the half that still contains the requested block in 'left'. */
		tmp = b->op->find_block(b, block, BUDDY_SYSTEM_INNER_BLOCK);

		if (tmp == right) {
			right = left;
			left = tmp;
		}
		ASSERT(tmp == left);
		/* NOTE(review): 'left' is marked busy so buddy_system_free(right)
		 * does not coalesce the pair back together, then made available
		 * again for the next bisection round — verify against the
		 * mark_busy/mark_available implementation semantics. */
		b->op->mark_busy(b, left);
		buddy_system_free(b, right);
		b->op->mark_available(b, left);
	}
}
/** Allocate block from buddy system.
 *
 * Satisfies the request from the free list of order i if possible,
 * otherwise recursively allocates an order i+1 block and bisects it.
 *
 * @param b Buddy system pointer.
 * @param i Returned block will be 2^i big.
 *
 * @return Block of data represented by link_t, or NULL on failure.
 */
link_t *buddy_system_alloc(buddy_system_t *b, __u8 i)
{
	link_t *res, *hlp;

	ASSERT(i <= b->max_order);

	/*
	 * If the list of order i is not empty,
	 * the request can be immediately satisfied.
	 */
	if (!list_empty(&b->order[i])) {
		res = b->order[i].next;
		list_remove(res);
		b->op->mark_busy(b, res);
		return res;
	}
	/*
	 * If order i is already the maximal order,
	 * the request cannot be satisfied.
	 */
	if (i == b->max_order)
		return NULL;

	/*
	 * Try to recursively satisfy the request from higher order lists.
	 */
	hlp = buddy_system_alloc(b, i + 1);

	/*
	 * The request could not be satisfied
	 * from higher order lists.
	 */
	if (!hlp)
		return NULL;

	res = hlp;

	/*
	 * Bisect the block and set order of both of its parts to i.
	 */
	hlp = b->op->bisect(b, res);
	b->op->set_order(b, res, i);
	b->op->set_order(b, hlp, i);

	/*
	 * Return the other half to buddy system. Mark the first part
	 * full, so that it won't coalesce again.
	 */
	b->op->mark_busy(b, res);
	buddy_system_free(b, hlp);

	return res;
}
/** Return block to buddy system.
 *
 * Returns the block to the free list of its order, coalescing it
 * recursively with its buddy whenever the buddy is also free.
 *
 * @param b Buddy system pointer.
 * @param block Block to return.
 */
void buddy_system_free(buddy_system_t *b, link_t *block)
{
	link_t *buddy, *hlp;
	__u8 i;

	/*
	 * Determine block's order.
	 */
	i = b->op->get_order(b, block);

	ASSERT(i <= b->max_order);

	if (i != b->max_order) {
		/*
		 * See if there is any buddy in the list of order i.
		 */
		buddy = b->op->find_buddy(b, block);
		if (buddy) {

			ASSERT(b->op->get_order(b, buddy) == i);
			/*
			 * Remove buddy from the list of order i.
			 */
			list_remove(buddy);

			/*
			 * Invalidate order of both block and buddy.
			 */
			b->op->set_order(b, block, BUDDY_SYSTEM_INNER_BLOCK);
			b->op->set_order(b, buddy, BUDDY_SYSTEM_INNER_BLOCK);

			/*
			 * Coalesce block and buddy into one block.
			 */
			hlp = b->op->coalesce(b, block, buddy);

			/*
			 * Set order of the coalesced block to i + 1.
			 */
			b->op->set_order(b, hlp, i + 1);

			/*
			 * Recursively add the coalesced block to the list of order i + 1.
			 */
			buddy_system_free(b, hlp);
			return;
		}
	}

	/*
	 * Insert block into the list of order i.
	 */
	list_append(block, &b->order[i]);
}
/** Prints out structure of buddy system
 *
 * For each order, prints the number of free blocks, the total and
 * per-block sizes in KiB, the number of elements per block, and the
 * implementation-specific id of every free block.
 *
 * @param b Pointer to buddy system
 * @param elem_size Element size
 */
void buddy_system_structure_print(buddy_system_t *b, size_t elem_size) {
	index_t i;
	count_t cnt, elem_count = 0, block_count = 0;
	link_t * cur;

	printf("Order\tBlocks\tSize    \tBlock size\tElems per block\n");
	printf("-----\t------\t--------\t----------\t---------------\n");

	for (i=0;i <= b->max_order; i++) {
		/* First pass: count the free blocks of this order. */
		cnt = 0;
		if (!list_empty(&b->order[i])) {
			for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next)
				cnt++;
		}

		printf("#%zd\t%5zd\t%7zdK\t%8zdK\t%6zd\t", i, cnt, (cnt * (1 << i) * elem_size) >> 10, ((1 << i) * elem_size) >> 10, 1 << i);

		/* Second pass: print the id of every free block. */
		if (!list_empty(&b->order[i])) {
			for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next) {
				b->op->print_id(b, cur);
				printf(" ");
			}
		}
		printf("\n");

		block_count += cnt;
		elem_count += (1 << i) * cnt;
	}
	printf("-----\t------\t--------\t----------\t---------------\n");
	printf("Buddy system contains %zd free elements (%zd blocks)\n" , elem_count, block_count);
}
//kernel/trunk/generic/src/mm/frame.c |
---|
0,0 → 1,1110 |
/* |
* Copyright (C) 2001-2005 Jakub Jermar |
* Copyright (C) 2005 Sergey Bondari |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file frame.c |
* @brief Physical frame allocator. |
* |
* This file contains the physical frame allocator and memory zone management. |
* The frame allocator is built on top of the buddy allocator. |
* |
* @see buddy.c |
*/ |
/* |
* Locking order |
* |
* In order to access particular zone, the process must first lock |
* the zones.lock, then lock the zone and then unlock the zones.lock. |
 * This ensures that we can fiddle with the zones at runtime without
* affecting the processes. |
* |
*/ |
#include <typedefs.h> |
#include <arch/types.h> |
#include <mm/frame.h> |
#include <mm/as.h> |
#include <panic.h> |
#include <debug.h> |
#include <adt/list.h> |
#include <synch/spinlock.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <print.h> |
#include <align.h> |
#include <mm/slab.h> |
#include <bitops.h> |
#include <macros.h> |
/** Per-frame bookkeeping record; one per physical frame in a zone. */
typedef struct {
	count_t refcount;	/**< tracking of shared frames  */
	__u8 buddy_order;	/**< buddy system block order */
	link_t buddy_link;	/**< link to the next free block inside one order */
	void *parent;           /**< If allocated by slab, this points there */
} frame_t;

/** A contiguous range of physical memory managed by one buddy system. */
typedef struct {
	SPINLOCK_DECLARE(lock);	/**< this lock protects everything below */
	pfn_t base;		/**< frame_no of the first frame in the frames array */
	count_t count;          /**< Size of zone */

	frame_t *frames;	/**< array of frame_t structures in this zone */
	count_t free_count;	/**< number of free frame_t structures */
	count_t busy_count;	/**< number of busy frame_t structures */
	
	buddy_system_t * buddy_system; /**< buddy system for the zone */
	int flags;
} zone_t;

/*
 * The zoneinfo.lock must be locked when accessing zoneinfo structure.
 * Some of the attributes in zone_t structures are 'read-only'
 */

/** Global registry of all zones, sorted by base frame number. */
struct {
	SPINLOCK_DECLARE(lock);
	int count;
	zone_t *info[ZONES_MAX];
} zones;
/*********************************/ |
/* Helper functions */ |
/** Index of frame within its zone's frames array. */
static inline index_t frame_index(zone_t *zone, frame_t *frame)
{
	return (index_t)(frame - zone->frames);
}
/** Absolute frame number: zone-relative index plus the zone's base pfn. */
static inline index_t frame_index_abs(zone_t *zone, frame_t *frame)
{
	return (index_t)(frame - zone->frames) + zone->base;
}
/** True if index is a valid zone-relative frame index.
 * NOTE(review): the 'index >= 0' test is presumably redundant if index_t
 * is unsigned — confirm against the index_t typedef. */
static inline int frame_index_valid(zone_t *zone, index_t index)
{
	return index >= 0 && index < zone->count;
}

/** Compute pfn_t from frame_t pointer & zone pointer */
static index_t make_frame_index(zone_t *zone, frame_t *frame)
{
	return frame - zone->frames;
}
/** Initialize frame structure
 *
 * Initialize frame structure. The frame starts out referenced
 * (refcount 1) with buddy order 0.
 *
 * @param frame Frame structure to be initialized.
 */
static void frame_initialize(frame_t *frame)
{
	frame->refcount = 1;
	frame->buddy_order = 0;
}
/*************************************/ |
/* Zoneinfo functions */ |
/** |
* Insert-sort zone into zones list |
* |
* @return zone number on success, -1 on error |
*/ |
static int zones_add_zone(zone_t *newzone) |
{ |
int i,j; |
ipl_t ipl; |
zone_t *z; |
ipl = interrupts_disable(); |
spinlock_lock(&zones.lock); |
/* Try to merge */ |
if (zones.count + 1 == ZONES_MAX) |
panic("Maximum zone(%d) count exceeded.", ZONES_MAX); |
for (i = 0; i < zones.count; i++) { |
/* Check for overflow */ |
z = zones.info[i]; |
if (overlaps(newzone->base,newzone->count, |
z->base, z->count)) { |
printf("Zones overlap!\n"); |
return -1; |
} |
if (newzone->base < z->base) |
break; |
} |
/* Move other zones up */ |
for (j = i;j < zones.count; j++) |
zones.info[j + 1] = zones.info[j]; |
zones.info[i] = newzone; |
zones.count++; |
spinlock_unlock(&zones.lock); |
interrupts_restore(ipl); |
return i; |
} |
/**
 * Try to find a zone where can we find the frame
 *
 * Searches circularly starting at zone 'hint' and returns the zone
 * (locked) whose range contains the frame, updating *pzone to its index.
 *
 * @param frame Frame number contained in the zone being looked for.
 * @param pzone Preferred starting zone index, or NULL; updated on success.
 *
 * Assumes interrupts are disabled.
 *
 * @return Locked zone containing frame, or NULL if none does.
 */
static zone_t * find_zone_and_lock(pfn_t frame, int *pzone)
{
	int i;
	int hint = pzone ? *pzone : 0;
	zone_t *z;
	
	spinlock_lock(&zones.lock);

	/* Out-of-range hints fall back to zone 0. */
	if (hint >= zones.count || hint < 0)
		hint = 0;
	
	i = hint;
	do {
		z = zones.info[i];
		spinlock_lock(&z->lock);
		if (z->base <= frame && z->base + z->count > frame) {
			spinlock_unlock(&zones.lock); /* Unlock the global lock */
			if (pzone)
				*pzone = i;
			return z;
		}
		spinlock_unlock(&z->lock);

		i++;
		if (i >= zones.count)
			i = 0;
	} while(i != hint);

	spinlock_unlock(&zones.lock);
	return NULL;
}
/** @return True if zone can allocate specified order
 *
 * The zone must be locked by the caller.
 */
static int zone_can_alloc(zone_t *z, __u8 order)
{
	return buddy_system_can_alloc(z->buddy_system, order);
}
/** |
* Find AND LOCK zone that can allocate order frames |
* |
* Assume interrupts are disabled!! |
* |
* @param pzone Pointer to preferred zone or NULL, on return contains zone number |
*/ |
static zone_t * find_free_zone_lock(__u8 order, int *pzone) |
{ |
int i; |
zone_t *z; |
int hint = pzone ? *pzone : 0; |
spinlock_lock(&zones.lock); |
if (hint >= zones.count) |
hint = 0; |
i = hint; |
do { |
z = zones.info[i]; |
spinlock_lock(&z->lock); |
/* Check if the zone has 2^order frames area available */ |
if (zone_can_alloc(z, order)) { |
spinlock_unlock(&zones.lock); |
if (pzone) |
*pzone = i; |
return z; |
} |
spinlock_unlock(&z->lock); |
if (++i >= zones.count) |
i = 0; |
} while(i != hint); |
spinlock_unlock(&zones.lock); |
return NULL; |
} |
/********************************************/
/* Buddy system functions */

/** Buddy system find_block implementation
 *
 * Find block that is parent of current list.
 * That means go to lower addresses, until such block is found
 *
 * Scans frames downward from 'child' and returns the first frame
 * whose recorded order differs from 'order'.
 *
 * @param order - Order of parent must be different then this parameter!!
 */
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child,
				     __u8 order)
{
	frame_t * frame;
	zone_t * zone;
	index_t index;
	
	frame = list_get_instance(child, frame_t, buddy_link);
	zone = (zone_t *) b->data;

	index = frame_index(zone, frame);
	do {
		if (zone->frames[index].buddy_order != order) {
			return &zone->frames[index].buddy_link;
		}
	} while(index-- > 0);
	return NULL;
}
/** Buddy system print_id implementation
 *
 * Print the zone-relative index of the frame representing the block.
 */
static void zone_buddy_print_id(buddy_system_t *b, link_t *block)
{
	zone_t *zone = (zone_t *) b->data;
	frame_t *frame = list_get_instance(block, frame_t, buddy_link);

	printf("%zd", frame_index(zone, frame));
}
/** Buddy system find_buddy implementation
 *
 * Computes the index of the potential buddy (the adjacent block of the
 * same order) and returns it only if it is a free block of exactly the
 * same order.
 *
 * @param b Buddy system.
 * @param block Block for which buddy should be found
 *
 * @return Buddy for given block if found
 */
static link_t * zone_buddy_find_buddy(buddy_system_t *b, link_t * block)
{
	frame_t * frame;
	zone_t * zone;
	index_t index;
	bool is_left, is_right;

	frame = list_get_instance(block, frame_t, buddy_link);
	zone = (zone_t *) b->data;
	ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order));

	is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
	is_right = IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame);
	/* A block is exactly one of left/right within its parent. */
	ASSERT(is_left ^ is_right);
	if (is_left) {
		/* Buddy lies 2^order frames above us... */
		index = (frame_index(zone, frame)) + (1 << frame->buddy_order);
	} else { // if (is_right)
		/* ...or 2^order frames below us. */
		index = (frame_index(zone, frame)) - (1 << frame->buddy_order);
	}

	if (frame_index_valid(zone, index)) {
		/* Only a free block of identical order can be coalesced. */
		if (zone->frames[index].buddy_order == frame->buddy_order &&
		    zone->frames[index].refcount == 0) {
			return &zone->frames[index].buddy_link;
		}
	}

	return NULL;
}
/** Buddy system bisect implementation |
* |
* @param b Buddy system. |
* @param block Block to bisect |
* |
* @return right block |
*/ |
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t * block) { |
frame_t * frame_l, * frame_r; |
frame_l = list_get_instance(block, frame_t, buddy_link); |
frame_r = (frame_l + (1 << (frame_l->buddy_order - 1))); |
return &frame_r->buddy_link; |
} |
/** Buddy system coalesce implementation |
* |
* @param b Buddy system. |
* @param block_1 First block |
* @param block_2 First block's buddy |
* |
* @return Coalesced block (actually block that represents lower address) |
*/ |
static link_t * zone_buddy_coalesce(buddy_system_t *b, link_t * block_1, |
link_t * block_2) |
{ |
frame_t *frame1, *frame2; |
frame1 = list_get_instance(block_1, frame_t, buddy_link); |
frame2 = list_get_instance(block_2, frame_t, buddy_link); |
return frame1 < frame2 ? block_1 : block_2; |
} |
/** Buddy system set_order implementation |
* |
* @param b Buddy system. |
* @param block Buddy system block |
* @param order Order to set |
*/ |
static void zone_buddy_set_order(buddy_system_t *b, link_t * block, __u8 order) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->buddy_order = order; |
} |
/** Buddy system get_order implementation |
* |
* @param b Buddy system. |
* @param block Buddy system block |
* |
* @return Order of block |
*/ |
static __u8 zone_buddy_get_order(buddy_system_t *b, link_t * block) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
return frame->buddy_order; |
} |
/** Buddy system mark_busy implementation |
* |
* @param b Buddy system |
* @param block Buddy system block |
* |
*/ |
static void zone_buddy_mark_busy(buddy_system_t *b, link_t * block) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->refcount = 1; |
} |
/** Buddy system mark_available implementation |
* |
* @param b Buddy system |
* @param block Buddy system block |
* |
*/ |
static void zone_buddy_mark_available(buddy_system_t *b, link_t * block) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->refcount = 0; |
} |
/** Operation table plugging the zone frame arrays into the generic
 * buddy allocator. Passed to buddy_system_create() for every zone. */
static struct buddy_system_operations zone_buddy_system_operations = {
	.find_buddy = zone_buddy_find_buddy,
	.bisect = zone_buddy_bisect,
	.coalesce = zone_buddy_coalesce,
	.set_order = zone_buddy_set_order,
	.get_order = zone_buddy_get_order,
	.mark_busy = zone_buddy_mark_busy,
	.mark_available = zone_buddy_mark_available,
	.find_block = zone_buddy_find_block,
	.print_id = zone_buddy_print_id
};
/*************************************/ |
/* Zone functions */ |
/** Allocate frame in particular zone |
* |
* Assume zone is locked |
* Panics, if allocation is impossible. |
* |
* @return Frame index in zone |
*/ |
static pfn_t zone_frame_alloc(zone_t *zone,__u8 order) |
{ |
pfn_t v; |
link_t *tmp; |
frame_t *frame; |
/* Allocate frames from zone buddy system */ |
tmp = buddy_system_alloc(zone->buddy_system, order); |
ASSERT(tmp); |
/* Update zone information. */ |
zone->free_count -= (1 << order); |
zone->busy_count += (1 << order); |
/* Frame will be actually a first frame of the block. */ |
frame = list_get_instance(tmp, frame_t, buddy_link); |
/* get frame address */ |
v = make_frame_index(zone, frame); |
return v; |
} |
/** Free frame from zone
 *
 * Decrements the frame's reference count; when it drops to zero the
 * frame is returned to the zone's buddy system and the zone's counters
 * are updated.
 *
 * Assume zone is locked
 */
static void zone_frame_free(zone_t *zone, index_t frame_idx)
{
	frame_t *frame;
	__u8 order;

	frame = &zone->frames[frame_idx];

	/* remember frame order -- buddy_system_free() below may coalesce
	 * the block and change buddy_order */
	order = frame->buddy_order;

	ASSERT(frame->refcount);

	if (!--frame->refcount) {
		buddy_system_free(zone->buddy_system, &frame->buddy_link);

		/* Update zone information. */
		zone->free_count += (1 << order);
		zone->busy_count -= (1 << order);
	}
}
/** Return frame from zone */ |
static frame_t * zone_get_frame(zone_t *zone, index_t frame_idx) |
{ |
ASSERT(frame_idx < zone->count); |
return &zone->frames[frame_idx]; |
} |
/** Mark frame in zone unavailable to allocation
 *
 * Pulls the exact frame out of the buddy system so it can never be
 * handed out. A frame that is already in use (refcount != 0) is left
 * untouched.
 */
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx)
{
	frame_t *frame;
	link_t *link;

	frame = zone_get_frame(zone, frame_idx);
	if (frame->refcount)
		return;		/* already allocated/busy: nothing to do */
	link = buddy_system_alloc_block(zone->buddy_system,
					&frame->buddy_link);
	ASSERT(link);
	zone->free_count--;
}
/**
 * Join 2 zones
 *
 * Builds a brand-new zone (configuration, buddy system and frame array)
 * at *z covering the whole [z1->base, z2->base + z2->count) range, then
 * re-registers every free frame of both source zones in the new buddy
 * system.
 *
 * Expect zone_t *z to point to space at least zone_conf_size large
 *
 * Assume z1 & z2 are locked
 */
static void _zone_merge(zone_t *z, zone_t *z1, zone_t *z2)
{
	__u8 max_order;
	int i, z2idx;
	pfn_t frame_idx;
	frame_t *frame;

	ASSERT(!overlaps(z1->base,z1->count,z2->base,z2->count));
	ASSERT(z1->base < z2->base);

	spinlock_initialize(&z->lock, "zone_lock");
	z->base = z1->base;
	/* The new zone also spans any gap between z1 and z2. */
	z->count = z2->base+z2->count - z1->base;
	z->flags = z1->flags & z2->flags;

	z->free_count = z1->free_count + z2->free_count;
	z->busy_count = z1->busy_count + z2->busy_count;

	max_order = fnzb(z->count);

	/* Buddy configuration lives immediately after the zone_t. */
	z->buddy_system = (buddy_system_t *)&z[1];
	buddy_system_create(z->buddy_system, max_order,
			    &zone_buddy_system_operations,
			    (void *) z);

	/* Frame array follows the buddy configuration. */
	z->frames = (frame_t *)((void *)z->buddy_system+buddy_conf_size(max_order));
	for (i = 0; i < z->count; i++) {
		/* This marks all frames busy */
		frame_initialize(&z->frames[i]);
	}
	/* Copy frames from both zones to preserve full frame orders,
	 * parents etc. Set all free frames with refcount=0 to 1, because
	 * we add all free frames to buddy allocator later again, clear
	 * order to 0. Don't set busy frames with refcount=0, as they
	 * will not be reallocated during merge and it would make later
	 * problems with allocation/free.
	 */
	for (i=0; i<z1->count; i++)
		z->frames[i] = z1->frames[i];
	for (i=0; i < z2->count; i++) {
		/* z2's frames land at their absolute offset in the new zone. */
		z2idx = i + (z2->base - z1->base);
		z->frames[z2idx] = z2->frames[i];
	}
	i = 0;
	while (i < z->count) {
		if (z->frames[i].refcount) {
			/* skip busy frames */
			i += 1 << z->frames[i].buddy_order;
		} else { /* Free frames, set refcount=1 */
			/* All free frames have refcount=0, we need not
			 * to check the order */
			z->frames[i].refcount = 1;
			z->frames[i].buddy_order = 0;
			i++;
		}
	}
	/* Add free blocks from the 2 original zones */
	while (zone_can_alloc(z1, 0)) {
		/* Drain z1 one frame at a time; each drained frame is
		 * re-inserted as free into the NEW zone's buddy system. */
		frame_idx = zone_frame_alloc(z1, 0);
		frame = &z->frames[frame_idx];
		frame->refcount = 0;
		buddy_system_free(z->buddy_system, &frame->buddy_link);
	}
	while (zone_can_alloc(z2, 0)) {
		frame_idx = zone_frame_alloc(z2, 0);
		/* Translate z2-relative index into the new zone. */
		frame = &z->frames[frame_idx + (z2->base-z1->base)];
		frame->refcount = 0;
		buddy_system_free(z->buddy_system, &frame->buddy_link);
	}
}
/** Return old configuration frames into the zone
 *
 * We have several cases
 * - the conf. data is outside of zone -> exit, shall we call frame_free??
 * - the conf. data was created by zone_create or
 *   updated with reduce_region -> free every frame
 *
 * @param newzone The actual zone where freeing should occur
 * @param oldzone Pointer to old zone configuration data that should
 *                be freed from new zone
 */
static void return_config_frames(zone_t *newzone, zone_t *oldzone)
{
	pfn_t pfn;
	frame_t *frame;
	count_t cframes;
	int i;

	pfn = ADDR2PFN((__address)KA2PA(oldzone));
	cframes = SIZE2FRAMES(zone_conf_size(oldzone->count));

	/* Old configuration lies outside the new zone: nothing to free. */
	if (pfn < newzone->base || pfn >= newzone->base + newzone->count)
		return;

	frame = &newzone->frames[pfn - newzone->base];
	/* Config frames are expected to be order-0 blocks here. */
	ASSERT(!frame->buddy_order);

	for (i=0; i < cframes; i++) {
		/* NOTE(review): busy_count++ presumably compensates for the
		 * decrement done inside zone_frame_free() for frames that
		 * were never accounted as busy -- confirm. */
		newzone->busy_count++;
		zone_frame_free(newzone, pfn+i-newzone->base);
	}
}
/** Reduce allocated block to count of order 0 frames |
* |
* The allocated block need 2^order frames of space. Reduce all frames |
* in block to order 0 and free the unneded frames. This means, that |
* when freeing the block, you have to free every frame from block. |
* |
* @param zone |
* @param frame_idx Index to block |
* @param count Allocated space in block |
*/ |
static void zone_reduce_region(zone_t *zone, pfn_t frame_idx, count_t count) |
{ |
count_t i; |
__u8 order; |
frame_t *frame; |
ASSERT(frame_idx+count < zone->count); |
order = zone->frames[frame_idx].buddy_order; |
ASSERT((1 << order) >= count); |
/* Reduce all blocks to order 0 */ |
for (i=0; i < (1 << order); i++) { |
frame = &zone->frames[i + frame_idx]; |
frame->buddy_order = 0; |
if (! frame->refcount) |
frame->refcount = 1; |
ASSERT(frame->refcount == 1); |
} |
/* Free unneeded frames */ |
for (i=count; i < (1 << order); i++) { |
zone_frame_free(zone, i + frame_idx); |
} |
} |
/** Merge zones z1 and z2
 *
 * - the zones must be 2 zones with no zone existing in between,
 *   which means that z2 = z1+1
 *
 * - When you create a new zone, the frame allocator configuration does
 *   not need to be 2^order size. Once the allocator is running it is no
 *   longer possible, merged configuration data occupies more space :-/
 */
void zone_merge(int z1, int z2)
{
	ipl_t ipl;
	zone_t *zone1, *zone2, *newzone;
	int cframes;
	__u8 order;
	int i;
	pfn_t pfn;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	if (z1 < 0 || z1 >= zones.count || z2 < 0 || z2 >= zones.count)
		goto errout;
	/* We can join only 2 zones with none existing inbetween */
	if (z2-z1 != 1)
		goto errout;

	zone1 = zones.info[z1];
	zone2 = zones.info[z2];
	spinlock_lock(&zone1->lock);
	spinlock_lock(&zone2->lock);

	/* Space needed for the merged zone's configuration data. */
	cframes = SIZE2FRAMES(zone_conf_size(zone2->base+zone2->count-zone1->base));
	order = fnzb(cframes) + 1;

	/* Allocate zonedata inside one of the zones */
	if (zone_can_alloc(zone1, order))
		pfn = zone1->base + zone_frame_alloc(zone1, order);
	else if (zone_can_alloc(zone2, order))
		pfn = zone2->base + zone_frame_alloc(zone2, order);
	else
		goto errout2;

	newzone = (zone_t *)PA2KA(PFN2ADDR(pfn));

	_zone_merge(newzone, zone1, zone2);

	/* Free unneeded config frames */
	zone_reduce_region(newzone, pfn - newzone->base, cframes);
	/* Subtract zone information from busy frames */
	newzone->busy_count -= cframes;

	/* Replace existing zones in zoneinfo list */
	zones.info[z1] = newzone;
	/* Shift the remaining zones down over the vacated z2 slot. */
	for (i = z2 + 1; i < zones.count; i++)
		zones.info[i - 1] = zones.info[i];
	zones.count--;

	/* Free old zone information */
	return_config_frames(newzone, zone1);
	return_config_frames(newzone, zone2);
errout2:
	/* Nobody is allowed to enter to zone, so we are safe
	 * to touch the spinlocks last time */
	spinlock_unlock(&zone1->lock);
	spinlock_unlock(&zone2->lock);
errout:
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}
/** |
* Merge all zones into one big zone |
* |
* It is reasonable to do this on systems whose bios reports parts in chunks, |
* so that we could have 1 zone (it's faster). |
*/ |
void zone_merge_all(void) |
{ |
int count = zones.count; |
while (zones.count > 1 && --count) { |
zone_merge(0,1); |
break; |
} |
} |
/** Create frame zone
 *
 * Initialize a new frame zone in caller-provided configuration space.
 *
 * @param start Frame number (pfn) of the first frame within the zone.
 * @param count Number of frames in the zone.
 * @param z     Preallocated space for configuration data; must be at
 *              least zone_conf_size(count) bytes.
 * @param flags Zone flags.
 */
static void zone_construct(pfn_t start, count_t count, zone_t *z, int flags)
{
	int i;
	__u8 max_order;

	spinlock_initialize(&z->lock, "zone_lock");
	z->base = start;
	z->count = count;
	z->flags = flags;
	z->free_count = count;
	z->busy_count = 0;

	/*
	 * Compute order for buddy system, initialize
	 */
	max_order = fnzb(count);
	/* Buddy configuration lives immediately after the zone_t. */
	z->buddy_system = (buddy_system_t *)&z[1];

	buddy_system_create(z->buddy_system, max_order,
			    &zone_buddy_system_operations,
			    (void *) z);

	/* Allocate frames _after_ the conframe */
	/* Check sizes */
	z->frames = (frame_t *)((void *)z->buddy_system+buddy_conf_size(max_order));
	for (i = 0; i<count; i++) {
		frame_initialize(&z->frames[i]);
	}

	/* Stuffing frames: hand every frame to the buddy system as free. */
	for (i = 0; i < count; i++) {
		z->frames[i].refcount = 0;
		buddy_system_free(z->buddy_system, &z->frames[i].buddy_link);
	}
}
/** Compute configuration data size for zone */ |
__address zone_conf_size(count_t count) |
{ |
int size = sizeof(zone_t) + count*sizeof(frame_t); |
int max_order; |
max_order = fnzb(count); |
size += buddy_conf_size(max_order); |
return size; |
} |
/** Create and add zone to system
 *
 * @param confframe Where configuration frame is supposed to be.
 *                  Always check, that we will not disturb the kernel and possibly init.
 *                  If confframe is given _outside_ this zone, it is expected,
 *                  that the area is already marked BUSY and big enough
 *                  to contain zone_conf_size() amount of data
 *
 * @return Zone number or -1 on error
 */
int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags)
{
	zone_t *z;
	__address addr;
	count_t confcount;
	int i;
	int znum;

	/* Theoretically we could have here 0, practically make sure
	 * nobody tries to do that. If some platform requires, remove
	 * the assert
	 */
	ASSERT(confframe);
	/* If conframe is supposed to be inside our zone, then make sure
	 * it does not span kernel & init
	 */
	confcount = SIZE2FRAMES(zone_conf_size(count));
	if (confframe >= start && confframe < start+count) {
		/* Slide confframe forward until the config area clears both
		 * the kernel image and every init task. */
		for (;confframe < start + count; confframe++) {
			addr = PFN2ADDR(confframe);
			if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.base), config.kernel_size))
				continue;

			bool overlap = false;
			count_t i;
			for (i = 0; i < init.cnt; i++)
				if (overlaps(addr, PFN2ADDR(confcount), KA2PA(init.tasks[i].addr), init.tasks[i].size)) {
					overlap = true;
					break;
				}
			if (overlap)
				continue;

			break;	/* found a safe spot */
		}
		if (confframe >= start + count)
			panic("Cannot find configuration data for zone.");
	}

	z = (zone_t *)PA2KA(PFN2ADDR(confframe));
	zone_construct(start, count, z, flags);
	znum = zones_add_zone(z);
	if (znum == -1)
		return -1;

	/* If confdata in zone, mark as unavailable */
	if (confframe >= start && confframe < start+count)
		for (i=confframe; i<confframe+confcount; i++) {
			zone_mark_unavailable(z, i - z->base);
		}
	return znum;
}
/***************************************/ |
/* Frame functions */ |
/** Set parent of frame */ |
void frame_set_parent(pfn_t pfn, void *data, int hint) |
{ |
zone_t *zone = find_zone_and_lock(pfn, &hint); |
ASSERT(zone); |
zone_get_frame(zone, pfn-zone->base)->parent = data; |
spinlock_unlock(&zone->lock); |
} |
/** Return the parent pointer stored in the frame owning pfn. */
void * frame_get_parent(pfn_t pfn, int hint)
{
	zone_t *zone = find_zone_and_lock(pfn, &hint);
	void *parent;

	ASSERT(zone);

	parent = zone_get_frame(zone, pfn - zone->base)->parent;
	spinlock_unlock(&zone->lock);

	return parent;
}
/** Allocate power-of-two frames of physical memory.
 *
 * @param order  Allocate exactly 2^order frames.
 * @param flags  Flags for host zone selection and address processing.
 * @param status If non-NULL, receives FRAME_OK or FRAME_NO_MEMORY.
 * @param pzone  Preferred zone index or NULL; updated on success.
 *
 * @return Frame number (pfn) of the first allocated frame.
 *         NOTE(review): on FRAME_ATOMIC failure this returns NULL,
 *         which for a pfn_t is indistinguishable from pfn 0 -- callers
 *         presumably check *status; confirm.
 */
pfn_t frame_alloc_generic(__u8 order, int flags, int * status, int *pzone)
{
	ipl_t ipl;
	int freed;
	pfn_t v;
	zone_t *zone;

loop:
	ipl = interrupts_disable();
	/*
	 * First, find suitable frame zone.
	 */
	zone = find_free_zone_lock(order,pzone);
	/* If no memory, reclaim some slab memory,
	   if it does not help, reclaim all */
	if (!zone && !(flags & FRAME_NO_RECLAIM)) {
		freed = slab_reclaim(0);
		if (freed)
			zone = find_free_zone_lock(order,pzone);
		if (!zone) {
			freed = slab_reclaim(SLAB_RECLAIM_ALL);
			if (freed)
				zone = find_free_zone_lock(order,pzone);
		}
	}
	if (!zone) {
		if (flags & FRAME_PANIC)
			panic("Can't allocate frame.\n");

		/*
		 * TODO: Sleep until frames are available again.
		 */
		interrupts_restore(ipl);

		if (flags & FRAME_ATOMIC) {
			ASSERT(status != NULL);
			if (status)
				*status = FRAME_NO_MEMORY;
			return NULL;
		}

		panic("Sleep not implemented.\n");
		/* Unreachable until sleeping is implemented. */
		goto loop;
	}
	/* Zone is locked here; convert the zone-relative index to a pfn. */
	v = zone_frame_alloc(zone,order);
	v += zone->base;

	spinlock_unlock(&zone->lock);
	interrupts_restore(ipl);

	if (status)
		*status = FRAME_OK;
	return v;
}
/** Free a frame. |
* |
* Find respective frame structure for supplied PFN. |
* Decrement frame reference count. |
* If it drops to zero, move the frame structure to free list. |
* |
* @param frame Frame number to be freed. |
*/ |
void frame_free(pfn_t pfn) |
{ |
ipl_t ipl; |
zone_t *zone; |
ipl = interrupts_disable(); |
/* |
* First, find host frame zone for addr. |
*/ |
zone = find_zone_and_lock(pfn,NULL); |
ASSERT(zone); |
zone_frame_free(zone, pfn-zone->base); |
spinlock_unlock(&zone->lock); |
interrupts_restore(ipl); |
} |
/** Add reference to frame. |
* |
* Find respective frame structure for supplied PFN and |
* increment frame reference count. |
* |
* @param frame Frame no to be freed. |
*/ |
void frame_reference_add(pfn_t pfn) |
{ |
ipl_t ipl; |
zone_t *zone; |
frame_t *frame; |
ipl = interrupts_disable(); |
/* |
* First, find host frame zone for addr. |
*/ |
zone = find_zone_and_lock(pfn,NULL); |
ASSERT(zone); |
frame = &zone->frames[pfn-zone->base]; |
frame->refcount++; |
spinlock_unlock(&zone->lock); |
interrupts_restore(ipl); |
} |
/** Mark given range unavailable in frame zones */ |
void frame_mark_unavailable(pfn_t start, count_t count) |
{ |
int i; |
zone_t *zone; |
int prefzone = 0; |
for (i=0; i < count; i++) { |
zone = find_zone_and_lock(start+i,&prefzone); |
if (!zone) /* PFN not found */ |
continue; |
zone_mark_unavailable(zone, start+i-zone->base); |
spinlock_unlock(&zone->lock); |
} |
} |
/** Initialize physical memory management
 *
 * Initialize physical memory management. On the bootstrap CPU this also
 * creates the zone list and reserves the frames occupied by the kernel
 * image and the init tasks.
 */
void frame_init(void)
{
	/* Global zone bookkeeping is set up once, by the bootstrap CPU. */
	if (config.cpu_active == 1) {
		zones.count = 0;
		spinlock_initialize(&zones.lock,"zones_glob_lock");
	}
	/* Tell the architecture to create some memory */
	frame_arch_init();
	if (config.cpu_active == 1) {
		/* Reserve the frames covering the kernel image... */
		pfn_t firstframe = ADDR2PFN(KA2PA(config.base));
		pfn_t lastframe = ADDR2PFN(KA2PA(config.base+config.kernel_size));
		frame_mark_unavailable(firstframe,lastframe-firstframe+1);

		/* ...and those covering every init task. */
		count_t i;
		for (i = 0; i < init.cnt; i++)
			frame_mark_unavailable(ADDR2PFN(KA2PA(init.tasks[i].addr)), SIZE2FRAMES(init.tasks[i].size));
	}
}
/** Prints list of zones
 *
 * Prints one line per zone: base address, free and busy frame counts.
 */
void zone_print_list(void) {
	zone_t *zone = NULL;
	int i;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);
	printf("# Base address\tFree Frames\tBusy Frames\n");
	printf("  ------------\t-----------\t-----------\n");
	for (i = 0; i < zones.count; i++) {
		zone = zones.info[i];
		/* Lock each zone only for the duration of its snapshot. */
		spinlock_lock(&zone->lock);
		printf("%d: %.*p \t%10zd\t%10zd\n", i, sizeof(__address) * 2, PFN2ADDR(zone->base), zone->free_count, zone->busy_count);
		spinlock_unlock(&zone->lock);
	}
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}
/** Prints zone details
 *
 * @param num Zone base address OR zone number (both are matched).
 */
void zone_print_one(int num) {
	zone_t *zone = NULL;
	ipl_t ipl;
	int i;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	/* Accept either a zone index or a zone base address. */
	for (i = 0; i < zones.count; i++) {
		if (i == num || PFN2ADDR(zones.info[i]->base) == num) {
			zone = zones.info[i];
			break;
		}
	}
	if (!zone) {
		printf("Zone not found.\n");
		goto out;
	}

	spinlock_lock(&zone->lock);
	printf("Memory zone information\n");
	printf("Zone base address: %#.*p\n", sizeof(__address) * 2, PFN2ADDR(zone->base));
	printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
	printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
	printf("Available space: %zd (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
	buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);

	spinlock_unlock(&zone->lock);
out:
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}
//kernel/trunk/generic/src/mm/page.c |
---|
0,0 → 1,136 |
/* |
* Copyright (C) 2001-2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file page.c |
* @brief Virtual Address Translation subsystem. |
* |
* This file contains code for creating, destroying and searching |
* mappings between virtual addresses and physical addresses. |
* Functions here are mere wrappers that call the real implementation. |
* They however, define the single interface. |
*/ |
#include <mm/page.h> |
#include <arch/mm/page.h> |
#include <arch/mm/asid.h> |
#include <mm/as.h> |
#include <mm/frame.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <arch/asm.h> |
#include <memstr.h> |
#include <debug.h> |
#include <arch.h> |
/** Virtual operations for page subsystem.
 *
 * NOTE(review): presumably installed by architecture code during
 * page_arch_init(); the wrappers below assert it is non-NULL. Confirm.
 */
page_mapping_operations_t *page_mapping_operations = NULL;
/** Initialize the virtual address translation subsystem.
 *
 * Delegates entirely to the architecture-specific initialization.
 */
void page_init(void)
{
	page_arch_init();
}
/** Map memory structure |
* |
* Identity-map memory structure |
* considering possible crossings |
* of page boundaries. |
* |
* @param s Address of the structure. |
* @param size Size of the structure. |
*/ |
void map_structure(__address s, size_t size) |
{ |
int i, cnt, length; |
length = size + (s - (s & ~(PAGE_SIZE - 1))); |
cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); |
for (i = 0; i < cnt; i++) |
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE); |
} |
/** Insert mapping of page to frame.
 *
 * Map virtual address page to physical address frame
 * using flags. Allocate and setup any missing page tables.
 *
 * The page table must be locked and interrupts must be disabled.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 */
void page_mapping_insert(as_t *as, __address page, __address frame, int flags)
{
	/* The architecture must have installed the operation table. */
	ASSERT(page_mapping_operations);
	ASSERT(page_mapping_operations->mapping_insert);

	page_mapping_operations->mapping_insert(as, page, frame, flags);
}
/** Remove mapping of page.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * The page table must be locked and interrupts must be disabled.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 */
void page_mapping_remove(as_t *as, __address page)
{
	/* The architecture must have installed the operation table. */
	ASSERT(page_mapping_operations);
	ASSERT(page_mapping_operations->mapping_remove);

	page_mapping_operations->mapping_remove(as, page);
}
/** Find mapping for virtual page
 *
 * Find mapping for virtual page.
 *
 * The page table must be locked and interrupts must be disabled.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual page.
 *
 * @return NULL if there is no such mapping; requested mapping otherwise.
 */
pte_t *page_mapping_find(as_t *as, __address page)
{
	/* The architecture must have installed the operation table. */
	ASSERT(page_mapping_operations);
	ASSERT(page_mapping_operations->mapping_find);

	return page_mapping_operations->mapping_find(as, page);
}
//kernel/trunk/generic/src/interrupt/interrupt.c |
---|
0,0 → 1,135 |
/* |
* Copyright (C) 2005 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <interrupt.h> |
#include <debug.h> |
#include <console/kconsole.h> |
#include <console/console.h> |
#include <console/chardev.h> |
#include <console/cmd.h> |
#include <panic.h> |
#include <print.h> |
#include <symtab.h> |
/** Table of exception handlers, indexed by exception number. */
static struct {
	const char *name;	/* human-readable description */
	iroutine f;		/* registered handler routine */
} exc_table[IVT_ITEMS];

/* Serializes all reads/writes of exc_table. */
SPINLOCK_INITIALIZE(exctbl_lock);
/** Register exception handler |
* |
* @param n Exception number |
* @param name Description |
* @param f Exception handler |
*/ |
iroutine exc_register(int n, const char *name, iroutine f) |
{ |
ASSERT(n < IVT_ITEMS); |
iroutine old; |
spinlock_lock(&exctbl_lock); |
old = exc_table[n].f; |
exc_table[n].f = f; |
exc_table[n].name = name; |
spinlock_unlock(&exctbl_lock); |
return old; |
} |
/** Dispatch exception according to exception table
 *
 * Called directly from the assembler code.
 * CPU is interrupts_disable()'d.
 *
 * @param n      Exception number (table index; IVT_FIRST-relative value
 *               is passed on to the handler).
 * @param istate Saved interrupted state, forwarded to the handler.
 */
void exc_dispatch(int n, istate_t *istate)
{
	ASSERT(n < IVT_ITEMS);
	/* Note: reads exc_table[n].f without taking exctbl_lock. */
	exc_table[n].f(n + IVT_FIRST, istate);
}
/** Default 'null' exception handler
 *
 * Installed for every vector by exc_init(); any exception without a
 * real handler panics the kernel.
 */
static void exc_undef(int n, istate_t *istate)
{
	panic("Unhandled exception %d.", n);
}
/** kconsole cmd - print all exceptions
 *
 * Lists every vector with its description and resolved handler symbol,
 * pausing for a keypress every 20 lines.
 *
 * @return 1 (kconsole success)
 */
static int exc_print_cmd(cmd_arg_t *argv)
{
	int i;
	char *symbol;

	spinlock_lock(&exctbl_lock);
	printf("Exc Description Handler\n");
	for (i=0; i < IVT_ITEMS; i++) {
		symbol = get_symtab_entry((__native)exc_table[i].f);
		if (!symbol)
			symbol = "not found";

		printf("%d %s %.*p(%s)\n", i + IVT_FIRST, exc_table[i].name,
		       sizeof(__address) * 2, exc_table[i].f,symbol);
		if (!((i+1) % 20)) {
			printf("Press any key to continue.");
			/* Drop the lock while blocked on input so handler
			 * registration is not stalled by the pager. */
			spinlock_unlock(&exctbl_lock);
			getc(stdin);
			spinlock_lock(&exctbl_lock);
			printf("\n");
		}
	}
	spinlock_unlock(&exctbl_lock);

	return 1;
}
/** kconsole descriptor for the argument-less 'exc' command. */
static cmd_info_t exc_info = {
	.name = "exc",
	.description = "Print exception table.",
	.func = exc_print_cmd,
	.help = NULL,
	.argc = 0,
	.argv = NULL
};
/** Initialize generic exception handling support */ |
void exc_init(void) |
{ |
int i; |
for (i=0;i < IVT_ITEMS; i++) |
exc_register(i, "undef", (iroutine) exc_undef); |
cmd_initialize(&exc_info); |
if (!cmd_register(&exc_info)) |
panic("could not register command %s\n", exc_info.name); |
} |
//kernel/trunk/generic/src/console/cmd.c |
---|
0,0 → 1,698 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* This file is meant to contain all wrapper functions for |
* all kconsole commands. The point is in separating |
* kconsole specific wrappers from kconsole-unaware functions |
* from other subsystems. |
*/ |
#include <console/cmd.h> |
#include <console/kconsole.h> |
#include <print.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <adt/list.h> |
#include <arch.h> |
#include <func.h> |
#include <macros.h> |
#include <debug.h> |
#include <symtab.h> |
#include <cpu.h> |
#include <mm/tlb.h> |
#include <arch/mm/tlb.h> |
#include <mm/frame.h> |
#include <main/version.h> |
#include <mm/slab.h> |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
/** Data and methods for 'help' command. */
static int cmd_help(cmd_arg_t *argv);
static cmd_info_t help_info = {
	.name = "help",
	.description = "List of supported commands.",
	.func = cmd_help,
	.argc = 0
};

/** Data for 'exit' command.
 *
 * No .func callback is set: the kconsole() main loop recognizes
 * "exit" by name and terminates instead of dispatching it.
 */
static cmd_info_t exit_info = {
	.name = "exit",
	.description ="Exit kconsole",
	.argc = 0
};
/** Data and methods for 'describe' command. */
static int cmd_desc(cmd_arg_t *argv);
static void desc_help(void);
static char desc_buf[MAX_CMDLINE+1];	/* backing store for the string argument */
static cmd_arg_t desc_argv = {
	.type = ARG_TYPE_STRING,
	.buffer = desc_buf,
	.len = sizeof(desc_buf)
};
static cmd_info_t desc_info = {
	.name = "describe",
	.description = "Describe specified command.",
	.help = desc_help,
	.func = cmd_desc,
	.argc = 1,
	.argv = &desc_argv
};

/** Data and methods for 'symaddr' command. */
static int cmd_symaddr(cmd_arg_t *argv);
static char symaddr_buf[MAX_CMDLINE+1];	/* backing store for the symbol name */
static cmd_arg_t symaddr_argv = {
	.type = ARG_TYPE_STRING,
	.buffer = symaddr_buf,
	.len = sizeof(symaddr_buf)
};
static cmd_info_t symaddr_info = {
	.name = "symaddr",
	.description = "Return symbol address.",
	.func = cmd_symaddr,
	.argc = 1,
	.argv = &symaddr_argv
};
/** Data and methods for 'set4' command. */
static char set_buf[MAX_CMDLINE+1];	/* destination address argument */
static int cmd_set4(cmd_arg_t *argv);
static cmd_arg_t set4_argv[] = {
	{
		.type = ARG_TYPE_STRING,
		.buffer = set_buf,
		.len = sizeof(set_buf)
	},
	{
		.type = ARG_TYPE_INT
	}
};
static cmd_info_t set4_info = {
	.name = "set4",
	.description = "set <dest_addr> <value> - 4byte version",
	.func = cmd_set4,
	.argc = 2,
	.argv = set4_argv
};

/** Data and methods for 'call0' command. */
static char call0_buf[MAX_CMDLINE+1];	/* function name; shared by call1..call3 below */
static char carg1_buf[MAX_CMDLINE+1];	/* first call argument */
static char carg2_buf[MAX_CMDLINE+1];	/* second call argument */
static char carg3_buf[MAX_CMDLINE+1];	/* third call argument */

static int cmd_call0(cmd_arg_t *argv);
static cmd_arg_t call0_argv = {
	.type = ARG_TYPE_STRING,
	.buffer = call0_buf,
	.len = sizeof(call0_buf)
};
static cmd_info_t call0_info = {
	.name = "call0",
	.description = "call0 <function> -> call function().",
	.func = cmd_call0,
	.argc = 1,
	.argv = &call0_argv
};
/** Data and methods for 'call1' command. */
static int cmd_call1(cmd_arg_t *argv);
static cmd_arg_t call1_argv[] = {
	{
		.type = ARG_TYPE_STRING,
		.buffer = call0_buf,	/* function name buffer is shared with call0 */
		.len = sizeof(call0_buf)
	},
	{
		.type = ARG_TYPE_VAR,
		.buffer = carg1_buf,
		.len = sizeof(carg1_buf)
	}
};
static cmd_info_t call1_info = {
	.name = "call1",
	.description = "call1 <function> <arg1> -> call function(arg1).",
	.func = cmd_call1,
	.argc = 2,
	.argv = call1_argv
};

/** Data and methods for 'call2' command. */
static int cmd_call2(cmd_arg_t *argv);
static cmd_arg_t call2_argv[] = {
	{
		.type = ARG_TYPE_STRING,
		.buffer = call0_buf,
		.len = sizeof(call0_buf)
	},
	{
		.type = ARG_TYPE_VAR,
		.buffer = carg1_buf,
		.len = sizeof(carg1_buf)
	},
	{
		.type = ARG_TYPE_VAR,
		.buffer = carg2_buf,
		.len = sizeof(carg2_buf)
	}
};
static cmd_info_t call2_info = {
	.name = "call2",
	.description = "call2 <function> <arg1> <arg2> -> call function(arg1,arg2).",
	.func = cmd_call2,
	.argc = 3,
	.argv = call2_argv
};

/** Data and methods for 'call3' command. */
static int cmd_call3(cmd_arg_t *argv);
static cmd_arg_t call3_argv[] = {
	{
		.type = ARG_TYPE_STRING,
		.buffer = call0_buf,
		.len = sizeof(call0_buf)
	},
	{
		.type = ARG_TYPE_VAR,
		.buffer = carg1_buf,
		.len = sizeof(carg1_buf)
	},
	{
		.type = ARG_TYPE_VAR,
		.buffer = carg2_buf,
		.len = sizeof(carg2_buf)
	},
	{
		.type = ARG_TYPE_VAR,
		.buffer = carg3_buf,
		.len = sizeof(carg3_buf)
	}
};
static cmd_info_t call3_info = {
	.name = "call3",
	.description = "call3 <function> <arg1> <arg2> <arg3> -> call function(arg1,arg2,arg3).",
	.func = cmd_call3,
	.argc = 4,
	.argv = call3_argv
};
/** Data and methods for 'halt' command. */
static int cmd_halt(cmd_arg_t *argv);
static cmd_info_t halt_info = {
	.name = "halt",
	.description = "Halt the kernel.",
	.func = cmd_halt,
	.argc = 0
};

/** Data and methods for 'tlb' command (non-static: exported). */
static int cmd_tlb(cmd_arg_t *argv);
cmd_info_t tlb_info = {
	.name = "tlb",
	.description = "Print TLB of current processor.",
	.help = NULL,
	.func = cmd_tlb,
	.argc = 0,
	.argv = NULL
};

/** Data and methods for 'threads' command. */
static int cmd_threads(cmd_arg_t *argv);
static cmd_info_t threads_info = {
	.name = "threads",
	.description = "List all threads",
	.func = cmd_threads,
	.argc = 0
};

/** Data and methods for 'tasks' command. */
static int cmd_tasks(cmd_arg_t *argv);
static cmd_info_t tasks_info = {
	.name = "tasks",
	.description = "List all tasks",
	.func = cmd_tasks,
	.argc = 0
};

/** Data and methods for 'scheduler' command. */
static int cmd_sched(cmd_arg_t *argv);
static cmd_info_t sched_info = {
	.name = "scheduler",
	.description = "List all scheduler information",
	.func = cmd_sched,
	.argc = 0
};

/** Data and methods for 'slabs' command. */
static int cmd_slabs(cmd_arg_t *argv);
static cmd_info_t slabs_info = {
	.name = "slabs",
	.description = "List SLAB caches.",
	.func = cmd_slabs,
	.argc = 0
};
/** Data and methods for 'zones' command */
static int cmd_zones(cmd_arg_t *argv);
static cmd_info_t zones_info = {
	.name = "zones",
	.description = "List of memory zones.",
	.func = cmd_zones,
	.argc = 0
};

/** Data and methods for 'zone' command */
static int cmd_zone(cmd_arg_t *argv);
static cmd_arg_t zone_argv = {
	.type = ARG_TYPE_INT,	/* zone number */
};

static cmd_info_t zone_info = {
	.name = "zone",
	.description = "Show memory zone structure.",
	.func = cmd_zone,
	.argc = 1,
	.argv = &zone_argv
};

/** Data and methods for 'cpus' command (non-static: exported). */
static int cmd_cpus(cmd_arg_t *argv);
cmd_info_t cpus_info = {
	.name = "cpus",
	.description = "List all processors.",
	.help = NULL,
	.func = cmd_cpus,
	.argc = 0,
	.argv = NULL
};

/** Data and methods for 'version' command (non-static: exported). */
static int cmd_version(cmd_arg_t *argv);
cmd_info_t version_info = {
	.name = "version",
	.description = "Print version information.",
	.help = NULL,
	.func = cmd_version,
	.argc = 0,
	.argv = NULL
};

/* NULL-terminated table of built-in commands; walked by cmd_init(). */
static cmd_info_t *basic_commands[] = {
	&call0_info,
	&call1_info,
	&call2_info,
	&call3_info,
	&cpus_info,
	&desc_info,
	&exit_info,
	&halt_info,
	&help_info,
	&set4_info,
	&slabs_info,
	&symaddr_info,
	&sched_info,
	&threads_info,
	&tasks_info,
	&tlb_info,
	&version_info,
	&zones_info,
	&zone_info,
	NULL
};
/** Initialize command info structure.
 *
 * Prepares the spinlock and list link of a cmd_info_t so that
 * it can subsequently be passed to cmd_register().
 *
 * @param cmd Command info structure.
 *
 */
void cmd_initialize(cmd_info_t *cmd)
{
	spinlock_initialize(&cmd->lock, "cmd");
	link_initialize(&cmd->link);
}
/** Initialize and register commands. */ |
void cmd_init(void) |
{ |
int i; |
for (i=0;basic_commands[i]; i++) { |
cmd_initialize(basic_commands[i]); |
if (!cmd_register(basic_commands[i])) |
panic("could not register command %s\n", |
basic_commands[i]->name); |
} |
} |
/** List supported commands. |
* |
* @param argv Argument vector. |
* |
* @return 0 on failure, 1 on success. |
*/ |
int cmd_help(cmd_arg_t *argv) |
{ |
link_t *cur; |
spinlock_lock(&cmd_lock); |
for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) { |
cmd_info_t *hlp; |
hlp = list_get_instance(cur, cmd_info_t, link); |
spinlock_lock(&hlp->lock); |
printf("%s - %s\n", hlp->name, hlp->description); |
spinlock_unlock(&hlp->lock); |
} |
spinlock_unlock(&cmd_lock); |
return 1; |
} |
/** Describe specified command. |
* |
* @param argv Argument vector. |
* |
* @return 0 on failure, 1 on success. |
*/ |
int cmd_desc(cmd_arg_t *argv) |
{ |
link_t *cur; |
spinlock_lock(&cmd_lock); |
for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) { |
cmd_info_t *hlp; |
hlp = list_get_instance(cur, cmd_info_t, link); |
spinlock_lock(&hlp->lock); |
if (strncmp(hlp->name, (const char *) argv->buffer, strlen(hlp->name)) == 0) { |
printf("%s - %s\n", hlp->name, hlp->description); |
if (hlp->help) |
hlp->help(); |
spinlock_unlock(&hlp->lock); |
break; |
} |
spinlock_unlock(&hlp->lock); |
} |
spinlock_unlock(&cmd_lock); |
return 1; |
} |
/** Search symbol table.
 *
 * Prints all symbols matching the given name fragment.
 *
 * @param argv Argument vector; argv->buffer holds the symbol name.
 *
 * @return Always 1.
 */
int cmd_symaddr(cmd_arg_t *argv)
{
	symtab_print_search(argv->buffer);

	return 1;
}
/** Call function with zero parameters */ |
int cmd_call0(cmd_arg_t *argv) |
{ |
__address symaddr; |
char *symbol; |
__native (*f)(void); |
symaddr = get_symbol_addr(argv->buffer); |
if (!symaddr) |
printf("Symbol %s not found.\n", argv->buffer); |
else if (symaddr == (__address) -1) { |
symtab_print_search(argv->buffer); |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
symbol = get_symtab_entry(symaddr); |
printf("Calling f(): %.*p: %s\n", sizeof(__address) * 2, symaddr, symbol); |
f = (__native (*)(void)) symaddr; |
printf("Result: %#zx\n", f()); |
} |
return 1; |
} |
/** Call function with one parameter */ |
int cmd_call1(cmd_arg_t *argv) |
{ |
__address symaddr; |
char *symbol; |
__native (*f)(__native,...); |
__native arg1 = argv[1].intval; |
symaddr = get_symbol_addr(argv->buffer); |
if (!symaddr) |
printf("Symbol %s not found.\n", argv->buffer); |
else if (symaddr == (__address) -1) { |
symtab_print_search(argv->buffer); |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
symbol = get_symtab_entry(symaddr); |
printf("Calling f(0x%zX): %.*p: %s\n", arg1, sizeof(__address) * 2, symaddr, symbol); |
f = (__native (*)(__native,...)) symaddr; |
printf("Result: %#zx\n", f(arg1)); |
} |
return 1; |
} |
/** Call function with two parameters */ |
int cmd_call2(cmd_arg_t *argv) |
{ |
__address symaddr; |
char *symbol; |
__native (*f)(__native,__native,...); |
__native arg1 = argv[1].intval; |
__native arg2 = argv[2].intval; |
symaddr = get_symbol_addr(argv->buffer); |
if (!symaddr) |
printf("Symbol %s not found.\n", argv->buffer); |
else if (symaddr == (__address) -1) { |
symtab_print_search(argv->buffer); |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
symbol = get_symtab_entry(symaddr); |
printf("Calling f(0x%zx,0x%zx): %.*p: %s\n", |
arg1, arg2, sizeof(__address) * 2, symaddr, symbol); |
f = (__native (*)(__native,__native,...)) symaddr; |
printf("Result: %#zx\n", f(arg1, arg2)); |
} |
return 1; |
} |
/** Call function with three parameters */ |
int cmd_call3(cmd_arg_t *argv) |
{ |
__address symaddr; |
char *symbol; |
__native (*f)(__native,__native,__native,...); |
__native arg1 = argv[1].intval; |
__native arg2 = argv[2].intval; |
__native arg3 = argv[3].intval; |
symaddr = get_symbol_addr(argv->buffer); |
if (!symaddr) |
printf("Symbol %s not found.\n", argv->buffer); |
else if (symaddr == (__address) -1) { |
symtab_print_search(argv->buffer); |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
symbol = get_symtab_entry(symaddr); |
printf("Calling f(0x%zx,0x%zx, 0x%zx): %.*p: %s\n", |
arg1, arg2, arg3, sizeof(__address) * 2, symaddr, symbol); |
f = (__native (*)(__native,__native,__native,...)) symaddr; |
printf("Result: %#zx\n", f(arg1, arg2, arg3)); |
} |
return 1; |
} |
/** Print detailed description of 'describe' command. */
void desc_help(void)
{
	printf("Syntax: describe command_name\n");
}
/** Halt the kernel.
 *
 * @param argv Argument vector (ignored).
 *
 * @return 0 on failure, 1 on success (never returns, since halt()
 *         stops the machine).
 */
int cmd_halt(cmd_arg_t *argv)
{
	halt();
	return 1;
}
/** Command for printing TLB contents.
 *
 * @param argv Not used.
 *
 * @return Always returns 1.
 */
int cmd_tlb(cmd_arg_t *argv)
{
	tlb_print();
	return 1;
}
/** Write 4 byte value to address */ |
int cmd_set4(cmd_arg_t *argv) |
{ |
__u32 *addr ; |
__u32 arg1 = argv[1].intval; |
bool pointer = false; |
if (((char *)argv->buffer)[0] == '*') { |
addr = (__u32 *) get_symbol_addr(argv->buffer+1); |
pointer = true; |
} else if (((char *)argv->buffer)[0] >= '0' && |
((char *)argv->buffer)[0] <= '9') |
addr = (__u32 *)atoi((char *)argv->buffer); |
else |
addr = (__u32 *)get_symbol_addr(argv->buffer); |
if (!addr) |
printf("Symbol %s not found.\n", argv->buffer); |
else if (addr == (__u32 *) -1) { |
symtab_print_search(argv->buffer); |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
if (pointer) |
addr = (__u32 *)(*(__native *)addr); |
printf("Writing 0x%x -> %.*p\n", arg1, sizeof(__address) * 2, addr); |
*addr = arg1; |
} |
return 1; |
} |
/** Command for listing SLAB caches
 *
 * @param argv Ignored
 *
 * @return Always 1
 */
int cmd_slabs(cmd_arg_t * argv) {
	slab_print_list();
	return 1;
}
/** Command for listing thread information
 *
 * @param argv Ignored
 *
 * @return Always 1
 */
int cmd_threads(cmd_arg_t * argv) {
	thread_print_list();
	return 1;
}
/** Command for listing task information
 *
 * @param argv Ignored
 *
 * @return Always 1
 */
int cmd_tasks(cmd_arg_t * argv) {
	task_print_list();
	return 1;
}
/** Command for listing scheduler information
 *
 * @param argv Ignored
 *
 * @return Always 1
 */
int cmd_sched(cmd_arg_t * argv) {
	sched_print_list();
	return 1;
}
/** Command for listing memory zones
 *
 * @param argv Ignored
 *
 * @return Always 1
 */
int cmd_zones(cmd_arg_t * argv) {
	zone_print_list();
	return 1;
}
/** Command for memory zone details
 *
 * @param argv Integer argument from cmdline expected:
 *             argv[0].intval is the zone number.
 *
 * @return Always 1
 */
int cmd_zone(cmd_arg_t * argv) {
	zone_print_one(argv[0].intval);
	return 1;
}
/** Command for listing processors.
 *
 * @param argv Ignored.
 *
 * @return Always 1.
 */
int cmd_cpus(cmd_arg_t *argv)
{
	cpu_list();
	return 1;
}
/** Command for printing kernel version.
 *
 * @param argv Ignored.
 *
 * @return Always 1.
 */
int cmd_version(cmd_arg_t *argv)
{
	version_print();
	return 1;
}
//kernel/trunk/generic/src/console/console.c |
---|
0,0 → 1,160 |
/* |
* Copyright (C) 2003 Josef Cejka |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <console/console.h> |
#include <console/chardev.h> |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <arch.h> |
#include <func.h> |
#include <print.h> |
#include <atomic.h> |
#define BUFLEN 2048
/* Fallback sink for output written before a real stdout exists;
 * contents can be retrieved from a debugger (see null_putchar()). */
static char debug_buffer[BUFLEN];
static size_t offset = 0;	/* next write position in debug_buffer */
/** Initialize stdout to something that does not print, but does not fail |
* |
* Save data in some buffer so that it could be retrieved in the debugger |
*/ |
static void null_putchar(chardev_t *d, const char ch) |
{ |
if (offset >= BUFLEN) |
offset = 0; |
debug_buffer[offset++] = ch; |
} |
static chardev_operations_t null_stdout_ops = {
	.write = null_putchar
};

/* Default stdout until a real console driver replaces it. */
chardev_t null_stdout = {
	.name = "null",
	.op = &null_stdout_ops
};

/** Standard input character device. */
chardev_t *stdin = NULL;
/** Standard output character device; starts as the null device. */
chardev_t *stdout = &null_stdout;
/** Get character from character device. Do not echo character.
 *
 * Blocks on the device's wait queue until a character is available,
 * then consumes and returns the oldest buffered character. If the
 * system is in halt state, reads directly from the device (or halts
 * the CPU when no direct read operation exists).
 *
 * @param chardev Character device.
 *
 * @return Character read.
 */
__u8 _getc(chardev_t *chardev)
{
	__u8 ch;
	ipl_t ipl;

	if (atomic_get(&haltstate)) {
		/* If we are here, we are hopefully on the processor, that
		 * issued the 'halt' command, so proceed to read the character
		 * directly from input
		 */
		if (chardev->op->read)
			return chardev->op->read(chardev);

		/* no other way of interacting with user, halt */
		if (CPU)
			printf("cpu%d: ", CPU->id);
		else
			printf("cpu: ");
		printf("halted - no kconsole\n");
		cpu_halt();
	}

	waitq_sleep(&chardev->wq);
	ipl = interrupts_disable();
	spinlock_lock(&chardev->lock);
	/*
	 * Oldest buffered character: 'index' is the next write slot and
	 * 'counter' the number of buffered characters (see
	 * chardev_push_character()).
	 * NOTE(review): assumes index/counter are unsigned so the
	 * subtraction wraps modulo correctly — confirm in chardev.h.
	 */
	ch = chardev->buffer[(chardev->index - chardev->counter) % CHARDEV_BUFLEN];
	chardev->counter--;
	spinlock_unlock(&chardev->lock);
	interrupts_restore(ipl);

	/* Re-enable the device (e.g. its interrupt) after consuming. */
	chardev->op->resume(chardev);

	return ch;
}
/** Get string from character device. |
* |
* Read characters from character device until first occurrence |
* of newline character. |
* |
* @param chardev Character device. |
* @param buf Buffer where to store string terminated by '\0'. |
* @param len Size of the buffer. |
* |
* @return Number of characters read. |
*/ |
count_t gets(chardev_t *chardev, char *buf, size_t buflen) |
{ |
index_t index = 0; |
char ch; |
while (index < buflen) { |
ch = _getc(chardev); |
if (ch == '\b') { |
if (index > 0) { |
index--; |
/* Space backspace, space */ |
putchar('\b'); |
putchar(' '); |
putchar('\b'); |
} |
continue; |
} |
putchar(ch); |
if (ch == '\n') { /* end of string => write 0, return */ |
buf[index] = '\0'; |
return (count_t) index; |
} |
buf[index++] = ch; |
} |
return (count_t) index; |
} |
/** Get character from device & echo it to screen
 *
 * @param chardev Character device.
 *
 * @return Character read.
 */
__u8 getc(chardev_t *chardev)
{
	__u8 ch;

	ch = _getc(chardev);
	putchar(ch);
	return ch;
}
/** Write one character to standard output.
 *
 * The character is silently dropped if stdout has no write operation.
 *
 * @param c Character to write.
 */
void putchar(char c)
{
	if (stdout->op->write)
		stdout->op->write(stdout, c);
}
//kernel/trunk/generic/src/console/chardev.c |
---|
0,0 → 1,69 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <console/chardev.h> |
#include <putchar.h> |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
/** Initialize character device.
 *
 * Sets up the wait queue, spinlock and buffer bookkeeping of the
 * device and attaches the given operations.
 *
 * @param name Device name.
 * @param chardev Character device.
 * @param op Implementation of character device operations.
 */
void chardev_initialize(char *name,chardev_t *chardev,
			chardev_operations_t *op)
{
	chardev->name = name;

	waitq_initialize(&chardev->wq);
	spinlock_initialize(&chardev->lock, "chardev");
	chardev->counter = 0;	/* number of buffered characters */
	chardev->index = 0;	/* next write slot in the buffer */
	chardev->op = op;
}
/** Push character read from input character device.
 *
 * Appends a character to the device's circular buffer and wakes up
 * one reader sleeping in _getc().
 *
 * @param chardev Character device.
 * @param ch Character being pushed.
 */
void chardev_push_character(chardev_t *chardev, __u8 ch)
{
	spinlock_lock(&chardev->lock);
	chardev->counter++;
	if (chardev->counter == CHARDEV_BUFLEN - 1) {
		/* buffer full => disable device interrupt */
		/*
		 * NOTE(review): suspend triggers only on exact equality; if
		 * the device keeps pushing regardless, counter can grow past
		 * the buffer size and old characters are overwritten — confirm
		 * drivers honor suspend().
		 */
		chardev->op->suspend(chardev);
	}

	chardev->buffer[chardev->index++] = ch;
	chardev->index = chardev->index % CHARDEV_BUFLEN; /* index modulo size of buffer */
	waitq_wakeup(&chardev->wq, WAKEUP_FIRST);
	spinlock_unlock(&chardev->lock);
}
//kernel/trunk/generic/src/console/kconsole.c |
---|
0,0 → 1,616 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <console/kconsole.h> |
#include <console/console.h> |
#include <console/chardev.h> |
#include <console/cmd.h> |
#include <print.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <adt/list.h> |
#include <arch.h> |
#include <macros.h> |
#include <debug.h> |
#include <func.h> |
#include <symtab.h> |
#include <macros.h> |
/** Simple kernel console. |
* |
* The console is realized by kernel thread kconsole. |
* It doesn't understand any useful command on its own, |
* but makes it possible for other kernel subsystems to |
* register their own commands. |
*/ |
/** Locking. |
* |
* There is a list of cmd_info_t structures. This list |
 * is protected by the cmd_lock spinlock. Note that, in
 * particular, the link elements of cmd_info_t are protected
 * by this lock.
* |
* Each cmd_info_t also has its own lock, which protects |
* all elements thereof except the link element. |
* |
* cmd_lock must be acquired before any cmd_info lock. |
* When locking two cmd info structures, structure with |
* lower address must be locked first. |
*/ |
SPINLOCK_INITIALIZE(cmd_lock);	/**< Lock protecting command list. */
LIST_INITIALIZE(cmd_head);	/**< Command list. */

static cmd_info_t *parse_cmdline(char *cmdline, size_t len);
static bool parse_argument(char *cmdline, size_t len, index_t *start, index_t *end);
/* Ring of previously entered command lines, cycled by clever_readline(). */
static char history[KCONSOLE_HISTORY][MAX_CMDLINE] = {};
/** Initialize kconsole data structures. */ |
void kconsole_init(void) |
{ |
int i; |
cmd_init(); |
for (i=0; i<KCONSOLE_HISTORY; i++) |
history[i][0] = '\0'; |
} |
/** Register kconsole command.
 *
 * Rejects registration if the same structure or a command with the
 * same name is already on the list.
 *
 * @param cmd Structure describing the command.
 *
 * @return 0 on failure, 1 on success.
 */
int cmd_register(cmd_info_t *cmd)
{
	link_t *cur;

	spinlock_lock(&cmd_lock);

	/*
	 * Make sure the command is not already listed.
	 */
	for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
		cmd_info_t *hlp;

		hlp = list_get_instance(cur, cmd_info_t, link);

		if (hlp == cmd) {
			/* The command is already there. */
			spinlock_unlock(&cmd_lock);
			return 0;
		}

		/*
		 * Avoid deadlock: lock the two cmd_info structures in
		 * ascending address order (see the locking rules in the
		 * comment at the top of this file).
		 */
		if (hlp < cmd) {
			spinlock_lock(&hlp->lock);
			spinlock_lock(&cmd->lock);
		} else {
			spinlock_lock(&cmd->lock);
			spinlock_lock(&hlp->lock);
		}

		/* Full-length comparison: only an exact name collision matches. */
		if ((strncmp(hlp->name,
			     cmd->name, max(strlen(cmd->name),
					    strlen(hlp->name))) == 0)) {
			/* The command is already there. */
			spinlock_unlock(&hlp->lock);
			spinlock_unlock(&cmd->lock);
			spinlock_unlock(&cmd_lock);
			return 0;
		}

		spinlock_unlock(&hlp->lock);
		spinlock_unlock(&cmd->lock);
	}

	/*
	 * Now the command can be added.
	 */
	list_append(&cmd->link, &cmd_head);

	spinlock_unlock(&cmd_lock);
	return 1;
}
/** Emit the same character a given number of times.
 *
 * Used by the line editor to move the cursor / blank out regions.
 *
 * @param ch Character to print.
 * @param count How many copies to print.
 */
static void rdln_print_c(char ch, int count)
{
	int remaining = count;

	while (remaining-- > 0)
		putchar(ch);
}
/** Insert a character into a NUL-terminated string.
 *
 * Shifts the tail of the string (from pos onward) one position to
 * the right and places ch at pos. The caller must guarantee room
 * for one extra character.
 *
 * @param str String to modify.
 * @param ch Character to insert.
 * @param pos Insertion position.
 */
static void insert_char(char *str, char ch, int pos)
{
	int idx = strlen(str);

	while (idx > pos) {
		str[idx] = str[idx - 1];
		idx--;
	}
	str[pos] = ch;
}
/** Try to find a command beginning with prefix.
 *
 * Scans the command list starting at *startpos; on a match, *startpos
 * is left pointing at the matching entry so the caller can resume the
 * search from the next element.
 *
 * @param name Prefix to match.
 * @param startpos In/out list position; NULL means start of list.
 *
 * @return Pointer to the remainder of the matching command name
 *         (after the prefix), or NULL if no command matches.
 *         NOTE(review): the returned pointer is used after cmd_lock
 *         is dropped — safe only as long as commands are never
 *         unregistered; confirm.
 */
static const char * cmdtab_search_one(const char *name,link_t **startpos)
{
	int namelen = strlen(name);
	const char *curname;

	spinlock_lock(&cmd_lock);

	if (!*startpos)
		*startpos = cmd_head.next;

	for (;*startpos != &cmd_head;*startpos = (*startpos)->next) {
		cmd_info_t *hlp;
		hlp = list_get_instance(*startpos, cmd_info_t, link);

		curname = hlp->name;
		if (strlen(curname) < namelen)
			continue;
		if (strncmp(curname, name, namelen) == 0) {
			spinlock_unlock(&cmd_lock);
			return curname+namelen;
		}
	}
	spinlock_unlock(&cmd_lock);
	return NULL;
}
/** Command completion of the commands.
 *
 * On a single match, name is extended with the rest of that command's
 * name. On multiple matches, name is extended with the longest common
 * continuation; if there is none, the candidate commands are printed.
 *
 * @param name String to match, changed to hint on exit.
 *
 * @return Number of found matches.
 */
static int cmdtab_compl(char *name)
{
	char output[MAX_SYMBOL_NAME+1];
	link_t *startpos = NULL;
	const char *foundtxt;
	int found = 0;
	int i;

	output[0] = '\0';
	/* First pass: compute the longest common continuation of all matches. */
	while ((foundtxt = cmdtab_search_one(name, &startpos))) {
		startpos = startpos->next;
		if (!found)
			strncpy(output, foundtxt, strlen(foundtxt)+1);
		else {
			for (i=0; output[i] && foundtxt[i] && output[i]==foundtxt[i]; i++)
				;
			output[i] = '\0';
		}
		found++;
	}
	if (!found)
		return 0;

	/* Ambiguous with no common continuation: print a table of candidates. */
	if (found > 1 && !strlen(output)) {
		printf("\n");
		startpos = NULL;
		while ((foundtxt = cmdtab_search_one(name, &startpos))) {
			cmd_info_t *hlp;
			hlp = list_get_instance(startpos, cmd_info_t, link);
			printf("%s - %s\n", hlp->name, hlp->description);
			startpos = startpos->next;
		}
	}
	strncpy(name, output, MAX_SYMBOL_NAME);
	return found;
}
/** Read one command line with line editing.
 *
 * Supports backspace, delete, home/end, left/right cursor movement,
 * up/down history browsing and Tab completion (command names at the
 * start of the line, symbol names elsewhere). The line is edited
 * in place inside the history ring.
 *
 * @param prompt Prompt string printed before the input line.
 * @param input Character device to read from.
 *
 * @return Pointer to the NUL-terminated line inside the history
 *         ring; valid until that history slot is reused.
 */
static char * clever_readline(const char *prompt, chardev_t *input)
{
	static int histposition = 0;	/* current slot in the history ring */

	char tmp[MAX_CMDLINE+1];	/* scratch buffer for completion */
	int curlen = 0, position = 0;	/* line length and cursor position */
	char *current = history[histposition];
	int i;
	char mod; /* Command Modifier */
	char c;

	printf("%s> ", prompt);
	while (1) {
		c = _getc(input);
		if (c == '\n') {
			putchar(c);
			break;
		} if (c == '\b') { /* Backspace */
			if (position == 0)
				continue;
			for (i=position; i<curlen;i++)
				current[i-1] = current[i];
			curlen--;
			position--;
			putchar('\b');
			for (i=position;i<curlen;i++)
				putchar(current[i]);
			putchar(' ');
			rdln_print_c('\b',curlen-position+1);
			continue;
		}
		if (c == '\t') { /* Tabulator */
			int found;
			/* Move to the end of the word */
			for (;position<curlen && current[position]!=' ';position++)
				putchar(current[position]);
			/* Copy to tmp last word */
			for (i=position-1;i >= 0 && current[i]!=' ' ;i--)
				;
			/* If word begins with * or &, skip it */
			/*
			 * NOTE(review): tmp is examined (and shifted) here
			 * BEFORE it is filled by the strncpy() below, so this
			 * reads stale or uninitialized data on the first Tab
			 * press; the check was presumably meant to inspect
			 * current[] — confirm intended behavior.
			 */
			if (tmp[0] == '*' || tmp[0] == '&')
				for (i=1;tmp[i];i++)
					tmp[i-1] = tmp[i];
			i++; /* I is at the start of the word */
			strncpy(tmp, current+i, position-i+1);

			if (i==0) { /* Command completion */
				found = cmdtab_compl(tmp);
			} else { /* Symtab completion */
				found = symtab_compl(tmp);
			}

			if (found == 0)
				continue;
			/* Splice the completion hint into the line. */
			for (i=0;tmp[i] && curlen < MAX_CMDLINE;i++,curlen++)
				insert_char(current, tmp[i], i+position);

			if (strlen(tmp) || found==1) { /* If we have a hint */
				for (i=position;i<curlen;i++)
					putchar(current[i]);
				position += strlen(tmp);
				/* Add space to end */
				if (found == 1 && position == curlen && \
				    curlen < MAX_CMDLINE) {
					current[position] = ' ';
					curlen++;
					position++;
					putchar(' ');
				}
			} else { /* No hint, table was printed */
				printf("%s> ", prompt);
				for (i=0; i<curlen;i++)
					putchar(current[i]);
				position += strlen(tmp);
			}
			rdln_print_c('\b', curlen-position);
			continue;
		}
		if (c == 0x1b) { /* Special command (ESC sequence) */
			mod = _getc(input);
			c = _getc(input);
			if (mod != 0x5b && mod != 0x4f)
				continue;
			if (c == 0x33 && _getc(input) == 0x7e) {
				/* Delete */
				if (position == curlen)
					continue;
				for (i=position+1; i<curlen;i++) {
					putchar(current[i]);
					current[i-1] = current[i];
				}
				putchar(' ');
				rdln_print_c('\b',curlen-position);
				curlen--;
			}
			else if (c == 0x48) { /* Home */
				rdln_print_c('\b',position);
				position = 0;
			}
			else if (c == 0x46) {  /* End */
				for (i=position;i<curlen;i++)
					putchar(current[i]);
				position = curlen;
			}
			else if (c == 0x44) { /* Left */
				if (position > 0) {
					putchar('\b');
					position--;
				}
				continue;
			}
			else if (c == 0x43) { /* Right */
				if (position < curlen) {
					putchar(current[position]);
					position++;
				}
				continue;
			}
			else if (c == 0x41 || c == 0x42) {
				/* Up,down: wipe the displayed line and show
				 * the neighbouring history entry instead. */
				rdln_print_c('\b',position);
				rdln_print_c(' ',curlen);
				rdln_print_c('\b',curlen);
				if (c == 0x41) /* Up */
					histposition--;
				else
					histposition++;
				if (histposition < 0)
					histposition = KCONSOLE_HISTORY -1 ;
				else
					histposition =  histposition % KCONSOLE_HISTORY;
				current = history[histposition];
				printf("%s", current);
				curlen = strlen(current);
				position = curlen;
				continue;
			}
			continue;
		}
		if (curlen >= MAX_CMDLINE)
			continue;
		/* Ordinary character: insert at cursor and redraw the tail. */
		insert_char(current, c, position);
		curlen++;
		for (i=position;i<curlen;i++)
			putchar(current[i]);
		position++;
		rdln_print_c('\b',curlen-position);
	}

	/* Non-empty line: advance the ring so the next read gets a new slot. */
	if (curlen) {
		histposition++;
		histposition = histposition % KCONSOLE_HISTORY;
	}
	current[curlen] = '\0';
	return current;
}
/** Kernel console managing thread.
 *
 * Reads lines from stdin, parses them and dispatches the matched
 * command's handler, until the "exit" command is entered.
 *
 * @param prompt Prompt string displayed before each input line.
 */
void kconsole(void *prompt)
{
	cmd_info_t *cmd_info;
	count_t len;
	char *cmdline;

	if (!stdin) {
		printf("%s: no stdin\n", __FUNCTION__);
		return;
	}

	while (true) {
		cmdline = clever_readline(prompt, stdin);
		len = strlen(cmdline);
		if (!len)
			continue;
		cmd_info = parse_cmdline(cmdline, len);
		if (!cmd_info)
			continue;
		/* "exit" is handled here; exit_info has no func callback. */
		if (strncmp(cmd_info->name,"exit", \
			    min(strlen(cmd_info->name),5)) == 0)
			break;
		(void) cmd_info->func(cmd_info->argv);
	}
}
/** Parse a single integer argument.
 *
 * Accepted forms: a plain decimal number, a symbol name (the value
 * stored at the symbol is used), &symbol (the symbol's address) or
 * *symbol / *number (the value pointed to).
 *
 * @param text Argument text (not necessarily NUL-terminated at len).
 * @param len Argument length.
 * @param result Where to store the parsed value.
 *
 * @return 0 on success, -1 on failure.
 */
static int parse_int_arg(char *text, size_t len, __native *result)
{
	char symname[MAX_SYMBOL_NAME];
	__address symaddr;
	bool isaddr = false;
	bool isptr = false;

	/* If we get a name, try to find it in symbol table */
	if (text[0] == '&') {
		isaddr = true;
		text++;
		len--;
	} else if (text[0] == '*') {
		isptr = true;
		text++;
		len--;
	}

	if (text[0] < '0' || text[0] > '9') {
		strncpy(symname, text, min(len + 1, MAX_SYMBOL_NAME));
		/*
		 * strncpy() does not NUL-terminate when the source is at
		 * least as long as the limit; terminate explicitly so
		 * overlong names cannot cause out-of-bounds reads.
		 */
		symname[min(len, MAX_SYMBOL_NAME - 1)] = '\0';

		symaddr = get_symbol_addr(symname);
		if (!symaddr) {
			printf("Symbol %s not found.\n",symname);
			return -1;
		}
		if (symaddr == (__address) -1) {
			printf("Duplicate symbol %s.\n",symname);
			symtab_print_search(symname);
			return -1;
		}
		if (isaddr)
			*result = (__native)symaddr;
		else if (isptr)
			*result = **((__native **)symaddr);
		else
			*result = *((__native *)symaddr);
	} else { /* It's a number - convert it */
		*result = atoi(text);
		if (isptr)
			*result = *((__native *)*result);
	}

	return 0;
}
/** Parse command line.
 *
 * Look the command up in the registered command list and convert its
 * arguments to the types specified in the command's descriptor.
 * On success, the returned command's lock is NOT held; it is released
 * before returning on every path.
 *
 * @param cmdline Command line as read from input device.
 * @param len Command line length.
 *
 * @return Structure describing the command, or NULL on failure
 *         (empty line, unknown command, bad argument count/type).
 */
cmd_info_t *parse_cmdline(char *cmdline, size_t len)
{
	index_t start = 0, end = 0;
	cmd_info_t *cmd = NULL;
	link_t *cur;
	int i;
	int error = 0;

	if (!parse_argument(cmdline, len, &start, &end)) {
		/* Command line did not contain alphanumeric word. */
		return NULL;
	}

	/*
	 * Find the command by name. On a match we break out of the loop
	 * with hlp->lock still held; it is released further below.
	 */
	spinlock_lock(&cmd_lock);
	for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
		cmd_info_t *hlp;
		hlp = list_get_instance(cur, cmd_info_t, link);

		spinlock_lock(&hlp->lock);

		/*
		 * Comparing max(strlen, argument length) characters rejects
		 * both prefixes and extensions of the command name.
		 */
		if (strncmp(hlp->name, &cmdline[start], max(strlen(hlp->name),
		    end-start+1)) == 0) {
			cmd = hlp;
			break;
		}

		spinlock_unlock(&hlp->lock);
	}

	spinlock_unlock(&cmd_lock);

	if (!cmd) {
		/* Unknown command. */
		printf("Unknown command.\n");
		return NULL;
	}

	/* cmd == hlp is locked */

	/*
	 * The command line must be further analyzed and
	 * the parameters therefrom must be matched and
	 * converted to those specified in the cmd info
	 * structure.
	 */

	for (i = 0; i < cmd->argc; i++) {
		char *buf;
		start = end + 1;
		if (!parse_argument(cmdline, len, &start, &end)) {
			printf("Too few arguments.\n");
			spinlock_unlock(&cmd->lock);
			return NULL;
		}

		error = 0;
		switch (cmd->argv[i].type) {
		case ARG_TYPE_STRING:
			/* Copy the argument into the command's preallocated
			 * buffer, truncating if needed, and NUL-terminate. */
			buf = cmd->argv[i].buffer;
			strncpy(buf, (const char *) &cmdline[start], min((end - start) + 2, cmd->argv[i].len));
			buf[min((end - start) + 1, cmd->argv[i].len - 1)] = '\0';
			break;
		case ARG_TYPE_INT:
			if (parse_int_arg(cmdline+start, end-start+1,
					  &cmd->argv[i].intval))
				error = 1;
			break;
		case ARG_TYPE_VAR:
			/* A "quoted" argument is stored as a string (quotes
			 * stripped); anything else is parsed as an integer. */
			if (start != end && cmdline[start] == '"' && cmdline[end] == '"') {
				buf = cmd->argv[i].buffer;
				strncpy(buf, (const char *) &cmdline[start+1],
					min((end-start), cmd->argv[i].len));
				buf[min((end - start), cmd->argv[i].len - 1)] = '\0';
				cmd->argv[i].intval = (__native) buf;
				cmd->argv[i].vartype = ARG_TYPE_STRING;
			} else if (!parse_int_arg(cmdline+start, end-start+1,
						  &cmd->argv[i].intval))
				cmd->argv[i].vartype = ARG_TYPE_INT;
			else {
				printf("Unrecognized variable argument.\n");
				error = 1;
			}
			break;
		case ARG_TYPE_INVALID:
		default:
			printf("invalid argument type\n");
			error = 1;
			break;
		}
	}

	if (error) {
		spinlock_unlock(&cmd->lock);
		return NULL;
	}

	/* Reject any trailing arguments beyond cmd->argc. */
	start = end + 1;
	if (parse_argument(cmdline, len, &start, &end)) {
		printf("Too many arguments.\n");
		spinlock_unlock(&cmd->lock);
		return NULL;
	}

	spinlock_unlock(&cmd->lock);
	return cmd;
}
/** Parse argument. |
* |
* Find start and end positions of command line argument. |
* |
* @param cmdline Command line as read from the input device. |
* @param len Number of characters in cmdline. |
* @param start On entry, 'start' contains pointer to the index |
* of first unprocessed character of cmdline. |
* On successful exit, it marks beginning of the next argument. |
* @param end Undefined on entry. On exit, 'end' points to the last character |
* of the next argument. |
* |
* @return false on failure, true on success. |
*/ |
bool parse_argument(char *cmdline, size_t len, index_t *start, index_t *end) |
{ |
int i; |
bool found_start = false; |
ASSERT(start != NULL); |
ASSERT(end != NULL); |
for (i = *start; i < len; i++) { |
if (!found_start) { |
if (is_white(cmdline[i])) |
(*start)++; |
else |
found_start = true; |
} else { |
if (is_white(cmdline[i])) |
break; |
} |
} |
*end = i - 1; |
return found_start; |
} |
//kernel/trunk/generic/src/time/timeout.c |
---|
0,0 → 1,206 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <time/timeout.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <config.h> |
#include <panic.h> |
#include <synch/spinlock.h> |
#include <func.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <arch.h> |
/** Initialize timeouts |
* |
* Initialize kernel timeouts. |
* |
*/ |
void timeout_init(void) |
{ |
spinlock_initialize(&CPU->timeoutlock, "timeout_lock"); |
list_initialize(&CPU->timeout_active_head); |
} |
/** Reinitialize timeout |
* |
* Initialize all members except the lock. |
* |
* @param t Timeout to be initialized. |
* |
*/ |
void timeout_reinitialize(timeout_t *t) |
{ |
t->cpu = NULL; |
t->ticks = 0; |
t->handler = NULL; |
t->arg = NULL; |
link_initialize(&t->link); |
} |
/** Initialize timeout |
* |
* Initialize all members including the lock. |
* |
* @param t Timeout to be initialized. |
* |
*/ |
void timeout_initialize(timeout_t *t) |
{ |
spinlock_initialize(&t->lock, "timeout_t_lock"); |
timeout_reinitialize(t); |
} |
/** Register timeout
 *
 * Insert timeout handler f (with argument arg)
 * to timeout list and make it execute in
 * time microseconds (or slightly more).
 *
 * The active timeout list is delta-encoded: each entry's ticks field
 * holds the ticks remaining AFTER all of its predecessors expire.
 *
 * @param t Timeout structure.
 * @param time Number of usec in the future to execute
 *             the handler.
 * @param f Timeout handler function.
 * @param arg Timeout handler argument.
 */
void timeout_register(timeout_t *t, __u64 time, timeout_handler_t f, void *arg)
{
	timeout_t *hlp = NULL;
	link_t *l, *m;
	ipl_t ipl;
	__u64 sum;

	ipl = interrupts_disable();
	spinlock_lock(&CPU->timeoutlock);
	spinlock_lock(&t->lock);

	/* A timeout must not be registered twice. */
	if (t->cpu)
		panic("t->cpu != 0");

	t->cpu = CPU;
	t->ticks = us2ticks(time);

	t->handler = f;
	t->arg = arg;

	/*
	 * Insert t into the active timeouts list according to t->ticks.
	 */
	sum = 0;
	l = CPU->timeout_active_head.next;
	while (l != &CPU->timeout_active_head) {
		hlp = list_get_instance(l, timeout_t, link);
		spinlock_lock(&hlp->lock);
		if (t->ticks < sum + hlp->ticks) {
			/* t expires before hlp; insert t in front of it. */
			spinlock_unlock(&hlp->lock);
			break;
		}
		sum += hlp->ticks;
		spinlock_unlock(&hlp->lock);
		l = l->next;
	}

	m = l->prev;
	list_prepend(&t->link, m); /* avoid using l->prev */

	/*
	 * Adjust t->ticks according to ticks accumulated in h's predecessors.
	 */
	t->ticks -= sum;

	/*
	 * Decrease ticks of t's immediate succesor by t->ticks.
	 */
	if (l != &CPU->timeout_active_head) {
		/* hlp still points at the successor from the search loop. */
		spinlock_lock(&hlp->lock);
		hlp->ticks -= t->ticks;
		spinlock_unlock(&hlp->lock);
	}

	spinlock_unlock(&t->lock);
	spinlock_unlock(&CPU->timeoutlock);
	interrupts_restore(ipl);
}
/** Unregister timeout
 *
 * Remove timeout from timeout list.
 *
 * @param t Timeout to unregister.
 *
 * @return true on success, false on failure (timeout not registered
 *         or already expired).
 */
bool timeout_unregister(timeout_t *t)
{
	timeout_t *hlp;
	link_t *l;
	ipl_t ipl;

grab_locks:
	ipl = interrupts_disable();

	spinlock_lock(&t->lock);
	if (!t->cpu) {
		/* Not registered, or its handler has already run. */
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		return false;
	}

	/*
	 * Elsewhere (timeout_register, clock) the timeoutlock is taken
	 * BEFORE the timeout's lock; use trylock and restart from
	 * scratch to avoid deadlock from the inverted acquisition order.
	 */
	if (!spinlock_trylock(&t->cpu->timeoutlock)) {
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		goto grab_locks;
	}

	/*
	 * Now we know for sure that t hasn't been activated yet
	 * and is lurking in t->cpu->timeout_active_head queue.
	 */

	l = t->link.next;
	if (l != &t->cpu->timeout_active_head) {
		/*
		 * Give t's remaining ticks back to its successor to keep
		 * the delta-encoded list consistent.
		 */
		hlp = list_get_instance(l, timeout_t, link);
		spinlock_lock(&hlp->lock);
		hlp->ticks += t->ticks;
		spinlock_unlock(&hlp->lock);
	}

	list_remove(&t->link);
	spinlock_unlock(&t->cpu->timeoutlock);

	timeout_reinitialize(t);
	spinlock_unlock(&t->lock);

	interrupts_restore(ipl);
	return true;
}
//kernel/trunk/generic/src/time/clock.c |
---|
0,0 → 1,103 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <time/clock.h> |
#include <time/timeout.h> |
#include <arch/types.h> |
#include <config.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <func.h> |
#include <proc/scheduler.h> |
#include <cpu.h> |
#include <arch.h> |
#include <adt/list.h> |
#include <atomic.h> |
#include <proc/thread.h> |
/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 */
void clock(void)
{
	link_t *l;
	timeout_t *h;
	timeout_handler_t f;
	void *arg;

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 */
	spinlock_lock(&CPU->timeoutlock);
	while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
		h = list_get_instance(l, timeout_t, link);
		spinlock_lock(&h->lock);
		/*
		 * The list is delta-encoded, so only the head entry needs
		 * to count down; stop once a non-expired entry is seen.
		 */
		if (h->ticks-- != 0) {
			spinlock_unlock(&h->lock);
			break;
		}
		list_remove(l);
		/*
		 * Copy out the handler and its argument, then drop both
		 * locks before invoking it so the handler may itself use
		 * the timeout interfaces.
		 */
		f = h->handler;
		arg = h->arg;
		timeout_reinitialize(h);
		spinlock_unlock(&h->lock);
		spinlock_unlock(&CPU->timeoutlock);

		f(arg);

		spinlock_lock(&CPU->timeoutlock);
	}
	spinlock_unlock(&CPU->timeoutlock);

	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 */
	if (THREAD) {
		__u64 ticks;

		spinlock_lock(&CPU->lock);
		CPU->needs_relink++;
		spinlock_unlock(&CPU->lock);

		/* Charge one tick to the running thread's quantum. */
		spinlock_lock(&THREAD->lock);
		if ((ticks = THREAD->ticks))
			THREAD->ticks--;
		spinlock_unlock(&THREAD->lock);

		/* Preempt once the quantum is exhausted (unless disabled). */
		if (!ticks && !PREEMPTION_DISABLED) {
			scheduler();
		}
	}
}
//kernel/trunk/generic/src/time/delay.c |
---|
0,0 → 1,56 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <time/delay.h> |
#include <arch/types.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <arch.h> |
/** Active delay |
* |
* Delay the execution for the given number |
* of microseconds (or slightly more). The delay |
* is implemented as CPU calibrated active loop. |
* |
* @param usec Number of microseconds to sleep. |
*/ |
void delay(__u32 usec) |
{ |
ipl_t ipl; |
/* |
* The delay loop is calibrated for each and every |
* CPU in the system. Therefore it is necessary to |
* call interrupts_disable() before calling the |
* asm_delay_loop(). |
*/ |
ipl = interrupts_disable(); |
asm_delay_loop(usec * CPU->delay_loop_const); |
interrupts_restore(ipl); |
} |
//kernel/trunk/generic/src/security/cap.c |
---|
0,0 → 1,72 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <security/cap.h> |
#include <proc/task.h> |
#include <synch/spinlock.h> |
#include <arch.h> |
#include <typedefs.h> |
/** Set capabilities. |
* |
* @param t Task whose capabilities are to be changed. |
* @param caps New set of capabilities. |
*/ |
void cap_set(task_t *t, cap_t caps) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
t->capabilities = caps; |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
} |
/** Get capabilities. |
* |
* @param t Task whose capabilities are to be returned. |
* @return Task's capabilities. |
*/ |
cap_t cap_get(task_t *t) |
{ |
ipl_t ipl; |
cap_t caps; |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
caps = t->capabilities; |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
return caps; |
} |
//kernel/trunk/generic/src/cpu/cpu.c |
---|
0,0 → 1,101 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <cpu.h> |
#include <arch.h> |
#include <arch/cpu.h> |
#include <mm/slab.h> |
#include <mm/page.h> |
#include <mm/frame.h> |
#include <arch/types.h> |
#include <config.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <memstr.h> |
#include <adt/list.h> |
#include <print.h> |
/* Array of per-CPU descriptors, allocated in cpu_init(). */
cpu_t *cpus;

/** Initialize CPUs
 *
 * Initialize kernel CPUs support.
 */
void cpu_init(void) {
	int i, j;

#ifdef CONFIG_SMP
	/* On SMP, only the bootstrap CPU allocates the cpus array. */
	if (config.cpu_active == 1) {
#endif /* CONFIG_SMP */
		cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count,
					FRAME_ATOMIC);
		if (!cpus)
			panic("malloc/cpus");

		/* initialize everything */
		memsetb((__address) cpus, sizeof(cpu_t) * config.cpu_count, 0);

		for (i=0; i < config.cpu_count; i++) {
			/* Kernel stack for the CPU. */
			cpus[i].stack = (__u8 *) PA2KA(PFN2ADDR(frame_alloc(STACK_FRAMES, FRAME_KA | FRAME_PANIC)));

			cpus[i].id = i;

			spinlock_initialize(&cpus[i].lock, "cpu_t.lock");

			/* Initialize the scheduler run queues. */
			for (j = 0; j < RQ_COUNT; j++) {
				spinlock_initialize(&cpus[i].rq[j].lock, "rq_t.lock");
				list_initialize(&cpus[i].rq[j].rq_head);
			}
		}

#ifdef CONFIG_SMP
	}
#endif /* CONFIG_SMP */

	/* Set up the calling CPU's own descriptor and mark it active. */
	CPU = &cpus[config.cpu_active-1];

	CPU->active = 1;
	CPU->tlb_active = 1;

	cpu_identify();
	cpu_arch_init();
}
/** List all processors. */ |
void cpu_list(void) |
{ |
int i; |
for (i = 0; i < config.cpu_count; i++) { |
if (cpus[i].active) |
cpu_print_report(&cpus[i]); |
else |
printf("cpu%d: not active\n", i); |
} |
} |
//kernel/trunk/generic/src/smp/ipi.c |
---|
0,0 → 1,54 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#ifdef CONFIG_SMP |
#include <smp/ipi.h> |
#include <config.h> |
/** Broadcast IPI message |
* |
* Broadcast IPI message to all CPUs. |
* |
* @param ipi Message to broadcast. |
* |
*/ |
void ipi_broadcast(int ipi) |
{ |
/* |
* Provisions must be made to avoid sending IPI: |
* - before all CPU's were configured to accept the IPI |
* - if there is only one CPU but the kernel was compiled with CONFIG_SMP |
*/ |
if ((config.cpu_active > 1) && (config.cpu_active == config.cpu_count)) |
ipi_broadcast_arch(ipi); |
} |
#endif /* CONFIG_SMP */ |
//kernel/trunk/generic/src/preempt/preemption.c |
---|
0,0 → 1,48 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <preemption.h> |
#include <arch.h> |
#include <arch/asm.h> |
#include <arch/barrier.h> |
#include <debug.h> |
/** Increment preemption disabled counter. */ |
void preemption_disable(void) |
{ |
THE->preemption_disabled++; |
memory_barrier(); |
} |
/** Decrement preemption disabled counter. */ |
void preemption_enable(void) |
{ |
ASSERT(THE->preemption_disabled); |
memory_barrier(); |
THE->preemption_disabled--; |
} |