Subversion Repositories HelenOS-historic

Rev 277

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/pm.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <arch/interrupt.h>
#include <arch/asm.h>

#include <config.h>

#include <memstr.h>
#include <mm/heap.h>
#include <debug.h>

/*
 * There is no segmentation in long mode, so we set up flat mode. In this
 * mode we use, for each privilege level, two segments spanning the whole
 * address space: one for code and one for data.
 */
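/*
 * Note: once the CPU is in 64-bit mode it ignores the base and limit of the
 * CS, DS, ES and SS descriptors, so the flat descriptors below matter mainly
 * for their access rights (DPL, code/data type) and for the longmode (L) bit.
 * The explicit base/limit values are presumably kept for the sake of the
 * 32-bit protected-mode code that runs before the switch to long mode (see
 * the KTEXT 32-bit descriptor below).
 */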

struct descriptor gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 1,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* KDATA descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* UTEXT descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_USER,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 1,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* UDATA descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 1,
      .granularity = 1,
      .base_24_31  = 0 },
    /* KTEXT 32-bit protected, for protected mode before long mode */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 1,
      .granularity = 1,
      .base_24_31  = 0 },
    /* TSS descriptor - setup will be completed later;
     * on AMD64 it is 16 bytes wide, i.e. 2 items in the table */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
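/*
 * The order of the entries above is assumed to match the descriptor index
 * constants used elsewhere in this file (KTEXT_DES, TSS_DES, ...), presumably
 * defined in <arch/pm.h>; gdtselector() is assumed to turn such an index into
 * a selector value, i.e. index << 3 with requested privilege level 0.
 */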

struct idescriptor idt[IDT_ITEMS];

/* The pseudo-descriptor limit is the offset of the last valid byte, hence -1. */
struct ptr_16_64 gdtr = { .limit = sizeof(gdt) - 1, .base = (__u64) gdt };
struct ptr_16_64 idtr = { .limit = sizeof(idt) - 1, .base = (__u64) idt };

static struct tss tss;
struct tss *tss_p = NULL;

/* TODO: the kernel does not compile correctly if this symbol is missing - why? */
int __attribute__ ((section ("K_DATA_START"))) __fake;

void gdt_tss_setbase(struct descriptor *d, __address base)
{
    struct tss_descriptor *td = (struct tss_descriptor *) d;

    td->base_0_15 = base & 0xffff;
    td->base_16_23 = (base >> 16) & 0xff;
    td->base_24_31 = (base >> 24) & 0xff;
    td->base_32_63 = base >> 32;
}
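/*
 * In long mode a TSS descriptor is 16 bytes: the extra quadword holds bits
 * 32-63 of the base address (base_32_63 above), which is why the TSS entry
 * occupies two consecutive GDT slots.
 */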

void gdt_tss_setlimit(struct descriptor *d, __u32 limit)
{
    struct tss_descriptor *td = (struct tss_descriptor *) d;

    td->limit_0_15 = limit & 0xffff;
    td->limit_16_19 = (limit >> 16) & 0xf;
}

void idt_setoffset(struct idescriptor *d, __address offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = (offset >> 16) & 0xffff;
    d->offset_32_63 = offset >> 32;
}

void tss_initialize(struct tss *t)
{
    memsetb((__address) t, sizeof(struct tss), 0);
}

/*
 * This function takes care of proper setup of the IDT and IDTR.
 */
void idt_init(void)
{
    struct idescriptor *d;
    int i;

    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        d->selector = gdtselector(KTEXT_DES);

        d->present = 1;
        d->type = AR_INTERRUPT;    /* interrupt gate: maskable interrupts are disabled on entry */

        if (i == VECTOR_SYSCALL) {
            /*
             * The syscall interrupt gate must be callable from userland.
             */
            d->dpl |= PL_USER;
        }

        idt_setoffset(d, ((__address) interrupt_handlers) + i * interrupt_handler_size);
        trap_register(i, null_interrupt);
    }
    trap_register(13, gp_fault);
    trap_register( 7, nm_fault);
    trap_register(12, ss_fault);
}
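/*
 * The offset computation above relies on interrupt_handlers being a table of
 * IDT_ITEMS uniformly sized entry stubs, each interrupt_handler_size bytes
 * long, so the i-th gate simply points i * interrupt_handler_size bytes into
 * the table.
 */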

/* Clean the IOPL (bits 12-13) and NT (bit 14) flags in the RFLAGS register */
static void clean_IOPL_NT_flags(void)
{
    asm
    (
        "pushfq;"
        "pop %%rax;"
        "and $~(0x7000),%%rax;"
        "pushq %%rax;"
        "popfq;"
        :
        :
        : "%rax"
    );
}
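/*
 * Mask arithmetic: IOPL occupies bits 12-13 (0x3000) and NT is bit 14
 * (0x4000), so and-ing RFLAGS with ~0x7000 clears all three bits at once.
 */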

/* Clean the AM (bit 18) flag in the CR0 register */
static void clean_AM_flag(void)
{
    asm
    (
        "mov %%cr0,%%rax;"
        "and $~(0x40000),%%rax;"
        "mov %%rax,%%cr0;"
        :
        :
        : "%rax"
    );
}
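/*
 * CR0.AM is bit 18, hence the 0x40000 (1 << 18) mask; with AM cleared,
 * alignment checking stays disabled even if EFLAGS.AC gets set.
 */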

void pm_init(void)
{
    struct descriptor *gdt_p = (struct descriptor *) gdtr.base;
    struct tss_descriptor *tss_desc;

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: the bootstrap CPU has a statically allocated TSS, because
         * the heap has not been initialized yet.
         */
        tss_p = &tss;
    }
    else {
        tss_p = (struct tss *) malloc(sizeof(struct tss));
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    tss_initialize(tss_p);

    tss_desc = (struct tss_descriptor *) (&gdt_p[TSS_DES]);
    tss_desc->present = 1;
    tss_desc->type = AR_TSS;
    tss_desc->dpl = PL_KERNEL;

    gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p);
    gdt_tss_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);

    __asm__("lgdt %0" : : "m" (gdtr));
    __asm__("lidt %0" : : "m" (idtr));
    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    __asm__("ltr %0" : : "r" ((__u16) gdtselector(TSS_DES)));

    clean_IOPL_NT_flags();    /* Disable I/O on nonprivileged levels */
    clean_AM_flag();          /* Disable alignment check */
}
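/*
 * pm_init() is expected to run once on each CPU during early startup: the
 * bootstrap CPU (config.cpu_active == 1) also initializes the shared IDT and
 * uses the statically allocated TSS, while the other CPUs allocate their TSS
 * from the heap.
 */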