author | Theo de Raadt <deraadt@cvs.openbsd.org> | 2017-05-31 19:18:20 +0000
committer | Theo de Raadt <deraadt@cvs.openbsd.org> | 2017-05-31 19:18:20 +0000
commit | c4cda060d41613faf55d1850cf44830fd702efe9 (patch)
tree | c863e437ff6ed15b3e3496eafcba9dd7f2e8a42a
parent | 8b03cf129e5e2d5a745565e8568bd6f752b55f19 (diff)
Split early startup code out of locore.S into locore0.S. Adjust link
run so that this locore0.o is always at the start of the executable.
But randomize the link order of all other .o files in the kernel, so
that their exec/rodata/data/bss segments land all over the place.
Late during kernel boot, unmap the early startup code.
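The randomized link order comes from a small change to the arch Makefiles (the full Makefile.amd64 and Makefile.i386 hunks are in the diff below). As a condensed sketch of the amd64 rules: locore0.o and the new gap.o (generated by sys/conf/makegap.sh) are always linked first, every other object is shuffled into a fresh order per build, and SMALL_KERNEL (ramdisk) configs fall back to cat so their order stays fixed.

```make
# Condensed from the Makefile.amd64 hunk below: locore0.o and gap.o lead the
# link; all remaining objects are written to "lorder" in random order.
SORTR=		sort -R
.if ${IDENT:M-DSMALL_KERNEL}
SORTR=		cat			# ramdisk kernels keep a fixed link order
.endif

SYSTEM_HEAD=	locore0.o gap.o
SYSTEM_LD=	@echo ${LD} ${LINKFLAGS} -o $@ '$${SYSTEM_HEAD} vers.o $${OBJS}'; \
		echo ${OBJS} param.o ioconf.o vers.o | tr " " "\n" | ${SORTR} > lorder; \
		${LD} ${LINKFLAGS} -o $@ ${SYSTEM_HEAD} `cat lorder`
```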
As a result, the internal layout of every newly built bsd kernel is
different from past kernels. Internal relative offsets are not known
to an outside attacker. The only known offsets are in the startup code,
which has been unmapped.
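The unmapping is the small unmap_startup() routine added to both autoconf.c files (the hunks appear in the diff below); it walks every page from kernel_text up to the endboot marker and drops the mapping with pmap_kremove(), called from cpu_configure() early in autoconf:

```c
/*
 * As added in the amd64/i386 autoconf.c hunks below: remove the mappings
 * of the boot-time startup code once the kernel no longer needs it.
 */
void
unmap_startup(void)
{
	extern void *kernel_text, *endboot;
	vaddr_t p = (vaddr_t)&kernel_text;

	do {
		pmap_kremove(p, PAGE_SIZE);
		p += PAGE_SIZE;
	} while (p < (vaddr_t)&endboot);
}
```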
Ramdisk kernels cannot be compiled like this, because they are gzip'd.
When the internal pointer references change, the compression dictionary
bloats, resulting in poorer compression.
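To see the effect described above, a hypothetical check (the kernel file names are illustrative, not part of this commit) is to gzip the same kernel built once with the fixed link order and once with the randomized order:

```sh
# Hypothetical comparison: bsd.fixed and bsd.random stand for kernels built
# with SORTR=cat and SORTR="sort -R" respectively.
gzip -9c bsd.fixed  | wc -c
gzip -9c bsd.random | wc -c
# Per the note above, the scattered internal pointer references bloat the
# compression dictionary, so the randomized image compresses worse -- which
# is why SMALL_KERNEL (ramdisk) builds keep SORTR=cat.
```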
ok kettenis mlarkin visa, also thanks to tedu for getting me back to this
-rw-r--r-- | sys/arch/amd64/amd64/autoconf.c | 17
-rw-r--r-- | sys/arch/amd64/amd64/locore.S | 531
-rw-r--r-- | sys/arch/amd64/amd64/locore0.S | 672
-rw-r--r-- | sys/arch/amd64/conf/Makefile.amd64 | 25
-rw-r--r-- | sys/arch/amd64/conf/files.amd64 | 3
-rw-r--r-- | sys/arch/amd64/conf/ld.script | 4
-rw-r--r-- | sys/arch/i386/conf/Makefile.i386 | 27
-rw-r--r-- | sys/arch/i386/conf/files.i386 | 3
-rw-r--r-- | sys/arch/i386/conf/ld.script | 4
-rw-r--r-- | sys/arch/i386/i386/autoconf.c | 17
-rw-r--r-- | sys/arch/i386/i386/locore.s | 412
-rw-r--r-- | sys/arch/i386/i386/locore0.S | 482
-rw-r--r-- | sys/conf/makegap.sh | 32 |
13 files changed, 1266 insertions, 963 deletions
diff --git a/sys/arch/amd64/amd64/autoconf.c b/sys/arch/amd64/amd64/autoconf.c index c75907afce5..676c5d962ff 100644 --- a/sys/arch/amd64/amd64/autoconf.c +++ b/sys/arch/amd64/amd64/autoconf.c @@ -1,4 +1,4 @@ -/* $OpenBSD: autoconf.c,v 1.47 2016/06/08 17:24:44 tedu Exp $ */ +/* $OpenBSD: autoconf.c,v 1.48 2017/05/31 19:18:18 deraadt Exp $ */ /* $NetBSD: autoconf.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */ /*- @@ -59,6 +59,7 @@ #include <sys/socketvar.h> #include <sys/timeout.h> #include <sys/hibernate.h> +#include <uvm/uvm.h> #include <net/if.h> #include <net/if_types.h> @@ -105,6 +106,18 @@ void aesni_setup(void); extern int amd64_has_aesni; #endif +void +unmap_startup(void) +{ + extern void *kernel_text, *endboot; + vaddr_t p = (vaddr_t)&kernel_text; + + do { + pmap_kremove(p, PAGE_SIZE); + p += PAGE_SIZE; + } while (p < (vaddr_t)&endboot); +} + /* * Determine i/o configuration for a machine. */ @@ -123,6 +136,8 @@ cpu_configure(void) ioapic_enable(); #endif + unmap_startup(); + #ifdef MULTIPROCESSOR cpu_init_idle_pcbs(); #endif diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S index c617d17b467..c8a4b133d9d 100644 --- a/sys/arch/amd64/amd64/locore.S +++ b/sys/arch/amd64/amd64/locore.S @@ -1,4 +1,4 @@ -/* $OpenBSD: locore.S,v 1.84 2017/02/06 09:15:51 mpi Exp $ */ +/* $OpenBSD: locore.S,v 1.85 2017/05/31 19:18:18 deraadt Exp $ */ /* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */ /* @@ -140,11 +140,6 @@ #define SET_CURPCB(reg) movq reg,CPUVAR(CURPCB) -/* XXX temporary kluge; these should not be here */ -/* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */ -#include <dev/isa/isareg.h> - - /* * Initialization */ @@ -230,524 +225,11 @@ gdt64_start: .quad 0x00cf92000000ffff /* kernel DS */ gdt64_end: -farjmp64: - .long longmode-KERNBASE - .word GSEL(GCODE_SEL, SEL_KPL) - - .space 512 -tmpstk: - - .globl _C_LABEL(cpu_private) - .comm _C_LABEL(cpu_private),NBPG,NBPG - /* * Some hackage to deal with 64bit symbols in 32 bit mode. * This may not be needed if things are cleaned up a little. */ - .text - .globl _C_LABEL(kernel_text) - .set _C_LABEL(kernel_text),KERNTEXTOFF - - .code32 - - .globl start -start: movw $0x1234,0x472 # warm boot - - /* - * Load parameters from stack - * (howto, bootdev, bootapiver, esym, extmem, cnvmem, ac, av) - */ - movl 4(%esp),%eax - movl %eax, RELOC(boothowto) - movl 8(%esp),%eax - movl %eax, RELOC(bootdev) - - /* - * Syms are placed after last load and bss of the kernel. - * XXX Boot ignores 2MB roundup of _end, so esyms can be < _end. - */ - movl 16(%esp), %eax - testl %eax,%eax - jz 1f - addl $KERNBASE_LO,%eax - movl $RELOC(esym),%ebp - movl %eax,(%ebp) - movl $KERNBASE_HI,4(%ebp) -1: - movl 20(%esp), %eax - movl %eax, RELOC(biosextmem) - movl 24(%esp), %eax - movl %eax, RELOC(biosbasemem) - - movl 12(%esp), %eax - movl %eax, RELOC(bootapiver) - - /* - * Copy the boot arguments to bootinfo[] in machdep.c. - * - * We are passed the size of the data /boot passed to us in - * 28(%esp). We copy up to bootinfo_size bytes of data into - * bootinfo and report back how much we copied in bootinfo_size. - * - * machdep.c can then take action if bootinfo_size >= bootinfo[] - * (which would meant that we may have been passed too much data). - */ - movl 28(%esp), %eax - movl %eax, %ecx - cmpl RELOC(bootinfo_size), %ecx /* Too much? 
*/ - jb bi_size_ok - movl RELOC(bootinfo_size), %ecx /* Only copy this much */ -bi_size_ok: - movl %eax, RELOC(bootinfo_size) /* Report full amount */ - - movl $RELOC(bootinfo), %edi /* Destination */ - movl 32(%esp), %esi /* Source */ - rep movsb /* Copy this many bytes */ - - /* First, reset the PSL. */ - pushl $PSL_MBO - popfl - - xorl %eax,%eax - cpuid - movl %eax,RELOC(cpuid_level) - movl $RELOC(cpu_vendor),%ebp - movl %ebx,(%ebp) - movl %edx,4(%ebp) - movl %ecx,8(%ebp) - movl $0, 12(%ebp) - - movl $1,%eax - cpuid - movl %eax,RELOC(cpu_id) - movl %ebx,RELOC(cpu_ebxfeature) - movl %ecx,RELOC(cpu_ecxfeature) - movl %edx,RELOC(cpu_feature) - - movl $0x0a,%eax - cpuid - movl %eax,RELOC(_C_LABEL(cpu_perf_eax)) - movl %ebx,RELOC(_C_LABEL(cpu_perf_ebx)) - movl %edx,RELOC(_C_LABEL(cpu_perf_edx)) - - movl $0x80000001, %eax - cpuid - andl $CPUID_NXE, %edx /* other bits may clash */ - jz cont - - /* - * We have NX, set pg_nx accordingly. - * NX bit is bit 63 (bit 31 of the second 32 bit dword) - need - * to use 32 bit registers here - */ - pushl %edx - movl RELOC((pg_nx + 4)), %edx /* Second dword */ - orl $0x80000000, %edx /* Bit 31 (really 63) */ - movl %edx, RELOC((pg_nx + 4)) - popl %edx -cont: - orl %edx, RELOC(cpu_feature) - - movl $0x80000007,%eax - cpuid - movl %edx,RELOC(_C_LABEL(cpu_apmi_edx)) - - /* - * Finished with old stack; load new %esp now instead of later so we - * can trace this code without having to worry about the trace trap - * clobbering the memory test or the zeroing of the bss+bootstrap page - * tables. - * - * The boot program should check: - * text+data <= &stack_variable - more_space_for_stack - * text+data+bss+pad+space_for_page_tables <= end_of_memory - * Oops, the gdt is in the carcass of the boot program so clearing - * the rest of memory is still not possible. - */ - movl $RELOC(tmpstk),%esp - -/* - * Virtual address space of kernel: - * - * text | data | bss | [syms] | page dir | proc0 kstack | L1 ptp | L2 ptp | L3 - * 0 1 2 3 - */ - -#if L2_SLOT_KERNBASE > 0 -#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1)) -#else -#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1) -#endif - -#if L3_SLOT_KERNBASE > 0 -#define TABLE_L3_ENTRIES (2 * NKL3_KIMG_ENTRIES) -#else -#define TABLE_L3_ENTRIES NKL3_KIMG_ENTRIES -#endif - - -#define PROC0_PML4_OFF 0 -#define PROC0_STK_OFF (PROC0_PML4_OFF + NBPG) -#define PROC0_PTP3_OFF (PROC0_STK_OFF + UPAGES * NBPG) -#define PROC0_PTP2_OFF (PROC0_PTP3_OFF + NKL4_KIMG_ENTRIES * NBPG) -#define PROC0_PTP1_OFF (PROC0_PTP2_OFF + TABLE_L3_ENTRIES * NBPG) -#define PROC0_DMP3_OFF (PROC0_PTP1_OFF + TABLE_L2_ENTRIES * NBPG) -#define PROC0_DMP2_OFF (PROC0_DMP3_OFF + NDML3_ENTRIES * NBPG) -#define TABLESIZE \ - ((NKL4_KIMG_ENTRIES + TABLE_L3_ENTRIES + TABLE_L2_ENTRIES + 1 + UPAGES + \ - NDML3_ENTRIES + NDML2_ENTRIES) * NBPG) - -#define fillkpt \ -1: movl %eax,(%ebx) ; /* store phys addr */ \ - movl $0,4(%ebx) ; /* upper 32 bits 0 */ \ - addl $8,%ebx ; /* next pte/pde */ \ - addl $NBPG,%eax ; /* next phys page */ \ - loop 1b ; /* till finished */ - - -#define fillkpt_nx \ - pushl %ebp ; /* save */ \ -1: movl %eax,(%ebx) ; /* store phys addr */ \ - movl RELOC((pg_nx + 4)), %ebp ; /* NX bit? */ \ - movl %ebp,4(%ebx) ; /* upper 32 bits */ \ - addl $8,%ebx ; /* next pte/pde */ \ - addl $NBPG,%eax ; /* next phys page */ \ - loop 1b ; /* till finished */ \ - popl %ebp - - /* Find end of kernel image. */ - movl $RELOC(end),%edi -#if (NKSYMS || defined(DDB)) - /* Save the symbols (if loaded). 
*/ - movl RELOC(esym),%eax - testl %eax,%eax - jz 1f - subl $KERNBASE_LO,%eax /* XXX */ - /* Page tables must be after symbols and after kernel image. */ - cmpl %eax,%edi - jg 1f - movl %eax,%edi -1: -#endif - /* Clear tables */ - movl %edi,%esi - addl $PGOFSET,%esi - andl $~PGOFSET,%esi - - movl %esi,%edi - xorl %eax,%eax - cld - movl $TABLESIZE,%ecx - shrl $2,%ecx - rep - stosl - - leal (PROC0_PTP1_OFF)(%esi), %ebx - - /* - * Compute etext - KERNBASE. This can't be > 4G, or we can't deal - * with it anyway, since we can't load it in 32 bit mode. So use - * the bottom 32 bits. - */ - movl $RELOC(etext),%edx - addl $PGOFSET,%edx - andl $~PGOFSET,%edx - - /* - * Skip the first MB. - */ - movl $(KERNTEXTOFF_LO - KERNBASE_LO),%eax - movl %eax,%ecx - shrl $(PGSHIFT-3),%ecx /* ((n >> PGSHIFT) << 3) for # pdes */ - addl %ecx,%ebx - - /* Map kernel text RO, X */ - movl %edx,%ecx - subl %eax,%ecx - shrl $PGSHIFT,%ecx - orl $(PG_V|PG_KR),%eax - fillkpt - - /* Map .rodata RO, NX */ - movl $RELOC(__rodata_start), %eax - movl $RELOC(erodata), %ecx - addl $PGOFSET, %ecx - andl $~PGOFSET, %ecx - subl %eax, %ecx - shrl $PGSHIFT, %ecx - orl $(PG_V|PG_KR), %eax - fillkpt_nx - - /* Map the data and BSS sections RW, NX */ - movl $RELOC(__data_start), %eax - movl $RELOC(__kernel_bss_end),%ecx - addl $PGOFSET, %ecx - andl $~PGOFSET, %ecx - subl %eax, %ecx - shrl $PGSHIFT,%ecx - orl $(PG_V|PG_KW), %eax - fillkpt_nx - - /* Map "hole" at end of BSS RO, NX */ - movl $RELOC(__kernel_bss_end), %eax - movl $RELOC(end), %ecx - addl $PGOFSET, %ecx - andl $~PGOFSET, %ecx - cmpl %eax, %ecx - je map_syms - subl %eax, %ecx - shrl $PGSHIFT, %ecx - orl $(PG_V|PG_KR), %eax - fillkpt_nx - -map_syms: - /* Map symbol space RO, NX */ - movl $RELOC(end), %eax - movl %esi, %ecx - addl $PGOFSET, %ecx - andl $~PGOFSET, %ecx - cmpl %eax, %ecx - je map_tables - subl %eax, %ecx - shrl $PGSHIFT, %ecx - orl $(PG_V|PG_KR), %eax - fillkpt_nx - -map_tables: - /* Map the bootstrap tables RW, NX */ - movl %esi, %edx - leal (PG_V|PG_KW)(%edx),%eax - movl $TABLESIZE,%ecx - shrl $PGSHIFT,%ecx - fillkpt_nx - - /* Map ISA I/O mem (later atdevbase) RW, NX */ - movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax - movl $(IOM_SIZE>>PGSHIFT),%ecx - fillkpt_nx - - /* Set up level 2 pages (RWX) */ - leal (PROC0_PTP2_OFF)(%esi),%ebx - leal (PROC0_PTP1_OFF)(%esi),%eax - orl $(PG_V|PG_KW), %eax - movl $(NKL2_KIMG_ENTRIES+1),%ecx - fillkpt - -#if L2_SLOT_KERNBASE > 0 - /* If needed, set up L2 entries for actual kernel mapping (RWX) */ - leal (PROC0_PTP2_OFF+ L2_SLOT_KERNBASE*8)(%esi),%ebx - leal (PROC0_PTP1_OFF)(%esi),%eax - orl $(PG_V|PG_KW), %eax - movl $(NKL2_KIMG_ENTRIES+1),%ecx - fillkpt -#endif - - /* Set up level 3 pages (RWX) */ - leal (PROC0_PTP3_OFF)(%esi),%ebx - leal (PROC0_PTP2_OFF)(%esi),%eax - orl $(PG_V|PG_KW), %eax - movl $NKL3_KIMG_ENTRIES,%ecx - fillkpt - -#if L3_SLOT_KERNBASE > 0 - /* If needed, set up L3 entries for actual kernel mapping (RWX) */ - leal (PROC0_PTP3_OFF+ L3_SLOT_KERNBASE*8)(%esi),%ebx - leal (PROC0_PTP2_OFF)(%esi),%eax - orl $(PG_V|PG_KW), %eax - movl $NKL3_KIMG_ENTRIES,%ecx - fillkpt -#endif - - /* Set up top level entries for identity mapping (RWX) */ - leal (PROC0_PML4_OFF)(%esi),%ebx - leal (PROC0_PTP3_OFF)(%esi),%eax - orl $(PG_V|PG_KW), %eax - movl $NKL4_KIMG_ENTRIES,%ecx - fillkpt - - /* Set up top level entries for actual kernel mapping (RWX) */ - leal (PROC0_PML4_OFF + L4_SLOT_KERNBASE*8)(%esi),%ebx - leal (PROC0_PTP3_OFF)(%esi),%eax - orl $(PG_V|PG_KW), %eax - movl $NKL4_KIMG_ENTRIES,%ecx - fillkpt - - /* - * Map the 
first 4 GB with the direct map. We'll map the rest - * in pmap_bootstrap. But we always need the first 4GB during - * bootstrap. The direct map is mapped RW, NX. We also change - * the permissions on the 2MB pages corresponding to the kernel - * PAs to RO to prevent someone writing to the kernel area - * via the direct map. - */ - leal (PROC0_DMP2_OFF)(%esi), %ebx - xorl %eax, %eax - movl $(NDML2_ENTRIES * NPDPG), %ecx -1: orl $(PG_V|PG_KW|PG_PS|PG_G), %eax - cmpl $__kernel_base_phys, %eax - jl store_pte - cmpl $__kernel_end_phys, %eax - jg store_pte - andl $(~PG_KW), %eax -store_pte: - movl %eax, (%ebx) - pushl %ebp - movl RELOC((pg_nx + 4)), %ebp - movl %ebp, 4(%ebx) - popl %ebp - addl $8, %ebx - addl $NBPD_L2, %eax - loop 1b - - leal (PROC0_DMP3_OFF)(%esi), %ebx - leal (PROC0_DMP2_OFF)(%esi), %eax - orl $(PG_V|PG_KW), %eax - movl $NDML2_ENTRIES, %ecx - fillkpt_nx - - leal (PROC0_PML4_OFF + PDIR_SLOT_DIRECT * 8)(%esi), %ebx - leal (PROC0_DMP3_OFF)(%esi), %eax - orl $(PG_V|PG_KW), %eax - movl $NDML3_ENTRIES, %ecx - fillkpt_nx - - /* Install recursive top level PDE */ - leal (PROC0_PML4_OFF + PDIR_SLOT_PTE*8)(%esi),%ebx - leal (PROC0_PML4_OFF)(%esi),%eax - orl $(PG_V|PG_KW),%eax - movl %eax,(%ebx) - pushl %ebp - movl RELOC((pg_nx + 4)), %ebp - movl %ebp, 4(%ebx) - popl %ebp - - /* Save phys. addr of PTD, for libkvm. */ - movl $RELOC(PTDpaddr),%ebp - movl %esi,(%ebp) - movl $0,4(%ebp) - - /* - * Startup checklist: - * 1. Enable PAE (and SSE while here). - */ - movl %cr4,%eax - orl $(CR4_DEFAULT),%eax - movl %eax,%cr4 - - /* - * 2. Set Long Mode Enable in EFER. Also enable the - * syscall extensions and NX (if available). - */ - movl $MSR_EFER,%ecx - rdmsr - xorl %eax,%eax /* XXX */ - orl $(EFER_LME|EFER_SCE),%eax - movl RELOC((pg_nx + 4)), %ebx - cmpl $0, %ebx - je write_efer - orl $(EFER_NXE), %eax -write_efer: - wrmsr - - /* - * 3. Load %cr3 with pointer to PML4. - */ - movl %esi,%eax - movl %eax,%cr3 - - /* - * 4. Enable paging and the rest of it. - */ - movl %cr0,%eax - orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP),%eax - movl %eax,%cr0 - jmp compat -compat: - - /* - * 5. - * Not quite done yet, we're now in a compatibility segment, - * in legacy mode. We must jump to a long mode segment. - * Need to set up a temporary GDT with a long mode segment - * in it to do that. - */ - - movl $RELOC(gdt64),%eax - lgdt (%eax) - movl $RELOC(farjmp64),%eax - ljmp *(%eax) - -.code64 -longmode: - /* - * 6. - * Finally, we're in long mode. However, we're still - * in the identity mapped area (could not jump out - * of that earlier because it would have been a > 32bit - * jump). We can do that now, so here we go. - */ - movabsq $longmode_hi,%rax - jmp *%rax -longmode_hi: - /* - * We have arrived. - * There's no need anymore for the identity mapping in low - * memory, remove it. - */ - movq $KERNBASE,%r8 - -#if L2_SLOT_KERNBASE > 0 - movq $(NKL2_KIMG_ENTRIES+1),%rcx - leaq (PROC0_PTP2_OFF)(%rsi),%rbx - addq %r8, %rbx -1: movq $0 ,(%rbx) - addq $8,%rbx - loop 1b -#endif - -#if L3_SLOT_KERNBASE > 0 - movq $NKL3_KIMG_ENTRIES,%rcx - leaq (PROC0_PTP3_OFF)(%rsi),%rbx - addq %r8, %rbx -1: movq $0 ,(%rbx) - addq $8,%rbx - loop 1b -#endif - - movq $NKL4_KIMG_ENTRIES,%rcx - leaq (PROC0_PML4_OFF)(%rsi),%rbx # old, phys address of PML4 - addq %r8, %rbx # new, virtual address of PML4 -1: movq $0, (%rbx) - addq $8,%rbx - loop 1b - - /* Relocate atdevbase. 
*/ - movq $(TABLESIZE+KERNBASE),%rdx - addq %rsi,%rdx - movq %rdx,_C_LABEL(atdevbase)(%rip) - - /* Record start of symbols */ - movq $__kernel_bss_end, _C_LABEL(ssym)(%rip) - - /* Set up bootstrap stack. */ - leaq (PROC0_STK_OFF)(%rsi),%rax - addq %r8,%rax - movq %rax,_C_LABEL(proc0paddr)(%rip) - leaq (USPACE-FRAMESIZE)(%rax),%rsp - movq %rsi,PCB_CR3(%rax) # pcb->pcb_cr3 - xorq %rbp,%rbp # mark end of frames - - xorw %ax,%ax - movw %ax,%gs - movw %ax,%fs - - /* XXX merge these */ - leaq TABLESIZE(%rsi),%rdi - call _C_LABEL(init_x86_64) - - call _C_LABEL(main) - /*****************************************************************************/ /* @@ -1217,17 +699,6 @@ ENTRY(pagezero) sfence ret - .section .codepatch,"a" - .align 8, 0xcc - .globl _C_LABEL(codepatch_begin) -_C_LABEL(codepatch_begin): - .previous - - .section .codepatchend,"a" - .globl _C_LABEL(codepatch_end) -_C_LABEL(codepatch_end): - .previous - #if NXEN > 0 /* Hypercall page needs to be page aligned */ .text diff --git a/sys/arch/amd64/amd64/locore0.S b/sys/arch/amd64/amd64/locore0.S new file mode 100644 index 00000000000..350b57c6644 --- /dev/null +++ b/sys/arch/amd64/amd64/locore0.S @@ -0,0 +1,672 @@ +/* $OpenBSD: locore0.S,v 1.1 2017/05/31 19:18:18 deraadt Exp $ */ +/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */ + +/* + * Copyright-o-rama! + */ + +/* + * Copyright (c) 2001 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Frank van der Linden for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/*- + * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)locore.s 7.3 (Berkeley) 5/13/91 + */ + +#include "assym.h" +#include "lapic.h" +#include "ksyms.h" +#include "xen.h" +#include "hyperv.h" + +#include <sys/syscall.h> + +#include <machine/param.h> +#include <machine/segments.h> +#include <machine/specialreg.h> +#include <machine/trap.h> +#include <machine/frameasm.h> + +#if NLAPIC > 0 +#include <machine/i82489reg.h> +#endif + +/* + * override user-land alignment before including asm.h + */ +#define ALIGN_DATA .align 8 +#define ALIGN_TEXT .align 16,0x90 +#define _ALIGN_TEXT ALIGN_TEXT + +#include <machine/asm.h> + +/* XXX temporary kluge; these should not be here */ +/* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */ +#include <dev/isa/isareg.h> + +#define _RELOC(x) ((x) - KERNBASE) +#define RELOC(x) _RELOC(_C_LABEL(x)) + +/* + * Some hackage to deal with 64bit symbols in 32 bit mode. + * This may not be needed if things are cleaned up a little. + */ + + .text + .globl _C_LABEL(kernel_text) + .set _C_LABEL(kernel_text),KERNTEXTOFF + + .code32 + + .globl start +start: movw $0x1234,0x472 # warm boot + + /* + * Load parameters from stack + * (howto, bootdev, bootapiver, esym, extmem, cnvmem, ac, av) + */ + movl 4(%esp),%eax + movl %eax, RELOC(boothowto) + movl 8(%esp),%eax + movl %eax, RELOC(bootdev) + + /* + * Syms are placed after last load and bss of the kernel. + * XXX Boot ignores 2MB roundup of _end, so esyms can be < _end. + */ + movl 16(%esp), %eax + testl %eax,%eax + jz 1f + addl $KERNBASE_LO,%eax + movl $RELOC(esym),%ebp + movl %eax,(%ebp) + movl $KERNBASE_HI,4(%ebp) +1: + movl 20(%esp), %eax + movl %eax, RELOC(biosextmem) + movl 24(%esp), %eax + movl %eax, RELOC(biosbasemem) + + movl 12(%esp), %eax + movl %eax, RELOC(bootapiver) + + /* + * Copy the boot arguments to bootinfo[] in machdep.c. + * + * We are passed the size of the data /boot passed to us in + * 28(%esp). We copy up to bootinfo_size bytes of data into + * bootinfo and report back how much we copied in bootinfo_size. + * + * machdep.c can then take action if bootinfo_size >= bootinfo[] + * (which would meant that we may have been passed too much data). + */ + movl 28(%esp), %eax + movl %eax, %ecx + cmpl RELOC(bootinfo_size), %ecx /* Too much? */ + jb bi_size_ok + movl RELOC(bootinfo_size), %ecx /* Only copy this much */ +bi_size_ok: + movl %eax, RELOC(bootinfo_size) /* Report full amount */ + + movl $RELOC(bootinfo), %edi /* Destination */ + movl 32(%esp), %esi /* Source */ + rep movsb /* Copy this many bytes */ + + /* First, reset the PSL. */ + pushl $PSL_MBO + popfl + + xorl %eax,%eax + cpuid + movl %eax,RELOC(cpuid_level) + movl $RELOC(cpu_vendor),%ebp + movl %ebx,(%ebp) + movl %edx,4(%ebp) + movl %ecx,8(%ebp) + movl $0, 12(%ebp) + + movl $1,%eax + cpuid + movl %eax,RELOC(cpu_id) + movl %ebx,RELOC(cpu_ebxfeature) + movl %ecx,RELOC(cpu_ecxfeature) + movl %edx,RELOC(cpu_feature) + + movl $0x0a,%eax + cpuid + movl %eax,RELOC(_C_LABEL(cpu_perf_eax)) + movl %ebx,RELOC(_C_LABEL(cpu_perf_ebx)) + movl %edx,RELOC(_C_LABEL(cpu_perf_edx)) + + movl $0x80000001, %eax + cpuid + andl $CPUID_NXE, %edx /* other bits may clash */ + jz cont + + /* + * We have NX, set pg_nx accordingly. 
+ * NX bit is bit 63 (bit 31 of the second 32 bit dword) - need + * to use 32 bit registers here + */ + pushl %edx + movl RELOC((pg_nx + 4)), %edx /* Second dword */ + orl $0x80000000, %edx /* Bit 31 (really 63) */ + movl %edx, RELOC((pg_nx + 4)) + popl %edx +cont: + orl %edx, RELOC(cpu_feature) + + movl $0x80000007,%eax + cpuid + movl %edx,RELOC(_C_LABEL(cpu_apmi_edx)) + + /* + * Finished with old stack; load new %esp now instead of later so we + * can trace this code without having to worry about the trace trap + * clobbering the memory test or the zeroing of the bss+bootstrap page + * tables. + * + * The boot program should check: + * text+data <= &stack_variable - more_space_for_stack + * text+data+bss+pad+space_for_page_tables <= end_of_memory + * Oops, the gdt is in the carcass of the boot program so clearing + * the rest of memory is still not possible. + */ + movl $RELOC(tmpstk),%esp + +/* + * Virtual address space of kernel: + * + * text | data | bss | [syms] | page dir | proc0 kstack | L1 ptp | L2 ptp | L3 + * 0 1 2 3 + */ + +#if L2_SLOT_KERNBASE > 0 +#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1)) +#else +#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1) +#endif + +#if L3_SLOT_KERNBASE > 0 +#define TABLE_L3_ENTRIES (2 * NKL3_KIMG_ENTRIES) +#else +#define TABLE_L3_ENTRIES NKL3_KIMG_ENTRIES +#endif + + +#define PROC0_PML4_OFF 0 +#define PROC0_STK_OFF (PROC0_PML4_OFF + NBPG) +#define PROC0_PTP3_OFF (PROC0_STK_OFF + UPAGES * NBPG) +#define PROC0_PTP2_OFF (PROC0_PTP3_OFF + NKL4_KIMG_ENTRIES * NBPG) +#define PROC0_PTP1_OFF (PROC0_PTP2_OFF + TABLE_L3_ENTRIES * NBPG) +#define PROC0_DMP3_OFF (PROC0_PTP1_OFF + TABLE_L2_ENTRIES * NBPG) +#define PROC0_DMP2_OFF (PROC0_DMP3_OFF + NDML3_ENTRIES * NBPG) +#define TABLESIZE \ + ((NKL4_KIMG_ENTRIES + TABLE_L3_ENTRIES + TABLE_L2_ENTRIES + 1 + UPAGES + \ + NDML3_ENTRIES + NDML2_ENTRIES) * NBPG) + +#define fillkpt \ +1: movl %eax,(%ebx) ; /* store phys addr */ \ + movl $0,4(%ebx) ; /* upper 32 bits 0 */ \ + addl $8,%ebx ; /* next pte/pde */ \ + addl $NBPG,%eax ; /* next phys page */ \ + loop 1b ; /* till finished */ + + +#define fillkpt_nx \ + pushl %ebp ; /* save */ \ +1: movl %eax,(%ebx) ; /* store phys addr */ \ + movl RELOC((pg_nx + 4)), %ebp ; /* NX bit? */ \ + movl %ebp,4(%ebx) ; /* upper 32 bits */ \ + addl $8,%ebx ; /* next pte/pde */ \ + addl $NBPG,%eax ; /* next phys page */ \ + loop 1b ; /* till finished */ \ + popl %ebp + + /* Find end of kernel image. */ + movl $RELOC(end),%edi +#if (NKSYMS || defined(DDB)) + /* Save the symbols (if loaded). */ + movl RELOC(esym),%eax + testl %eax,%eax + jz 1f + subl $KERNBASE_LO,%eax /* XXX */ + /* Page tables must be after symbols and after kernel image. */ + cmpl %eax,%edi + jg 1f + movl %eax,%edi +1: +#endif + /* Clear tables */ + movl %edi,%esi + addl $PGOFSET,%esi + andl $~PGOFSET,%esi + + movl %esi,%edi + xorl %eax,%eax + cld + movl $TABLESIZE,%ecx + shrl $2,%ecx + rep + stosl + + leal (PROC0_PTP1_OFF)(%esi), %ebx + + /* + * Compute etext - KERNBASE. This can't be > 4G, or we can't deal + * with it anyway, since we can't load it in 32 bit mode. So use + * the bottom 32 bits. + */ + movl $RELOC(etext),%edx + addl $PGOFSET,%edx + andl $~PGOFSET,%edx + + /* + * Skip the first MB. 
+ */ + movl $(KERNTEXTOFF_LO - KERNBASE_LO),%eax + movl %eax,%ecx + shrl $(PGSHIFT-3),%ecx /* ((n >> PGSHIFT) << 3) for # pdes */ + addl %ecx,%ebx + + /* Map kernel text RO, X */ + movl %edx,%ecx + subl %eax,%ecx + shrl $PGSHIFT,%ecx + orl $(PG_V|PG_KR),%eax + fillkpt + + /* Map .rodata RO, NX */ + movl $RELOC(__rodata_start), %eax + movl $RELOC(erodata), %ecx + addl $PGOFSET, %ecx + andl $~PGOFSET, %ecx + subl %eax, %ecx + shrl $PGSHIFT, %ecx + orl $(PG_V|PG_KR), %eax + fillkpt_nx + + /* Map the data and BSS sections RW, NX */ + movl $RELOC(__data_start), %eax + movl $RELOC(__kernel_bss_end),%ecx + addl $PGOFSET, %ecx + andl $~PGOFSET, %ecx + subl %eax, %ecx + shrl $PGSHIFT,%ecx + orl $(PG_V|PG_KW), %eax + fillkpt_nx + + /* Map "hole" at end of BSS RO, NX */ + movl $RELOC(__kernel_bss_end), %eax + movl $RELOC(end), %ecx + addl $PGOFSET, %ecx + andl $~PGOFSET, %ecx + cmpl %eax, %ecx + je map_syms + subl %eax, %ecx + shrl $PGSHIFT, %ecx + orl $(PG_V|PG_KR), %eax + fillkpt_nx + +map_syms: + /* Map symbol space RO, NX */ + movl $RELOC(end), %eax + movl %esi, %ecx + addl $PGOFSET, %ecx + andl $~PGOFSET, %ecx + cmpl %eax, %ecx + je map_tables + subl %eax, %ecx + shrl $PGSHIFT, %ecx + orl $(PG_V|PG_KR), %eax + fillkpt_nx + +map_tables: + /* Map the bootstrap tables RW, NX */ + movl %esi, %edx + leal (PG_V|PG_KW)(%edx),%eax + movl $TABLESIZE,%ecx + shrl $PGSHIFT,%ecx + fillkpt_nx + + /* Map ISA I/O mem (later atdevbase) RW, NX */ + movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax + movl $(IOM_SIZE>>PGSHIFT),%ecx + fillkpt_nx + + /* Set up level 2 pages (RWX) */ + leal (PROC0_PTP2_OFF)(%esi),%ebx + leal (PROC0_PTP1_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl $(NKL2_KIMG_ENTRIES+1),%ecx + fillkpt + +#if L2_SLOT_KERNBASE > 0 + /* If needed, set up L2 entries for actual kernel mapping (RWX) */ + leal (PROC0_PTP2_OFF+ L2_SLOT_KERNBASE*8)(%esi),%ebx + leal (PROC0_PTP1_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl $(NKL2_KIMG_ENTRIES+1),%ecx + fillkpt +#endif + + /* Set up level 3 pages (RWX) */ + leal (PROC0_PTP3_OFF)(%esi),%ebx + leal (PROC0_PTP2_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl $NKL3_KIMG_ENTRIES,%ecx + fillkpt + +#if L3_SLOT_KERNBASE > 0 + /* If needed, set up L3 entries for actual kernel mapping (RWX) */ + leal (PROC0_PTP3_OFF+ L3_SLOT_KERNBASE*8)(%esi),%ebx + leal (PROC0_PTP2_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl $NKL3_KIMG_ENTRIES,%ecx + fillkpt +#endif + + /* Set up top level entries for identity mapping (RWX) */ + leal (PROC0_PML4_OFF)(%esi),%ebx + leal (PROC0_PTP3_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl $NKL4_KIMG_ENTRIES,%ecx + fillkpt + + /* Set up top level entries for actual kernel mapping (RWX) */ + leal (PROC0_PML4_OFF + L4_SLOT_KERNBASE*8)(%esi),%ebx + leal (PROC0_PTP3_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl $NKL4_KIMG_ENTRIES,%ecx + fillkpt + + /* + * Map the first 4 GB with the direct map. We'll map the rest + * in pmap_bootstrap. But we always need the first 4GB during + * bootstrap. The direct map is mapped RW, NX. We also change + * the permissions on the 2MB pages corresponding to the kernel + * PAs to RO to prevent someone writing to the kernel area + * via the direct map. 
+ */ + leal (PROC0_DMP2_OFF)(%esi), %ebx + xorl %eax, %eax + movl $(NDML2_ENTRIES * NPDPG), %ecx +1: orl $(PG_V|PG_KW|PG_PS|PG_G), %eax + cmpl $__kernel_base_phys, %eax + jl store_pte + cmpl $__kernel_end_phys, %eax + jg store_pte + andl $(~PG_KW), %eax +store_pte: + movl %eax, (%ebx) + pushl %ebp + movl RELOC((pg_nx + 4)), %ebp + movl %ebp, 4(%ebx) + popl %ebp + addl $8, %ebx + addl $NBPD_L2, %eax + loop 1b + + leal (PROC0_DMP3_OFF)(%esi), %ebx + leal (PROC0_DMP2_OFF)(%esi), %eax + orl $(PG_V|PG_KW), %eax + movl $NDML2_ENTRIES, %ecx + fillkpt_nx + + leal (PROC0_PML4_OFF + PDIR_SLOT_DIRECT * 8)(%esi), %ebx + leal (PROC0_DMP3_OFF)(%esi), %eax + orl $(PG_V|PG_KW), %eax + movl $NDML3_ENTRIES, %ecx + fillkpt_nx + + /* Install recursive top level PDE */ + leal (PROC0_PML4_OFF + PDIR_SLOT_PTE*8)(%esi),%ebx + leal (PROC0_PML4_OFF)(%esi),%eax + orl $(PG_V|PG_KW),%eax + movl %eax,(%ebx) + pushl %ebp + movl RELOC((pg_nx + 4)), %ebp + movl %ebp, 4(%ebx) + popl %ebp + + /* Save phys. addr of PTD, for libkvm. */ + movl $RELOC(PTDpaddr),%ebp + movl %esi,(%ebp) + movl $0,4(%ebp) + + /* + * Startup checklist: + * 1. Enable PAE (and SSE while here). + */ + movl %cr4,%eax + orl $(CR4_DEFAULT),%eax + movl %eax,%cr4 + + /* + * 2. Set Long Mode Enable in EFER. Also enable the + * syscall extensions and NX (if available). + */ + movl $MSR_EFER,%ecx + rdmsr + xorl %eax,%eax /* XXX */ + orl $(EFER_LME|EFER_SCE),%eax + movl RELOC((pg_nx + 4)), %ebx + cmpl $0, %ebx + je write_efer + orl $(EFER_NXE), %eax +write_efer: + wrmsr + + /* + * 3. Load %cr3 with pointer to PML4. + */ + movl %esi,%eax + movl %eax,%cr3 + + /* + * 4. Enable paging and the rest of it. + */ + movl %cr0,%eax + orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP),%eax + movl %eax,%cr0 + jmp compat +compat: + + /* + * 5. + * Not quite done yet, we're now in a compatibility segment, + * in legacy mode. We must jump to a long mode segment. + * Need to set up a temporary GDT with a long mode segment + * in it to do that. + */ + + movl $RELOC(gdt64),%eax + lgdt (%eax) + movl $RELOC(farjmp64),%eax + ljmp *(%eax) + +.code64 +longmode: + /* + * 6. + * Finally, we're in long mode. However, we're still + * in the identity mapped area (could not jump out + * of that earlier because it would have been a > 32bit + * jump). We can do that now, so here we go. + */ + movabsq $longmode_hi,%rax + jmp *%rax +longmode_hi: + /* + * We have arrived. + * There's no need anymore for the identity mapping in low + * memory, remove it. + */ + movq $KERNBASE,%r8 + +#if L2_SLOT_KERNBASE > 0 + movq $(NKL2_KIMG_ENTRIES+1),%rcx + leaq (PROC0_PTP2_OFF)(%rsi),%rbx + addq %r8, %rbx +1: movq $0 ,(%rbx) + addq $8,%rbx + loop 1b +#endif + +#if L3_SLOT_KERNBASE > 0 + movq $NKL3_KIMG_ENTRIES,%rcx + leaq (PROC0_PTP3_OFF)(%rsi),%rbx + addq %r8, %rbx +1: movq $0 ,(%rbx) + addq $8,%rbx + loop 1b +#endif + + movq $NKL4_KIMG_ENTRIES,%rcx + leaq (PROC0_PML4_OFF)(%rsi),%rbx # old, phys address of PML4 + addq %r8, %rbx # new, virtual address of PML4 +1: movq $0, (%rbx) + addq $8,%rbx + loop 1b + + /* Relocate atdevbase. */ + movq $(TABLESIZE+KERNBASE),%rdx + addq %rsi,%rdx + movq %rdx,_C_LABEL(atdevbase)(%rip) + + /* Record start of symbols */ + movq $__kernel_bss_end, _C_LABEL(ssym)(%rip) + + /* Set up bootstrap stack. 
*/ + leaq (PROC0_STK_OFF)(%rsi),%rax + addq %r8,%rax + movq %rax,_C_LABEL(proc0paddr)(%rip) + leaq (USPACE-FRAMESIZE)(%rax),%rsp + movq %rsi,PCB_CR3(%rax) # pcb->pcb_cr3 + xorq %rbp,%rbp # mark end of frames + + xorw %ax,%ax + movw %ax,%gs + movw %ax,%fs + + /* XXX merge these */ + leaq TABLESIZE(%rsi),%rdi + call _C_LABEL(init_x86_64) + + call _C_LABEL(main) + + .section .codepatch,"a" + .align 8, 0xcc + .globl _C_LABEL(codepatch_begin) +_C_LABEL(codepatch_begin): + .previous + + .section .codepatchend,"a" + .globl _C_LABEL(codepatch_end) +_C_LABEL(codepatch_end): + .previous + + .data + +farjmp64: + .long longmode-KERNBASE + .word GSEL(GCODE_SEL, SEL_KPL) + + .globl _C_LABEL(cpu_private) + .comm _C_LABEL(cpu_private),NBPG,NBPG + +/* XXX we want some guard here */ + .space 512 +tmpstk: + diff --git a/sys/arch/amd64/conf/Makefile.amd64 b/sys/arch/amd64/conf/Makefile.amd64 index 6fd2206d2ae..8e122ece942 100644 --- a/sys/arch/amd64/conf/Makefile.amd64 +++ b/sys/arch/amd64/conf/Makefile.amd64 @@ -1,4 +1,4 @@ -# $OpenBSD: Makefile.amd64,v 1.76 2017/05/08 00:13:38 dlg Exp $ +# $OpenBSD: Makefile.amd64,v 1.77 2017/05/31 19:18:18 deraadt Exp $ # For instructions on building kernels consult the config(8) and options(4) # manual pages. @@ -30,6 +30,7 @@ CWARNFLAGS= -Werror -Wall -Wimplicit-function-declaration \ CMACHFLAGS= -mcmodel=kernel -mno-red-zone -mno-sse2 -mno-sse -mno-3dnow \ -mno-mmx -msoft-float -fno-omit-frame-pointer CMACHFLAGS+= -ffreestanding ${NOPIE_FLAGS} +SORTR= sort -R .if ${IDENT:M-DNO_PROPOLICE} CMACHFLAGS+= -fno-stack-protector .endif @@ -38,6 +39,7 @@ CMACHFLAGS+= -msave-args .endif .if ${IDENT:M-DSMALL_KERNEL} CMACHFLAGS+= -Wa,-n +SORTR= cat .endif DEBUG?= -g @@ -73,12 +75,13 @@ NORMAL_S= ${CC} ${AFLAGS} ${CPPFLAGS} -c $< # ${SYSTEM_LD_HEAD} # ${SYSTEM_LD} swapxxx.o # ${SYSTEM_LD_TAIL} -SYSTEM_HEAD= locore.o param.o ioconf.o -SYSTEM_OBJ= ${SYSTEM_HEAD} ${OBJS} +SYSTEM_HEAD= locore0.o gap.o +SYSTEM_OBJ= ${SYSTEM_HEAD} ${OBJS} param.o ioconf.o SYSTEM_DEP= Makefile ${SYSTEM_OBJ} ${LDSCRIPT} SYSTEM_LD_HEAD= @rm -f $@ SYSTEM_LD= @echo ${LD} ${LINKFLAGS} -o $@ '$${SYSTEM_HEAD} vers.o $${OBJS}'; \ - ${LD} ${LINKFLAGS} -o $@ ${SYSTEM_HEAD} vers.o ${OBJS} + echo ${OBJS} param.o ioconf.o vers.o | tr " " "\n" | ${SORTR} > lorder; \ + ${LD} ${LINKFLAGS} -o $@ ${SYSTEM_HEAD} `cat lorder` SYSTEM_LD_TAIL= @${SIZE} $@; chmod 755 $@ .if ${DEBUG} == "-g" @@ -122,8 +125,15 @@ vers.o: ${SYSTEM_DEP} ${SYSTEM_SWAP_DEP} sh $S/conf/newvers.sh ${CC} ${CFLAGS} ${CPPFLAGS} ${PROF} -c vers.c +gap.S: ${SYSTEM_SWAP_DEP} Makefile + sh $S/conf/makegap.sh 0xcc > gap.S + +gap.o: gap.S + ${CC} ${AFLAGS} ${CPPFLAGS} ${PROF} -c gap.S + clean: - rm -f *bsd *bsd.gdb *.[dio] [a-z]*.s assym.* ${DB_STRUCTINFO} param.c + rm -f *bsd *bsd.gdb *.[dio] [a-z]*.s assym.* ${DB_STRUCTINFO} \ + gap.S lorder param.c cleandir: clean rm -f Makefile *.h ioconf.c options machine ${_mach} vers.c @@ -135,8 +145,9 @@ db_structinfo.h: $S/ddb/db_structinfo.c $S/ddb/parse_structinfo.pl objdump -g db_structinfo.o | perl $S/ddb/parse_structinfo.pl > $@ rm -f db_structinfo.o -locore.o: ${_machdir}/${_mach}/locore.S assym.h -mutex.o vector.o copy.o spl.o mptramp.o acpi_wakecode.o vmm_support.o: assym.h +locore0.o: ${_machdir}/${_mach}/locore0.S assym.h +locore.o mutex.o vector.o copy.o spl.o: assym.h +mptramp.o acpi_wakecode.o vmm_support.o: assym.h # The install target can be redefined by putting a # install-kernel-${MACHINE_NAME} target into /etc/mk.conf diff --git a/sys/arch/amd64/conf/files.amd64 b/sys/arch/amd64/conf/files.amd64 index 
2700c7c7158..bfbd6153d7a 100644 --- a/sys/arch/amd64/conf/files.amd64 +++ b/sys/arch/amd64/conf/files.amd64 @@ -1,4 +1,4 @@ -# $OpenBSD: files.amd64,v 1.88 2017/04/30 13:04:49 mpi Exp $ +# $OpenBSD: files.amd64,v 1.89 2017/05/31 19:18:19 deraadt Exp $ maxpartitions 16 maxusers 2 16 128 @@ -11,6 +11,7 @@ file arch/amd64/amd64/machdep.c file arch/amd64/amd64/hibernate_machdep.c hibernate file arch/amd64/amd64/identcpu.c file arch/amd64/amd64/via.c +file arch/amd64/amd64/locore.S file arch/amd64/amd64/aes_intel.S crypto file arch/amd64/amd64/aesni.c crypto file arch/amd64/amd64/amd64errata.c diff --git a/sys/arch/amd64/conf/ld.script b/sys/arch/amd64/conf/ld.script index b184a758160..c4dcae52ca4 100644 --- a/sys/arch/amd64/conf/ld.script +++ b/sys/arch/amd64/conf/ld.script @@ -1,4 +1,4 @@ -/* $OpenBSD: ld.script,v 1.4 2016/09/03 13:13:07 deraadt Exp $ */ +/* $OpenBSD: ld.script,v 1.5 2017/05/31 19:18:19 deraadt Exp $ */ /* * Copyright (c) 2009 Tobias Weingartner <weingart@tepid.org> @@ -52,7 +52,7 @@ SECTIONS __text_start = ABSOLUTE(.) & 0xfffffffffffff000; __text_size = SIZEOF(.text); __text_load = LOADADDR(.text); - locore.o(.text) + locore0.o(.text) *(.text .text.*) } :text PROVIDE (__etext = .); diff --git a/sys/arch/i386/conf/Makefile.i386 b/sys/arch/i386/conf/Makefile.i386 index f45631b23a8..687fdba75a4 100644 --- a/sys/arch/i386/conf/Makefile.i386 +++ b/sys/arch/i386/conf/Makefile.i386 @@ -1,4 +1,4 @@ -# $OpenBSD: Makefile.i386,v 1.103 2017/05/28 13:20:37 jsg Exp $ +# $OpenBSD: Makefile.i386,v 1.104 2017/05/31 19:18:18 deraadt Exp $ # For instructions on building kernels consult the config(8) and options(4) # manual pages. @@ -29,9 +29,13 @@ CWARNFLAGS= -Werror -Wall -Wimplicit-function-declaration \ CMACHFLAGS= CMACHFLAGS+= -ffreestanding ${NOPIE_FLAGS} +SORTR= sort -R .if ${IDENT:M-DNO_PROPOLICE} CMACHFLAGS+= -fno-stack-protector .endif + .if ${IDENT:M-DSMALL_KERNEL} +SORTR= cat +.endif DEBUG?= -g COPTS?= -O2 @@ -72,12 +76,13 @@ NORMAL_S= ${CC} ${AFLAGS} ${CPPFLAGS} -c $< # ${SYSTEM_LD_HEAD} # ${SYSTEM_LD} swapxxx.o # ${SYSTEM_LD_TAIL} -SYSTEM_HEAD= locore.o param.o ioconf.o -SYSTEM_OBJ= ${SYSTEM_HEAD} ${OBJS} +SYSTEM_HEAD= locore0.o gap.o +SYSTEM_OBJ= ${SYSTEM_HEAD} ${OBJS} param.o ioconf.o SYSTEM_DEP= Makefile ${SYSTEM_OBJ} ${LDSCRIPT} SYSTEM_LD_HEAD= @rm -f $@ SYSTEM_LD= @echo ${LD} ${LINKFLAGS} -o $@ '$${SYSTEM_HEAD} vers.o $${OBJS}'; \ - ${LD} ${LINKFLAGS} -o $@ ${SYSTEM_HEAD} vers.o ${OBJS} + echo ${OBJS} param.o ioconf.o vers.o | tr " " "\n" | ${SORTR} > lorder; \ + ${LD} ${LINKFLAGS} -o $@ ${SYSTEM_HEAD} `cat lorder` SYSTEM_LD_TAIL= @${SIZE} $@; chmod 755 $@ .if ${DEBUG} == "-g" @@ -121,8 +126,15 @@ vers.o: ${SYSTEM_DEP} ${SYSTEM_SWAP_DEP} sh $S/conf/newvers.sh ${CC} ${CFLAGS} ${CPPFLAGS} ${PROF} -c vers.c +gap.S: ${SYSTEM_SWAP_DEP} Makefile + sh $S/conf/makegap.sh 0xcc > gap.S + +gap.o: gap.S + ${CC} ${AFLAGS} ${CPPFLAGS} ${PROF} -c gap.S + clean: - rm -f *bsd *bsd.gdb *.[dio] [a-z]*.s assym.* ${DB_STRUCTINFO} param.c + rm -f *bsd *bsd.gdb *.[dio] [a-z]*.s assym.* ${DB_STRUCTINFO} \ + gap.S lorder param.c cleandir: clean rm -f Makefile *.h ioconf.c options machine ${_mach} vers.c @@ -134,8 +146,9 @@ db_structinfo.h: $S/ddb/db_structinfo.c $S/ddb/parse_structinfo.pl objdump -g db_structinfo.o | perl $S/ddb/parse_structinfo.pl > $@ rm -f db_structinfo.o -locore.o: ${_machdir}/${_mach}/locore.s assym.h -in_cksum.o mptramp.o kvm86call.o acpi_wakecode.o vmm_support.o: assym.h +locore0.o: ${_machdir}/${_mach}/locore0.S assym.h +locore.o mutex.o in_cksum.o mptramp.o: assym.h 
+kvm86call.o acpi_wakecode.o vmm_support.o: assym.h # The install target can be redefined by putting a # install-kernel-${MACHINE_NAME} target into /etc/mk.conf diff --git a/sys/arch/i386/conf/files.i386 b/sys/arch/i386/conf/files.i386 index 2ca9e1264fd..4388e969b29 100644 --- a/sys/arch/i386/conf/files.i386 +++ b/sys/arch/i386/conf/files.i386 @@ -1,4 +1,4 @@ -# $OpenBSD: files.i386,v 1.232 2017/04/30 13:04:49 mpi Exp $ +# $OpenBSD: files.i386,v 1.233 2017/05/31 19:18:18 deraadt Exp $ # # new style config file for i386 architecture # @@ -23,6 +23,7 @@ file arch/i386/i386/in_cksum.s file arch/i386/i386/machdep.c file arch/i386/i386/hibernate_machdep.c hibernate file arch/i386/i386/via.c +file arch/i386/i386/locore.s file arch/i386/i386/amd64errata.c !small_kernel file arch/i386/i386/longrun.c !small_kernel file arch/i386/i386/mem.c diff --git a/sys/arch/i386/conf/ld.script b/sys/arch/i386/conf/ld.script index 68f1bee0090..42444b0c31f 100644 --- a/sys/arch/i386/conf/ld.script +++ b/sys/arch/i386/conf/ld.script @@ -1,4 +1,4 @@ -/* $OpenBSD: ld.script,v 1.5 2016/10/18 18:44:47 deraadt Exp $ */ +/* $OpenBSD: ld.script,v 1.6 2017/05/31 19:18:18 deraadt Exp $ */ /* * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org> @@ -53,7 +53,7 @@ SECTIONS __text_start = ABSOLUTE(.) & 0xfffff000; __text_size = SIZEOF(.text); __text_load = LOADADDR(.text); - locore.o(.text) + locore0.o(.text) *(.text .text.*) } :text PROVIDE (__etext = .); diff --git a/sys/arch/i386/i386/autoconf.c b/sys/arch/i386/i386/autoconf.c index 7a00d6b386f..b9e101bdc8a 100644 --- a/sys/arch/i386/i386/autoconf.c +++ b/sys/arch/i386/i386/autoconf.c @@ -1,4 +1,4 @@ -/* $OpenBSD: autoconf.c,v 1.101 2016/06/08 17:24:44 tedu Exp $ */ +/* $OpenBSD: autoconf.c,v 1.102 2017/05/31 19:18:18 deraadt Exp $ */ /* $NetBSD: autoconf.c,v 1.20 1996/05/03 19:41:56 christos Exp $ */ /*- @@ -109,6 +109,18 @@ void viac3_crypto_setup(void); extern int i386_has_xcrypt; #endif +void +unmap_startup(void) +{ + extern void *kernel_text, *endboot; + vaddr_t p = (vaddr_t)&kernel_text; + + do { + pmap_kremove(p, PAGE_SIZE); + p += PAGE_SIZE; + } while (p < (vaddr_t)&endboot); +} + /* * Determine i/o configuration for a machine. */ @@ -154,6 +166,8 @@ cpu_configure(void) proc0.p_addr->u_pcb.pcb_cr0 = rcr0(); + unmap_startup(); + #ifdef MULTIPROCESSOR /* propagate TSS configuration to the idle pcb's. */ cpu_init_idle_pcbs(); @@ -166,6 +180,7 @@ cpu_configure(void) */ cold = 0; + /* * At this point the RNG is running, and if FSXR is set we can * use it. Here we setup a periodic timeout to collect the data. diff --git a/sys/arch/i386/i386/locore.s b/sys/arch/i386/i386/locore.s index bf1e847bbcb..21b47049093 100644 --- a/sys/arch/i386/i386/locore.s +++ b/sys/arch/i386/i386/locore.s @@ -1,4 +1,4 @@ -/* $OpenBSD: locore.s,v 1.174 2017/05/30 15:11:32 deraadt Exp $ */ +/* $OpenBSD: locore.s,v 1.175 2017/05/31 19:18:18 deraadt Exp $ */ /* $NetBSD: locore.s,v 1.145 1996/05/03 19:41:19 christos Exp $ */ /*- @@ -246,417 +246,7 @@ _C_LABEL(proc0paddr): .long 0 _C_LABEL(PTDpaddr): .long 0 # paddr of PTD, for libkvm _C_LABEL(PTDsize): .long NBPG # size of PTD, for libkvm - .space 512 -tmpstk: - - -#define RELOC(x) ((x) - KERNBASE) - .text - .globl start - .globl _C_LABEL(kernel_text) - _C_LABEL(kernel_text) = KERNTEXTOFF -start: movw $0x1234,0x472 # warm boot - - /* - * Load parameters from stack (howto, bootdev, unit, bootapiver, esym). - * note: (%esp) is return address of boot - * (If we want to hold onto /boot, it's physical %esp up to _end.) 
- */ - movl 4(%esp),%eax - movl %eax,RELOC(_C_LABEL(boothowto)) - movl 8(%esp),%eax - movl %eax,RELOC(_C_LABEL(bootdev)) - movl 16(%esp),%eax - testl %eax,%eax - jz 1f - addl $KERNBASE,%eax -1: movl %eax,RELOC(_C_LABEL(esym)) - movl $__kernel_bss_end, RELOC(_C_LABEL(ssym)) - - movl 12(%esp),%eax - movl %eax,RELOC(_C_LABEL(bootapiver)) - movl 28(%esp), %eax - movl %eax, RELOC(_C_LABEL(bootargc)) - movl 32(%esp), %eax - movl %eax, RELOC(_C_LABEL(bootargv)) - - /* First, reset the PSL. */ - pushl $PSL_MBO - popfl - - /* Clear segment registers; null until proc0 setup */ - xorl %eax,%eax - movw %ax,%fs - movw %ax,%gs - - /* Find out our CPU type. */ - -try386: /* Try to toggle alignment check flag; does not exist on 386. */ - pushfl - popl %eax - movl %eax,%ecx - orl $PSL_AC,%eax - pushl %eax - popfl - pushfl - popl %eax - xorl %ecx,%eax - andl $PSL_AC,%eax - pushl %ecx - popfl - - testl %eax,%eax - jnz try486 - - /* - * Try the test of a NexGen CPU -- ZF will not change on a DIV - * instruction on a NexGen, it will on an i386. Documented in - * Nx586 Processor Recognition Application Note, NexGen, Inc. - */ - movl $0x5555,%eax - xorl %edx,%edx - movl $2,%ecx - divl %ecx - jnz is386 - -isnx586: - /* - * Don't try cpuid, as Nx586s reportedly don't support the - * PSL_ID bit. - */ - movl $CPU_NX586,RELOC(_C_LABEL(cpu)) - jmp 2f - -is386: - movl $CPU_386,RELOC(_C_LABEL(cpu)) - jmp 2f - -try486: /* Try to toggle identification flag; does not exist on early 486s. */ - pushfl - popl %eax - movl %eax,%ecx - xorl $PSL_ID,%eax - pushl %eax - popfl - pushfl - popl %eax - xorl %ecx,%eax - andl $PSL_ID,%eax - pushl %ecx - popfl - - testl %eax,%eax - jnz try586 -is486: movl $CPU_486,RELOC(_C_LABEL(cpu)) - - /* - * Check Cyrix CPU - * Cyrix CPUs do not change the undefined flags following - * execution of the divide instruction which divides 5 by 2. - * - * Note: CPUID is enabled on M2, so it passes another way. - */ - pushfl - movl $0x5555, %eax - xorl %edx, %edx - movl $2, %ecx - clc - divl %ecx - jnc trycyrix486 - popfl - jmp 2f -trycyrix486: - movl $CPU_6x86,RELOC(_C_LABEL(cpu)) # set CPU type - /* - * Check for Cyrix 486 CPU by seeing if the flags change during a - * divide. This is documented in the Cx486SLC/e SMM Programmer's - * Guide. - */ - xorl %edx,%edx - cmpl %edx,%edx # set flags to known state - pushfl - popl %ecx # store flags in ecx - movl $-1,%eax - movl $4,%ebx - divl %ebx # do a long division - pushfl - popl %eax - xorl %ecx,%eax # are the flags different? - testl $0x8d5,%eax # only check C|PF|AF|Z|N|V - jne 2f # yes; must not be Cyrix CPU - movl $CPU_486DLC,RELOC(_C_LABEL(cpu)) # set CPU type - - /* Disable caching of the ISA hole only. */ - invd - movb $CCR0,%al # Configuration Register index (CCR0) - outb %al,$0x22 - inb $0x23,%al - orb $(CCR0_NC1|CCR0_BARB),%al - movb %al,%ah - movb $CCR0,%al - outb %al,$0x22 - movb %ah,%al - outb %al,$0x23 - invd - - jmp 2f - -try586: /* Use the `cpuid' instruction. 
*/ - xorl %eax,%eax - cpuid - movl %eax,RELOC(_C_LABEL(cpuid_level)) - movl %ebx,RELOC(_C_LABEL(cpu_vendor)) # store vendor string - movl %edx,RELOC(_C_LABEL(cpu_vendor))+4 - movl %ecx,RELOC(_C_LABEL(cpu_vendor))+8 - movl $0, RELOC(_C_LABEL(cpu_vendor))+12 - - movl $1,%eax - xorl %ecx,%ecx - cpuid - movl %eax,RELOC(_C_LABEL(cpu_id)) # store cpu_id and features - movl %ebx,RELOC(_C_LABEL(cpu_miscinfo)) - movl %edx,RELOC(_C_LABEL(cpu_feature)) - movl %ecx,RELOC(_C_LABEL(cpu_ecxfeature)) - - movl RELOC(_C_LABEL(cpuid_level)),%eax - cmp $2,%eax - jl 1f - - movl $2,%eax - cpuid - - movl %eax,RELOC(_C_LABEL(cpu_cache_eax)) - movl %ebx,RELOC(_C_LABEL(cpu_cache_ebx)) - movl %ecx,RELOC(_C_LABEL(cpu_cache_ecx)) - movl %edx,RELOC(_C_LABEL(cpu_cache_edx)) - - movl $0x0a,%eax - cpuid - movl %eax,RELOC(_C_LABEL(cpu_perf_eax)) - movl %ebx,RELOC(_C_LABEL(cpu_perf_ebx)) - movl %edx,RELOC(_C_LABEL(cpu_perf_edx)) - -1: - /* Check if brand identification string is supported */ - movl $0x80000000,%eax - cpuid - cmpl $0x80000000,%eax - jbe 2f - movl $0x80000001,%eax - cpuid - movl %eax,RELOC(_C_LABEL(ecpu_eaxfeature)) - movl %edx,RELOC(_C_LABEL(ecpu_feature)) - movl %ecx,RELOC(_C_LABEL(ecpu_ecxfeature)) - movl $0x80000002,%eax - cpuid - movl %eax,RELOC(_C_LABEL(cpu_brandstr)) - movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+4 - movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+8 - movl %edx,RELOC(_C_LABEL(cpu_brandstr))+12 - movl $0x80000003,%eax - cpuid - movl %eax,RELOC(_C_LABEL(cpu_brandstr))+16 - movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+20 - movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+24 - movl %edx,RELOC(_C_LABEL(cpu_brandstr))+28 - movl $0x80000004,%eax - cpuid - movl %eax,RELOC(_C_LABEL(cpu_brandstr))+32 - movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+36 - movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+40 - andl $0x00ffffff,%edx /* Shouldn't be necessary */ - movl %edx,RELOC(_C_LABEL(cpu_brandstr))+44 - - movl $0x80000007,%eax - cpuid - movl %edx,RELOC(_C_LABEL(cpu_apmi_edx)) - -2: - /* - * Finished with old stack; load new %esp now instead of later so we - * can trace this code without having to worry about the trace trap - * clobbering the memory test or the zeroing of the bss+bootstrap page - * tables. - * - * The boot program should check: - * text+data <= &stack_variable - more_space_for_stack - * text+data+bss+pad+space_for_page_tables <= end_of_memory - * Oops, the gdt is in the carcass of the boot program so clearing - * the rest of memory is still not possible. - */ - movl $RELOC(tmpstk),%esp # bootstrap stack end location - -/* - * Virtual address space of kernel: - * - * text | data | bss | [syms] | proc0 kstack | page dir | Sysmap - * 0 1 2 6 - */ -#define PROC0STACK ((0) * NBPG) -#define PROC0PDIR (( UPAGES) * NBPG) -#define SYSMAP ((4+UPAGES) * NBPG) -#define TABLESIZE ((4+UPAGES) * NBPG) /* + _C_LABEL(nkpde) * NBPG */ - - /* Find end of kernel image. */ - movl $RELOC(_C_LABEL(end)),%edi -#if (NKSYMS || defined(DDB)) - /* Save the symbols (if loaded). */ - movl RELOC(_C_LABEL(esym)),%eax - testl %eax,%eax - jz 1f - subl $KERNBASE,%eax - movl %eax,%edi -1: -#endif - - /* Calculate where to start the bootstrap tables. */ - movl %edi,%esi # edi = esym ? esym : end - addl $PGOFSET, %esi # page align up - andl $~PGOFSET, %esi - - /* - * Calculate the size of the kernel page table directory, and - * how many entries it will have. - */ - movl RELOC(_C_LABEL(nkpde)),%ecx # get nkpde - cmpl $NKPTP_MIN,%ecx # larger than min? 
- jge 1f - movl $NKPTP_MIN,%ecx # set at min - jmp 2f -1: cmpl RELOC(_C_LABEL(nkptp_max)),%ecx # larger than max? - jle 2f - movl RELOC(_C_LABEL(nkptp_max)),%ecx -2: movl %ecx,RELOC(_C_LABEL(nkpde)) # and store it back - - /* Clear memory for bootstrap tables. */ - shll $PGSHIFT,%ecx - addl $TABLESIZE,%ecx - addl %esi,%ecx # end of tables - subl %edi,%ecx # size of tables - shrl $2,%ecx - xorl %eax, %eax - rep - stosl - -/* - * fillkpt - * eax = pte (page frame | control | status) - * ebx = page table address - * ecx = number of pages to map - */ -#define fillkpt \ -1: movl %eax,(%ebx) ; \ - addl $NBPG,%eax ; /* increment physical address */ \ - addl $4,%ebx ; /* next pte */ \ - loop 1b ; - -/* - * Build initial page tables. - */ - /* Calculate end of text segment, rounded to a page. */ - leal (RELOC(_C_LABEL(etext))+PGOFSET),%edx - andl $~PGOFSET,%edx - - /* Skip over the first 2MB. */ - movl $RELOC(KERNTEXTOFF),%eax - movl %eax,%ecx - shrl $PGSHIFT,%ecx - leal (SYSMAP)(%esi,%ecx,4),%ebx - - /* Map the kernel text read-only. */ - movl %edx,%ecx - subl %eax,%ecx - shrl $PGSHIFT,%ecx - orl $(PG_V|PG_KR),%eax - fillkpt - - /* Map the data, BSS, and bootstrap tables read-write. */ - leal (PG_V|PG_KW)(%edx),%eax - movl RELOC(_C_LABEL(nkpde)),%ecx - shll $PGSHIFT,%ecx - addl $TABLESIZE,%ecx - addl %esi,%ecx # end of tables - subl %edx,%ecx # subtract end of text - shrl $PGSHIFT,%ecx - fillkpt - - /* Map ISA I/O memory. */ - movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax # having these bits set - movl $(IOM_SIZE>>PGSHIFT),%ecx # for this many pte s, - fillkpt - -/* - * Construct a page table directory. - */ - movl RELOC(_C_LABEL(nkpde)),%ecx # count of pdes, - leal (PROC0PDIR+0*4)(%esi),%ebx # where temp maps! - leal (SYSMAP+PG_V|PG_KW|PG_U|PG_M)(%esi),%eax # pte for KPT in proc 0 - fillkpt - -/* - * Map kernel PDEs: this is the real mapping used - * after the temp mapping outlives its usefulness. - */ - movl RELOC(_C_LABEL(nkpde)),%ecx # count of pde s, - leal (PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx # map them high - leal (SYSMAP+PG_V|PG_KW|PG_U|PG_M)(%esi),%eax # pte for KPT in proc 0 - fillkpt - - /* Install a PDE recursively mapping page directory as a page table! */ - leal (PROC0PDIR+PG_V|PG_KW|PG_U|PG_M)(%esi),%eax # pte for ptd - movl %eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi) # recursive PD slot - addl $NBPG, %eax # pte for ptd[1] - movl %eax,(PROC0PDIR+(PDSLOT_PTE+1)*4)(%esi) # recursive PD slot - - /* Save phys. addr of PTD, for libkvm. */ - leal (PROC0PDIR)(%esi),%eax # phys address of ptd in proc 0 - movl %eax,RELOC(_C_LABEL(PTDpaddr)) - - /* Load base of page directory and enable mapping. */ - movl %eax,%cr3 # load ptd addr into mmu - movl %cr0,%eax # get control word - # enable paging & NPX emulation - orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP),%eax - movl %eax,%cr0 # and let's page NOW! - - pushl $begin # jump to high mem - ret - -begin: - /* Now running relocated at KERNBASE. Remove double mapping. */ - movl _C_LABEL(nkpde),%ecx # for this many pde s, - leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps! - addl $(KERNBASE), %ebx # now use relocated address -1: movl $0,(%ebx) - addl $4,%ebx # next pde - loop 1b - - /* Relocate atdevbase. */ - movl _C_LABEL(nkpde),%edx - shll $PGSHIFT,%edx - addl $(TABLESIZE+KERNBASE),%edx - addl %esi,%edx - movl %edx,_C_LABEL(atdevbase) - - /* Set up bootstrap stack. 
*/ - leal (PROC0STACK+KERNBASE)(%esi),%eax - movl %eax,_C_LABEL(proc0paddr) - leal (USPACE-FRAMESIZE)(%eax),%esp - leal (PROC0PDIR)(%esi),%ebx # phys address of ptd in proc 0 - movl %ebx,PCB_CR3(%eax) # pcb->pcb_cr3 - xorl %ebp,%ebp # mark end of frames - - movl _C_LABEL(nkpde),%eax - shll $PGSHIFT,%eax - addl $TABLESIZE,%eax - addl %esi,%eax # skip past stack and page tables - pushl %eax - call _C_LABEL(init386) # wire 386 chip for unix operation - addl $4,%esp - - call _C_LABEL(main) - /* NOTREACHED */ NENTRY(proc_trampoline) #ifdef MULTIPROCESSOR diff --git a/sys/arch/i386/i386/locore0.S b/sys/arch/i386/i386/locore0.S new file mode 100644 index 00000000000..664f1ebd300 --- /dev/null +++ b/sys/arch/i386/i386/locore0.S @@ -0,0 +1,482 @@ +/* $OpenBSD: locore0.S,v 1.1 2017/05/31 19:18:18 deraadt Exp $ */ +/* $NetBSD: locore.s,v 1.145 1996/05/03 19:41:19 christos Exp $ */ + +/*- + * Copyright (c) 1993, 1994, 1995 Charles M. Hannum. All rights reserved. + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)locore.s 7.3 (Berkeley) 5/13/91 + */ + +#include "npx.h" +#include "assym.h" +#include "apm.h" +#include "lapic.h" +#include "ksyms.h" + +#include <sys/errno.h> +#include <sys/syscall.h> + +#include <machine/codepatch.h> +#include <machine/cputypes.h> +#include <machine/param.h> +#include <machine/pte.h> +#include <machine/segments.h> +#include <machine/specialreg.h> +#include <machine/trap.h> + +#include <dev/isa/isareg.h> + +/* + * override user-land alignment before including asm.h + */ + +#define ALIGN_DATA .align 4 +#define ALIGN_TEXT .align 4,0x90 /* 4-byte boundaries, NOP-filled */ +#define _ALIGN_TEXT ALIGN_TEXT +#include <machine/asm.h> + +/* + * Initialization + */ + .data + + .space 512 +tmpstk: + + +#define RELOC(x) ((x) - KERNBASE) + + .text + .globl start + .globl _C_LABEL(kernel_text) + _C_LABEL(kernel_text) = KERNTEXTOFF +start: movw $0x1234,0x472 # warm boot + + /* + * Load parameters from stack (howto, bootdev, unit, bootapiver, esym). + * note: (%esp) is return address of boot + * (If we want to hold onto /boot, it's physical %esp up to _end.) + */ + movl 4(%esp),%eax + movl %eax,RELOC(_C_LABEL(boothowto)) + movl 8(%esp),%eax + movl %eax,RELOC(_C_LABEL(bootdev)) + movl 16(%esp),%eax + testl %eax,%eax + jz 1f + addl $KERNBASE,%eax +1: movl %eax,RELOC(_C_LABEL(esym)) + movl $__kernel_bss_end, RELOC(_C_LABEL(ssym)) + + movl 12(%esp),%eax + movl %eax,RELOC(_C_LABEL(bootapiver)) + movl 28(%esp), %eax + movl %eax, RELOC(_C_LABEL(bootargc)) + movl 32(%esp), %eax + movl %eax, RELOC(_C_LABEL(bootargv)) + + /* First, reset the PSL. */ + pushl $PSL_MBO + popfl + + /* Clear segment registers; null until proc0 setup */ + xorl %eax,%eax + movw %ax,%fs + movw %ax,%gs + + /* Find out our CPU type. */ + +try386: /* Try to toggle alignment check flag; does not exist on 386. */ + pushfl + popl %eax + movl %eax,%ecx + orl $PSL_AC,%eax + pushl %eax + popfl + pushfl + popl %eax + xorl %ecx,%eax + andl $PSL_AC,%eax + pushl %ecx + popfl + + testl %eax,%eax + jnz try486 + + /* + * Try the test of a NexGen CPU -- ZF will not change on a DIV + * instruction on a NexGen, it will on an i386. Documented in + * Nx586 Processor Recognition Application Note, NexGen, Inc. + */ + movl $0x5555,%eax + xorl %edx,%edx + movl $2,%ecx + divl %ecx + jnz is386 + +isnx586: + /* + * Don't try cpuid, as Nx586s reportedly don't support the + * PSL_ID bit. + */ + movl $CPU_NX586,RELOC(_C_LABEL(cpu)) + jmp 2f + +is386: + movl $CPU_386,RELOC(_C_LABEL(cpu)) + jmp 2f + +try486: /* Try to toggle identification flag; does not exist on early 486s. */ + pushfl + popl %eax + movl %eax,%ecx + xorl $PSL_ID,%eax + pushl %eax + popfl + pushfl + popl %eax + xorl %ecx,%eax + andl $PSL_ID,%eax + pushl %ecx + popfl + + testl %eax,%eax + jnz try586 +is486: movl $CPU_486,RELOC(_C_LABEL(cpu)) + + /* + * Check Cyrix CPU + * Cyrix CPUs do not change the undefined flags following + * execution of the divide instruction which divides 5 by 2. + * + * Note: CPUID is enabled on M2, so it passes another way. + */ + pushfl + movl $0x5555, %eax + xorl %edx, %edx + movl $2, %ecx + clc + divl %ecx + jnc trycyrix486 + popfl + jmp 2f +trycyrix486: + movl $CPU_6x86,RELOC(_C_LABEL(cpu)) # set CPU type + /* + * Check for Cyrix 486 CPU by seeing if the flags change during a + * divide. This is documented in the Cx486SLC/e SMM Programmer's + * Guide. 
+ */ + xorl %edx,%edx + cmpl %edx,%edx # set flags to known state + pushfl + popl %ecx # store flags in ecx + movl $-1,%eax + movl $4,%ebx + divl %ebx # do a long division + pushfl + popl %eax + xorl %ecx,%eax # are the flags different? + testl $0x8d5,%eax # only check C|PF|AF|Z|N|V + jne 2f # yes; must not be Cyrix CPU + movl $CPU_486DLC,RELOC(_C_LABEL(cpu)) # set CPU type + + /* Disable caching of the ISA hole only. */ + invd + movb $CCR0,%al # Configuration Register index (CCR0) + outb %al,$0x22 + inb $0x23,%al + orb $(CCR0_NC1|CCR0_BARB),%al + movb %al,%ah + movb $CCR0,%al + outb %al,$0x22 + movb %ah,%al + outb %al,$0x23 + invd + + jmp 2f + +try586: /* Use the `cpuid' instruction. */ + xorl %eax,%eax + cpuid + movl %eax,RELOC(_C_LABEL(cpuid_level)) + movl %ebx,RELOC(_C_LABEL(cpu_vendor)) # store vendor string + movl %edx,RELOC(_C_LABEL(cpu_vendor))+4 + movl %ecx,RELOC(_C_LABEL(cpu_vendor))+8 + movl $0, RELOC(_C_LABEL(cpu_vendor))+12 + + movl $1,%eax + xorl %ecx,%ecx + cpuid + movl %eax,RELOC(_C_LABEL(cpu_id)) # store cpu_id and features + movl %ebx,RELOC(_C_LABEL(cpu_miscinfo)) + movl %edx,RELOC(_C_LABEL(cpu_feature)) + movl %ecx,RELOC(_C_LABEL(cpu_ecxfeature)) + + movl RELOC(_C_LABEL(cpuid_level)),%eax + cmp $2,%eax + jl 1f + + movl $2,%eax + cpuid + + movl %eax,RELOC(_C_LABEL(cpu_cache_eax)) + movl %ebx,RELOC(_C_LABEL(cpu_cache_ebx)) + movl %ecx,RELOC(_C_LABEL(cpu_cache_ecx)) + movl %edx,RELOC(_C_LABEL(cpu_cache_edx)) + + movl $0x0a,%eax + cpuid + movl %eax,RELOC(_C_LABEL(cpu_perf_eax)) + movl %ebx,RELOC(_C_LABEL(cpu_perf_ebx)) + movl %edx,RELOC(_C_LABEL(cpu_perf_edx)) + +1: + /* Check if brand identification string is supported */ + movl $0x80000000,%eax + cpuid + cmpl $0x80000000,%eax + jbe 2f + movl $0x80000001,%eax + cpuid + movl %eax,RELOC(_C_LABEL(ecpu_eaxfeature)) + movl %edx,RELOC(_C_LABEL(ecpu_feature)) + movl %ecx,RELOC(_C_LABEL(ecpu_ecxfeature)) + movl $0x80000002,%eax + cpuid + movl %eax,RELOC(_C_LABEL(cpu_brandstr)) + movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+4 + movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+8 + movl %edx,RELOC(_C_LABEL(cpu_brandstr))+12 + movl $0x80000003,%eax + cpuid + movl %eax,RELOC(_C_LABEL(cpu_brandstr))+16 + movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+20 + movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+24 + movl %edx,RELOC(_C_LABEL(cpu_brandstr))+28 + movl $0x80000004,%eax + cpuid + movl %eax,RELOC(_C_LABEL(cpu_brandstr))+32 + movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+36 + movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+40 + andl $0x00ffffff,%edx /* Shouldn't be necessary */ + movl %edx,RELOC(_C_LABEL(cpu_brandstr))+44 + + movl $0x80000007,%eax + cpuid + movl %edx,RELOC(_C_LABEL(cpu_apmi_edx)) + +2: + /* + * Finished with old stack; load new %esp now instead of later so we + * can trace this code without having to worry about the trace trap + * clobbering the memory test or the zeroing of the bss+bootstrap page + * tables. + * + * The boot program should check: + * text+data <= &stack_variable - more_space_for_stack + * text+data+bss+pad+space_for_page_tables <= end_of_memory + * Oops, the gdt is in the carcass of the boot program so clearing + * the rest of memory is still not possible. 
+ */ + movl $RELOC(tmpstk),%esp # bootstrap stack end location + +/* + * Virtual address space of kernel: + * + * text | data | bss | [syms] | proc0 kstack | page dir | Sysmap + * 0 1 2 6 + */ +#define PROC0STACK ((0) * NBPG) +#define PROC0PDIR (( UPAGES) * NBPG) +#define SYSMAP ((4+UPAGES) * NBPG) +#define TABLESIZE ((4+UPAGES) * NBPG) /* + _C_LABEL(nkpde) * NBPG */ + + /* Find end of kernel image. */ + movl $RELOC(_C_LABEL(end)),%edi +#if (NKSYMS || defined(DDB)) + /* Save the symbols (if loaded). */ + movl RELOC(_C_LABEL(esym)),%eax + testl %eax,%eax + jz 1f + subl $KERNBASE,%eax + movl %eax,%edi +1: +#endif + + /* Calculate where to start the bootstrap tables. */ + movl %edi,%esi # edi = esym ? esym : end + addl $PGOFSET, %esi # page align up + andl $~PGOFSET, %esi + + /* + * Calculate the size of the kernel page table directory, and + * how many entries it will have. + */ + movl RELOC(_C_LABEL(nkpde)),%ecx # get nkpde + cmpl $NKPTP_MIN,%ecx # larger than min? + jge 1f + movl $NKPTP_MIN,%ecx # set at min + jmp 2f +1: cmpl RELOC(_C_LABEL(nkptp_max)),%ecx # larger than max? + jle 2f + movl RELOC(_C_LABEL(nkptp_max)),%ecx +2: movl %ecx,RELOC(_C_LABEL(nkpde)) # and store it back + + /* Clear memory for bootstrap tables. */ + shll $PGSHIFT,%ecx + addl $TABLESIZE,%ecx + addl %esi,%ecx # end of tables + subl %edi,%ecx # size of tables + shrl $2,%ecx + xorl %eax, %eax + rep + stosl + +/* + * fillkpt + * eax = pte (page frame | control | status) + * ebx = page table address + * ecx = number of pages to map + */ +#define fillkpt \ +1: movl %eax,(%ebx) ; \ + addl $NBPG,%eax ; /* increment physical address */ \ + addl $4,%ebx ; /* next pte */ \ + loop 1b ; + +/* + * Build initial page tables. + */ + /* Calculate end of text segment, rounded to a page. */ + leal (RELOC(_C_LABEL(etext))+PGOFSET),%edx + andl $~PGOFSET,%edx + + /* Skip over the first 2MB. */ + movl $RELOC(KERNTEXTOFF),%eax + movl %eax,%ecx + shrl $PGSHIFT,%ecx + leal (SYSMAP)(%esi,%ecx,4),%ebx + + /* Map the kernel text read-only. */ + movl %edx,%ecx + subl %eax,%ecx + shrl $PGSHIFT,%ecx + orl $(PG_V|PG_KR),%eax + fillkpt + + /* Map the data, BSS, and bootstrap tables read-write. */ + leal (PG_V|PG_KW)(%edx),%eax + movl RELOC(_C_LABEL(nkpde)),%ecx + shll $PGSHIFT,%ecx + addl $TABLESIZE,%ecx + addl %esi,%ecx # end of tables + subl %edx,%ecx # subtract end of text + shrl $PGSHIFT,%ecx + fillkpt + + /* Map ISA I/O memory. */ + movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax # having these bits set + movl $(IOM_SIZE>>PGSHIFT),%ecx # for this many pte s, + fillkpt + +/* + * Construct a page table directory. + */ + movl RELOC(_C_LABEL(nkpde)),%ecx # count of pdes, + leal (PROC0PDIR+0*4)(%esi),%ebx # where temp maps! + leal (SYSMAP+PG_V|PG_KW|PG_U|PG_M)(%esi),%eax # pte for KPT in proc 0 + fillkpt + +/* + * Map kernel PDEs: this is the real mapping used + * after the temp mapping outlives its usefulness. + */ + movl RELOC(_C_LABEL(nkpde)),%ecx # count of pde s, + leal (PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx # map them high + leal (SYSMAP+PG_V|PG_KW|PG_U|PG_M)(%esi),%eax # pte for KPT in proc 0 + fillkpt + + /* Install a PDE recursively mapping page directory as a page table! */ + leal (PROC0PDIR+PG_V|PG_KW|PG_U|PG_M)(%esi),%eax # pte for ptd + movl %eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi) # recursive PD slot + addl $NBPG, %eax # pte for ptd[1] + movl %eax,(PROC0PDIR+(PDSLOT_PTE+1)*4)(%esi) # recursive PD slot + + /* Save phys. addr of PTD, for libkvm. 
*/
+ leal (PROC0PDIR)(%esi),%eax # phys address of ptd in proc 0
+ movl %eax,RELOC(_C_LABEL(PTDpaddr))
+
+ /* Load base of page directory and enable mapping. */
+ movl %eax,%cr3 # load ptd addr into mmu
+ movl %cr0,%eax # get control word
+ # enable paging & NPX emulation
+ orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP),%eax
+ movl %eax,%cr0 # and let's page NOW!
+
+ pushl $begin # jump to high mem
+ ret
+
+begin:
+ /* Now running relocated at KERNBASE. Remove double mapping. */
+ movl _C_LABEL(nkpde),%ecx # for this many pde s,
+ leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps!
+ addl $(KERNBASE), %ebx # now use relocated address
+1: movl $0,(%ebx)
+ addl $4,%ebx # next pde
+ loop 1b
+
+ /* Relocate atdevbase. */
+ movl _C_LABEL(nkpde),%edx
+ shll $PGSHIFT,%edx
+ addl $(TABLESIZE+KERNBASE),%edx
+ addl %esi,%edx
+ movl %edx,_C_LABEL(atdevbase)
+
+ /* Set up bootstrap stack. */
+ leal (PROC0STACK+KERNBASE)(%esi),%eax
+ movl %eax,_C_LABEL(proc0paddr)
+ leal (USPACE-FRAMESIZE)(%eax),%esp
+ leal (PROC0PDIR)(%esi),%ebx # phys address of ptd in proc 0
+ movl %ebx,PCB_CR3(%eax) # pcb->pcb_cr3
+ xorl %ebp,%ebp # mark end of frames
+
+ movl _C_LABEL(nkpde),%eax
+ shll $PGSHIFT,%eax
+ addl $TABLESIZE,%eax
+ addl %esi,%eax # skip past stack and page tables
+ pushl %eax
+ call _C_LABEL(init386) # wire 386 chip for unix operation
+ addl $4,%esp
+
+ call _C_LABEL(main)
+ /* NOTREACHED */ diff --git a/sys/conf/makegap.sh b/sys/conf/makegap.sh new file mode 100644 index 00000000000..49a56e1c7e8 --- /dev/null +++ b/sys/conf/makegap.sh @@ -0,0 +1,32 @@ +#!/bin/sh - +
+PADBYTE=$1
+
+cat << __EOF__
+#include <machine/asm.h>
+#include <machine/param.h>
+
+ .text
+ .align PAGE_SIZE, $PADBYTE
+ .space $RANDOM, $PADBYTE
+ .align PAGE_SIZE, $PADBYTE
+
+ .globl endboot
+_C_LABEL(endboot):
+ .space PAGE_SIZE, $PADBYTE
+ .space $RANDOM % PAGE_SIZE, $PADBYTE
+ .align 16, $PADBYTE
+
+ /*
+ * Randomly bias future data, bss, and rodata objects,
+ * does not help for objects in locore.S though
+ */
+ .data
+ .space $RANDOM % PAGE_SIZE, $PADBYTE
+
+ .bss
+ .space $RANDOM % PAGE_SIZE, $PADBYTE
+
+ .section .rodata
+ .space $RANDOM % PAGE_SIZE, $PADBYTE
+__EOF__
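
Editor's note: the fillkpt macro added in the locore0.S hunk above (eax = pte value, ebx = page-table address, ecx = number of pages) is the loop that builds every bootstrap mapping. The C sketch below is a reading aid only, not part of the commit: the constants stand in for the <machine/param.h> and <machine/pte.h> definitions the assembly includes, and both function names are invented for the illustration.

#include <stdint.h>

#define NBPG    4096            /* stand-in for the machine/param.h page size */
#define PGSHIFT 12
#define PG_V    0x001           /* valid */
#define PG_KR   0x000           /* kernel read-only */
#define PG_KW   0x002           /* kernel read/write */

/*
 * fillkpt: write npages PTEs starting at pte, mapping consecutive
 * physical pages that begin at pa, each OR'd with the given bits
 * (page frame | control | status, as the macro's comment puts it).
 */
static uint32_t *
fillkpt(uint32_t *pte, uint32_t pa, uint32_t bits, uint32_t npages)
{
        while (npages-- > 0) {
                *pte++ = pa | bits;     /* next pte */
                pa += NBPG;             /* increment physical address */
        }
        return pte;
}

/*
 * The two fillkpt invocations that map the kernel image: text is
 * mapped read-only up to etext, then data, BSS and the bootstrap
 * tables read-write.  etext_pa is assumed to be rounded up to a page
 * boundary already, as the assembly does with PGOFSET.
 */
static void
map_kernel_image(uint32_t *sysmap, uint32_t text_pa, uint32_t etext_pa,
    uint32_t tables_end_pa)
{
        uint32_t *pte = sysmap + (text_pa >> PGSHIFT);

        pte = fillkpt(pte, text_pa, PG_V | PG_KR,
            (etext_pa - text_pa) >> PGSHIFT);
        fillkpt(pte, etext_pa, PG_V | PG_KW,
            (tables_end_pa - etext_pa) >> PGSHIFT);
}

The same loop is reused in the hunk to map the ISA I/O range and to fill the page-directory slots themselves. As for makegap.sh, it takes the pad byte as its single argument and writes the generated assembly to stdout, so a hypothetical invocation would be "sh makegap.sh 0xcc > gap.S"; the output file name here is made up, and the build glue that actually runs the script is not shown in this hunk.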