author    | Philip Guenther <guenther@cvs.openbsd.org> | 2018-05-22 15:55:31 +0000
committer | Philip Guenther <guenther@cvs.openbsd.org> | 2018-05-22 15:55:31 +0000
commit    | 0b4fa9bb3d4d2bbaafe1414f86abd2d5043a197b (patch)
tree      | bca82c90b0929634db0cc5d097c653d5a26fcfa0
parent    | 024d0751e9b1fcc40a90e2fc57ca424babeb5ee9 (diff)
Define CR0_DEFAULT with our default CR0_* flags for various .S files.
Replace a hex constant with the correct CR0_* define in mptramp.S.
Clean up lots and lots of whitespace glitches.
no binary change.
ok mlarkin@
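
Condensed, the pattern the patch introduces is shown below (a sketch distilled from the hunks that follow; the register used varies per file, and the comments are added here for orientation):

	/* sys/arch/amd64/include/specialreg.h: one definition of the usual CR0 bits */
	#define CR0_DEFAULT	(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP)

	/* acpi_wakecode.S, locore0.S, mptramp.S: enable paging via the shared macro */
	movl	%cr0,%eax
	orl	$CR0_DEFAULT,%eax	/* was the spelled-out CR0_PE|CR0_PG|... list */
	movl	%eax,%cr0

	/* mptramp.S real-mode entry: named flag instead of a bare hex constant */
	orl	$CR0_PE,%eax		/* was: orl $0x1,%eax */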
-rw-r--r-- | sys/arch/amd64/amd64/acpi_wakecode.S | 124
-rw-r--r-- | sys/arch/amd64/amd64/locore0.S       |  86
-rw-r--r-- | sys/arch/amd64/amd64/mptramp.S       | 100
-rw-r--r-- | sys/arch/amd64/include/specialreg.h  |  13

4 files changed, 164 insertions, 159 deletions
diff --git a/sys/arch/amd64/amd64/acpi_wakecode.S b/sys/arch/amd64/amd64/acpi_wakecode.S
index 1121e693777..4eabf3e23c7 100644
--- a/sys/arch/amd64/amd64/acpi_wakecode.S
+++ b/sys/arch/amd64/amd64/acpi_wakecode.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: acpi_wakecode.S,v 1.41 2017/08/30 23:40:22 mlarkin Exp $ */
+/* $OpenBSD: acpi_wakecode.S,v 1.42 2018/05/22 15:55:30 guenther Exp $ */
 /*
  * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
  * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
@@ -73,7 +73,7 @@
  * ACPI before we went to sleep. ACPI's wakeup vector is a
  * physical address - in our case, it's calculated and mapped
  * by the kernel and stuffed into a low page early in the boot
- * process. 
+ * process.
  *
  * We wakeup in real mode, at some phys addr based on the ACPI
  * specification (cs = phys>>8, ip = phys & 0xF). For example,
@@ -103,7 +103,7 @@ _ACPI_TRMP_OFFSET(.Lacpi_s3_vector_real)
 	cld

 	/*
-	 * Set up segment registers for real mode. 
+	 * Set up segment registers for real mode.
 	 * We'll only be in real mode for a moment, and we don't have
 	 * ant real dependencies on data or stack, so we'll just use
 	 * the code segment for data and stack (eg, a 64k memory space).
@@ -117,8 +117,8 @@ _ACPI_TRMP_OFFSET(.Lacpi_s3_vector_real)
 	/*
 	 * Set up stack to grow down from offset 0x0FFE.
-	 * We will only be doing a few push/pops and no calls in real 
-	 * mode, so as long as the real mode code in the segment 
+	 * We will only be doing a few push/pops and no calls in real
+	 * mode, so as long as the real mode code in the segment
 	 * plus stack doesn't exceed 0x0FFE (4094) bytes, we'll be ok.
 	 */
 	movw	$0x0FFE,%sp
@@ -132,8 +132,8 @@ _ACPI_TRMP_OFFSET(.Lacpi_s3_vector_real)
 	/*
 	 * Flush instruction prefetch queue
 	 */
-	jmp 1f
-1:	jmp 1f
+	jmp	1f
+1:	jmp	1f
 1:

 	/*
@@ -143,7 +143,7 @@
 	 * with the same range. This GDT will only be in use for a short
 	 * time, until we restore the saved GDT that we had when we went
 	 * to sleep.
-	 */ 
+	 */
 	addr32 lgdtl .Ltmp_gdt

 	/*
@@ -156,7 +156,7 @@ _ACPI_TRMP_OFFSET(.Lacpi_s3_vector_real)
 	/*
 	 * Force CPU into protected mode by making an intersegment jump (to
 	 * ourselves, just a few lines down from here). We rely on the kernel
-	 * to fixup the jump target addres previously. 
+	 * to fixup the jump target addres previously.
 	 */
 	ljmpl	$0x8, $.Lacpi_protected_mode_trampoline

@@ -171,7 +171,7 @@ _C_LABEL(acpi_protected_mode_resume):
 	 *
 	 * Set up segment selectors for protected mode.
 	 * We've already set up our cs via the intersegment jump earlier,
-	 * but we need to set ds,es,fs,gs,ss to all point to the 
+	 * but we need to set ds,es,fs,gs,ss to all point to the
 	 * 4GB flat data segment we defined earlier.
 	 */
 	movw	$GSEL(GDATA_SEL,SEL_KPL),%ax
@@ -186,7 +186,7 @@ _C_LABEL(acpi_protected_mode_resume):
 	 * because we haven't put anything on the stack via a
 	 * call or push that we haven't cleaned up already.
 	 */
-	addl	$(ACPI_TRAMP_DATA), %esp 
+	addl	$(ACPI_TRAMP_DATA), %esp

 	/* Set CR4 to something sane for entry into long mode */
 	mov	$(CR4_PAE|CR4_OSFXSR|CR4_OSXMMEXCPT|CR4_PSE),%eax
@@ -196,13 +196,13 @@ _C_LABEL(acpi_protected_mode_resume):
 	 * Set up a temporary long mode GDT describing 2
 	 * segments, one for code and one for data.
 	 */
-	lgdt	.Ltmp_gdt64 
+	lgdt	.Ltmp_gdt64

 	/* Restore saved EFER (LME, NXE, etc) */
 	movl	$MSR_EFER, %ecx
 	rdmsr
 	movl	.Lacpi_saved_efer, %eax
-	andl	$(EFER_LME | EFER_NXE | EFER_SCE), %eax 
+	andl	$(EFER_LME | EFER_NXE | EFER_SCE), %eax
 	wrmsr

 	/* Reenable paging using temporary cr3 */
@@ -211,22 +211,22 @@ _C_LABEL(acpi_protected_mode_resume):
 	movl	%eax, %cr3

 	/* Flush the prefetch queue again */
-	jmp 1f
+	jmp	1f
 1:	jmp 1f
 1:

 	/* Reenable paging by setting the appropriate bits in CR0 */
-	movl %cr0,%eax
-	orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP),%eax
-	movl %eax,%cr0
+	movl	%cr0,%eax
+	orl	$CR0_DEFAULT,%eax
+	movl	%eax,%cr0

 	/* Flush the prefetch queue again */
-	jmp 1f
+	jmp	1f
 1:	jmp 1f
 1:

 	/* Enter long mode by making another intersegment jump */
-	ljmp $0x8, $.Lacpi_long_mode_trampoline
+	ljmp	$0x8, $.Lacpi_long_mode_trampoline

 	.code64
 	.align 16, 0xcc
@@ -240,10 +240,10 @@ _C_LABEL(acpi_long_mode_resume):
 	lgdt	.Lacpi_saved_gdt

 	/* Reset segment registers */
-	movw	$GSEL(GDATA_SEL, SEL_KPL),%ax 
-	movw	%ax,%ds 
-	movw	%ax,%es 
-	movw	%ax,%ss 
+	movw	$GSEL(GDATA_SEL, SEL_KPL),%ax
+	movw	%ax,%ds
+	movw	%ax,%es
+	movw	%ax,%ss

 	xorw	%ax, %ax
 	movw	%ax, %fs
@@ -301,20 +301,20 @@ _C_LABEL(acpi_long_mode_resume):
 	movq	%rax, %cr3

 	/* Flush the prefetch queue again */
-	jmp 1f
+	jmp	1f
 1:	jmp 1f
 1:

 	movq	.Lacpi_saved_cr2, %rax
 	movq	%rax, %cr2
-	movq	.Lacpi_saved_cr0, %rax 
+	movq	.Lacpi_saved_cr0, %rax
 	movq	%rax, %cr0

 	/* Flush the prefetch queue again */
-	jmp 1f
+	jmp	1f
 1:	jmp 1f
 1:
-	
+
 	lldt	.Lacpi_saved_ldt
 	lidt	.Lacpi_saved_idt

@@ -325,32 +325,32 @@ _C_LABEL(acpi_long_mode_resume):
 	andb	$0xF9, 5(%rax,%rcx)
 	ltr	%cx

-	pushq	.Lacpi_saved_fl 
+	pushq	.Lacpi_saved_fl
 	popfq

-	movq	.Lacpi_saved_rbx, %rbx 
-	movq	.Lacpi_saved_rcx, %rcx 
-	movq	.Lacpi_saved_rdx, %rdx 
-	movq	.Lacpi_saved_rbp, %rbp 
-	movq	.Lacpi_saved_rsi, %rsi 
-	movq	.Lacpi_saved_rdi, %rdi 
-	movq	.Lacpi_saved_rsp, %rsp 
-
-	movq	.Lacpi_saved_r8, %r8 
-	movq	.Lacpi_saved_r9, %r9 
-	movq	.Lacpi_saved_r10, %r10 
-	movq	.Lacpi_saved_r11, %r11 
-	movq	.Lacpi_saved_r12, %r12 
-	movq	.Lacpi_saved_r13, %r13 
-	movq	.Lacpi_saved_r14, %r14 
-	movq	.Lacpi_saved_r15, %r15 
-
-	/* Poke CR3 one more time. Might not be necessary */
+	movq	.Lacpi_saved_rbx, %rbx
+	movq	.Lacpi_saved_rcx, %rcx
+	movq	.Lacpi_saved_rdx, %rdx
+	movq	.Lacpi_saved_rbp, %rbp
+	movq	.Lacpi_saved_rsi, %rsi
+	movq	.Lacpi_saved_rdi, %rdi
+	movq	.Lacpi_saved_rsp, %rsp
+
+	movq	.Lacpi_saved_r8, %r8
+	movq	.Lacpi_saved_r9, %r9
+	movq	.Lacpi_saved_r10, %r10
+	movq	.Lacpi_saved_r11, %r11
+	movq	.Lacpi_saved_r12, %r12
+	movq	.Lacpi_saved_r13, %r13
+	movq	.Lacpi_saved_r14, %r14
+	movq	.Lacpi_saved_r15, %r15
+
+	/* Poke CR3 one more time. Might not be necessary */
 	movq	.Lacpi_saved_cr3, %rax
 	movq	%rax, %cr3
-	xorq	%rax, %rax 
-	jmp	*.Lacpi_saved_ret 
+	xorq	%rax, %rax
+	jmp	*.Lacpi_saved_ret

 #ifdef HIBERNATE
 	/*
@@ -456,9 +456,9 @@ NENTRY(hibernate_activate_resume_pt_machdep)
 	 */
 NENTRY(hibernate_switch_stack_machdep)
 	movq	(%rsp), %rax
-	movq	%rax, HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET 
-	movq	$(HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET), %rax 
-	movq	%rax, %rsp 
+	movq	%rax, HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET
+	movq	$(HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET), %rax
+	movq	%rax, %rsp

 	/* On our own stack from here onward */
 	ret
@@ -495,14 +495,14 @@ _ACPI_TRMP_DATA_LABEL(.Ltmp_gdtable)
 	 * Limit: 0xffffffff
 	 * Base: 0x00000000
 	 * Descriptor Type: Code
-	 * Segment Type: CRA 
+	 * Segment Type: CRA
 	 * Present: True
 	 * Priv: 0
 	 * AVL: False
 	 * 64-bit: False
 	 * 32-bit: True
 	 *
-	 */ 
+	 */
 	.word	0xffff, 0
 	.byte	0, 0x9f, 0xcf, 0

@@ -510,7 +510,7 @@ _ACPI_TRMP_DATA_LABEL(.Ltmp_gdtable)
 	 * Data
 	 * Limit: 0xffffffff
 	 * Base: 0x00000000
-	 * Descriptor Type: 
+	 * Descriptor Type:
 	 * Segment Type: W
 	 * Present: True
 	 * Priv: 0
@@ -518,7 +518,7 @@ _ACPI_TRMP_DATA_LABEL(.Ltmp_gdtable)
 	 * 64-bit: False
 	 * 32-bit: True
 	 *
-	 */ 
+	 */
 	.word	0xffff, 0
 	.byte	0, 0x93, 0xcf, 0
 _ACPI_TRMP_DATA_LABEL(.Ltmp_gdt_end)
@@ -531,7 +531,7 @@ _ACPI_TRMP_DATA_OFFSET(.Lclean_idt)

 	.align 8, 0xcc
 _ACPI_TRMP_DATA_LABEL(.Ltmp_gdt64)
-	.word	.Ltmp_gdt64_end - .Ltmp_gdtable64 
+	.word	.Ltmp_gdt64_end - .Ltmp_gdtable64
 	.long	.Ltmp_gdtable64

 	.align 8, 0xcc
@@ -543,7 +543,7 @@ _ACPI_TRMP_DATA_LABEL(.Ltmp_gdt64_end)

 	.align 8, 0xcc
 _ACPI_TRMP_DATA_LABEL(.Ltmp_gdt6416)
-	.word	.Ltmp_gdt6416_end - .Ltmp_gdtable6416 
+	.word	.Ltmp_gdt6416_end - .Ltmp_gdtable6416
 	.quad	.Ltmp_gdtable6416

 	.align 8, 0xcc
@@ -610,9 +610,9 @@ _ACPI_TRMP_DATA_LABEL(.Lacpi_saved_gdt)
 	.space	10

 	.align 8, 0xcc
-_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ldt) 
+_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ldt)
 	.space	10
-	
+
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_tr)
 	.short	0

@@ -699,7 +699,7 @@ NENTRY(acpi_savecpu)
 	movq	%rax, .Lacpi_saved_cr4
 	movq	%cr8, %rax
 	movq	%rax, .Lacpi_saved_cr8
-	
+
 	pushq	%rcx
 	pushq	%rdx
 #if NLAPIC > 0
@@ -743,11 +743,11 @@ NENTRY(acpi_savecpu)
 	rdmsr
 	movl	%eax, .Lacpi_saved_kgs
 	movl	%edx, .Lacpi_saved_kgs+4
-	
+
 	movl	$MSR_EFER, %ecx
 	rdmsr
 	movl	%eax, .Lacpi_saved_efer
-	popq	%rdx 
+	popq	%rdx
 	popq	%rcx

 	sgdt	.Lacpi_saved_gdt
diff --git a/sys/arch/amd64/amd64/locore0.S b/sys/arch/amd64/amd64/locore0.S
index 53ef3672be5..662b58fb5bc 100644
--- a/sys/arch/amd64/amd64/locore0.S
+++ b/sys/arch/amd64/amd64/locore0.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore0.S,v 1.7 2018/02/21 19:24:15 guenther Exp $ */
+/* $OpenBSD: locore0.S,v 1.8 2018/05/22 15:55:30 guenther Exp $ */
 /*	$NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $	*/

 /*
@@ -156,12 +156,12 @@ start:	movw	$0x1234,0x472			# warm boot
 	 * XXX Boot ignores 2MB roundup of _end, so esyms can be < _end.
 	 */
 	movl	16(%esp), %eax
-	testl	%eax,%eax 
-	jz	1f 
-	addl	$KERNBASE_LO,%eax 
-	movl	$RELOC(esym),%ebp 
-	movl	%eax,(%ebp) 
-	movl	$KERNBASE_HI,4(%ebp) 
+	testl	%eax,%eax
+	jz	1f
+	addl	$KERNBASE_LO,%eax
+	movl	$RELOC(esym),%ebp
+	movl	%eax,(%ebp)
+	movl	$KERNBASE_HI,4(%ebp)
 1:
 	movl	24(%esp), %eax
 	movl	%eax, RELOC(biosbasemem)
@@ -179,14 +179,14 @@ start:	movw	$0x1234,0x472			# warm boot
 	 * machdep.c can then take action if bootinfo_size >= bootinfo[]
 	 * (which would meant that we may have been passed too much data).
 	 */
-	movl	28(%esp), %eax 
+	movl	28(%esp), %eax
 	movl	%eax, %ecx
 	cmpl	RELOC(bootinfo_size), %ecx	/* Too much? */
 	jb	bi_size_ok
 	movl	RELOC(bootinfo_size), %ecx	/* Only copy this much */
 bi_size_ok:
 	movl	%eax, RELOC(bootinfo_size)	/* Report full amount */
-	
+
 	movl	$RELOC(bootinfo), %edi		/* Destination */
 	movl	32(%esp), %esi			/* Source */
 	rep movsb				/* Copy this many bytes */
@@ -202,7 +202,7 @@ bi_size_ok:
 	movl	%ebx,(%ebp)
 	movl	%edx,4(%ebp)
 	movl	%ecx,8(%ebp)
-	movl	$0, 12(%ebp) 
+	movl	$0, 12(%ebp)

 	/*
 	 * Determine if CPU has meltdown. Certain Intel CPUs do not properly
@@ -213,7 +213,7 @@ bi_size_ok:
 	 */
 	movl	$0x1, RELOC(cpu_meltdown)	/* assume insecure at first */
 	movl	$0x0, RELOC(pg_g_kern)
-	
+
 	cmpl	$0x756e6547, %ebx	# "Genu"
 	jne	.Lcpu_secure
 	cmpl	$0x6c65746e, %ecx	# "ntel"
@@ -234,7 +234,7 @@ bi_size_ok:
 	cpuid
 	testl	$SEFF0EDX_ARCH_CAP, %edx
 	jz	.Lcpu_check_finished
-	
+
 	/* IA32_ARCH_CAPABILITIES MSR avaialble, use it to check CPU security */
 	movl	$MSR_ARCH_CAPABILITIES, %ecx
 	rdmsr
@@ -271,11 +271,11 @@ bi_size_ok:
 	 */
 	pushl	%edx
 	movl	RELOC((pg_nx + 4)), %edx	/* Second dword */
-	orl	$0x80000000, %edx		/* Bit 31 (really 63) */ 
+	orl	$0x80000000, %edx		/* Bit 31 (really 63) */
 	movl	%edx, RELOC((pg_nx + 4))
 	popl	%edx
 cont:
-	orl	%edx, RELOC(cpu_feature) 
+	orl	%edx, RELOC(cpu_feature)

 	movl	$0x80000007,%eax
 	cpuid
@@ -298,7 +298,7 @@ cont:
 /*
  * Virtual address space of kernel:
  *
- * text | data | bss | [syms] | page dir | proc0 kstack | L1 ptp | L2 ptp | L3 
+ * text | data | bss | [syms] | page dir | proc0 kstack | L1 ptp | L2 ptp | L3
  * 0 1 2 3
 */
@@ -327,20 +327,20 @@ cont:
 		NDML3_ENTRIES + NDML2_ENTRIES) * NBPG)

 #define fillkpt \
-1:	movl %eax,(%ebx)	;	/* store phys addr */ \
-	movl $0,4(%ebx)	;	/* upper 32 bits 0 */ \
-	addl $8,%ebx	;	/* next pte/pde */ \
-	addl $NBPG,%eax	;	/* next phys page */ \
+1:	movl %eax,(%ebx)	;	/* store phys addr */ \
+	movl $0,4(%ebx)	;	/* upper 32 bits 0 */ \
+	addl $8,%ebx	;	/* next pte/pde */ \
+	addl $NBPG,%eax	;	/* next phys page */ \
 	loop 1b	;	/* till finished */

 #define fillkpt_nx \
 	pushl %ebp	;	/* save */ \
-1:	movl %eax,(%ebx)	;	/* store phys addr */ \
+1:	movl %eax,(%ebx)	;	/* store phys addr */ \
 	movl RELOC((pg_nx + 4)), %ebp	;	/* NX bit? */ \
-	movl %ebp,4(%ebx)	;	/* upper 32 bits */ \
-	addl $8,%ebx	;	/* next pte/pde */ \
-	addl $NBPG,%eax	;	/* next phys page */ \
+	movl %ebp,4(%ebx)	;	/* upper 32 bits */ \
+	addl $8,%ebx	;	/* next pte/pde */ \
+	addl $NBPG,%eax	;	/* next phys page */ \
 	loop 1b	;	/* till finished */ \
 	popl %ebp
@@ -394,7 +394,7 @@ cont:
 	movl	%edx,%ecx
 	subl	%eax,%ecx
 	shrl	$PGSHIFT,%ecx
-	orl	$(PG_V|PG_KR),%eax 
+	orl	$(PG_V|PG_KR),%eax
 	fillkpt

 	/* Map .rodata RO, NX */
@@ -456,7 +456,7 @@ map_tables:
 	fillkpt_nx

 	/* Set up level 2 pages (RWX) */
-	leal	(PROC0_PTP2_OFF)(%esi),%ebx 
+	leal	(PROC0_PTP2_OFF)(%esi),%ebx
 	leal	(PROC0_PTP1_OFF)(%esi),%eax
 	orl	$(PG_V|PG_KW), %eax
 	movl	$(NKL2_KIMG_ENTRIES+1),%ecx
@@ -465,14 +465,14 @@ map_tables:
 #if L2_SLOT_KERNBASE > 0
 	/* If needed, set up L2 entries for actual kernel mapping (RWX) */
 	leal	(PROC0_PTP2_OFF+ L2_SLOT_KERNBASE*8)(%esi),%ebx
-	leal	(PROC0_PTP1_OFF)(%esi),%eax 
-	orl	$(PG_V|PG_KW), %eax 
-	movl	$(NKL2_KIMG_ENTRIES+1),%ecx 
+	leal	(PROC0_PTP1_OFF)(%esi),%eax
+	orl	$(PG_V|PG_KW), %eax
+	movl	$(NKL2_KIMG_ENTRIES+1),%ecx
 	fillkpt
 #endif

 	/* Set up level 3 pages (RWX) */
-	leal	(PROC0_PTP3_OFF)(%esi),%ebx 
+	leal	(PROC0_PTP3_OFF)(%esi),%ebx
 	leal	(PROC0_PTP2_OFF)(%esi),%eax
 	orl	$(PG_V|PG_KW), %eax
 	movl	$NKL3_KIMG_ENTRIES,%ecx
@@ -481,21 +481,21 @@ map_tables:
 #if L3_SLOT_KERNBASE > 0
 	/* If needed, set up L3 entries for actual kernel mapping (RWX) */
 	leal	(PROC0_PTP3_OFF+ L3_SLOT_KERNBASE*8)(%esi),%ebx
-	leal	(PROC0_PTP2_OFF)(%esi),%eax 
-	orl	$(PG_V|PG_KW), %eax 
-	movl	$NKL3_KIMG_ENTRIES,%ecx 
+	leal	(PROC0_PTP2_OFF)(%esi),%eax
+	orl	$(PG_V|PG_KW), %eax
+	movl	$NKL3_KIMG_ENTRIES,%ecx
 	fillkpt
 #endif

 	/* Set up top level entries for identity mapping (RWX) */
-	leal	(PROC0_PML4_OFF)(%esi),%ebx 
+	leal	(PROC0_PML4_OFF)(%esi),%ebx
 	leal	(PROC0_PTP3_OFF)(%esi),%eax
 	orl	$(PG_V|PG_KW), %eax
 	movl	$NKL4_KIMG_ENTRIES,%ecx
 	fillkpt

 	/* Set up top level entries for actual kernel mapping (RWX) */
-	leal	(PROC0_PML4_OFF + L4_SLOT_KERNBASE*8)(%esi),%ebx 
+	leal	(PROC0_PML4_OFF + L4_SLOT_KERNBASE*8)(%esi),%ebx
 	leal	(PROC0_PTP3_OFF)(%esi),%eax
 	orl	$(PG_V|PG_KW), %eax
 	movl	$NKL4_KIMG_ENTRIES,%ecx
@@ -549,7 +549,7 @@ store_pte:
 	pushl	%ebp
 	movl	RELOC((pg_nx + 4)), %ebp
 	movl	%ebp, 4(%ebx)
-	popl	%ebp 
+	popl	%ebp

 	/* Save phys. addr of PTD, for libkvm. */
 	movl	$RELOC(PTDpaddr),%ebp
@@ -568,15 +568,15 @@ store_pte:
 	 * 2. Set Long Mode Enable in EFER. Also enable the
 	 *    syscall extensions and NX (if available).
 	 */
-	movl	$MSR_EFER,%ecx 
+	movl	$MSR_EFER,%ecx
 	rdmsr
 	xorl	%eax,%eax	/* XXX */
 	orl	$(EFER_LME|EFER_SCE),%eax
 	movl	RELOC((pg_nx + 4)), %ebx
 	cmpl	$0, %ebx
-	je	write_efer 
+	je	write_efer
 	orl	$(EFER_NXE), %eax
-write_efer: 
+write_efer:
 	wrmsr

 	/*
@@ -589,7 +589,7 @@ write_efer:
 	 * 4. Enable paging and the rest of it.
 	 */
 	movl	%cr0,%eax
-	orl	$(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP),%eax
+	orl	$CR0_DEFAULT,%eax
 	movl	%eax,%cr0
 	jmp	compat
 compat:
@@ -645,7 +645,7 @@ longmode_hi:
 #endif

 	movq	$NKL4_KIMG_ENTRIES,%rcx
-	leaq	(PROC0_PML4_OFF)(%rsi),%rbx	# old, phys address of PML4 
+	leaq	(PROC0_PML4_OFF)(%rsi),%rbx	# old, phys address of PML4
 	addq	%r8, %rbx			# new, virtual address of PML4
 1:	movq	$0, (%rbx)
 	addq	$8,%rbx
@@ -665,7 +665,7 @@ longmode_hi:
 	movq	%rax,_C_LABEL(proc0paddr)(%rip)
 	leaq	(USPACE-FRAMESIZE)(%rax),%rsp
 	movq	%rsi,PCB_CR3(%rax)	# pcb->pcb_cr3
-	xorq	%rbp,%rbp		# mark end of frames 
+	xorq	%rbp,%rbp		# mark end of frames

 	xorw	%ax,%ax
 	movw	%ax,%gs
@@ -675,10 +675,10 @@ longmode_hi:

 	leaq	TABLESIZE(%rsi),%rdi
 	call	_C_LABEL(init_x86_64)
-	call	_C_LABEL(main) 
+	call	_C_LABEL(main)

 	.section .codepatch,"a"
-	.align	8, 0xcc 
+	.align	8, 0xcc
 	.globl _C_LABEL(codepatch_begin)
 _C_LABEL(codepatch_begin):
 	.previous
diff --git a/sys/arch/amd64/amd64/mptramp.S b/sys/arch/amd64/amd64/mptramp.S
index 051508311c5..d84a4571afc 100644
--- a/sys/arch/amd64/amd64/mptramp.S
+++ b/sys/arch/amd64/amd64/mptramp.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: mptramp.S,v 1.15 2017/06/29 08:14:36 mlarkin Exp $ */
+/* $OpenBSD: mptramp.S,v 1.16 2018/05/22 15:55:30 guenther Exp $ */
 /*	$NetBSD: mptramp.S,v 1.1 2003/04/26 18:39:30 fvdl Exp $	*/

 /*-
@@ -31,7 +31,7 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-	
+
 /*
  * Copyright (c) 1999 Stefan Grefen
  *
@@ -45,11 +45,11 @@
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
- *      This product includes software developed by the NetBSD 
- *      Foundation, Inc. and its contributors. 
- * 4. Neither the name of The NetBSD Foundation nor the names of its 
- *    contributors may be used to endorse or promote products derived 
- *    from this software without specific prior written permission. 
+ *      This product includes software developed by the NetBSD
+ *      Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@@ -65,7 +65,7 @@
 */
 /*
 * MP startup ...
- * the stuff from cpu_spinup_trampoline to mp_startup 
+ * the stuff from cpu_spinup_trampoline to mp_startup
 * is copied into the first 640 KB
 *
 * We startup the processors now when the kthreads become ready.
@@ -73,9 +73,9 @@
 *	1) Get the processors running kernel-code from a special
 *	   page-table and stack page, do chip identification.
 *	2) halt the processors waiting for them to be enabled
- *	   by a idle-thread 
+ *	   by a idle-thread
 */
-	
+
 #include "assym.h"
 #include <machine/param.h>
 #include <machine/asm.h>
@@ -88,15 +88,15 @@
 #define addr32
 #endif

-#define _RELOC(x) ((x) - KERNBASE) 
-#define RELOC(x) _RELOC(_C_LABEL(x)) 
+#define _RELOC(x)	((x) - KERNBASE)
+#define RELOC(x)	_RELOC(_C_LABEL(x))

-#define _TRMP_LABEL(a) a = . - _C_LABEL(cpu_spinup_trampoline) + MP_TRAMPOLINE 
-#define _TRMP_OFFSET(a) a = . - _C_LABEL(cpu_spinup_trampoline) 
+#define _TRMP_LABEL(a)	a = . - _C_LABEL(cpu_spinup_trampoline) + MP_TRAMPOLINE
+#define _TRMP_OFFSET(a)	a = . - _C_LABEL(cpu_spinup_trampoline)

-#define _TRMP_DATA_LABEL(a) a = . - _C_LABEL(mp_tramp_data_start) + \
-    MP_TRAMP_DATA 
-#define _TRMP_DATA_OFFSET(a) a = . - _C_LABEL(mp_tramp_data_start) 
+#define _TRMP_DATA_LABEL(a)	a = . - _C_LABEL(mp_tramp_data_start) + \
+    MP_TRAMP_DATA
+#define _TRMP_DATA_OFFSET(a)	a = . - _C_LABEL(mp_tramp_data_start)

 	.global _C_LABEL(cpu_spinup_trampoline)
 	.global _C_LABEL(cpu_spinup_trampoline_end)
@@ -113,32 +113,32 @@
 	.code16
 _C_LABEL(cpu_spinup_trampoline):
 	cli
-	movw	$(MP_TRAMP_DATA >> 4), %ax 
+	movw	$(MP_TRAMP_DATA >> 4), %ax
 	movw	%ax, %ds
 	movw	%cs, %ax
-	movw	%ax, %es 
-	movw	%ax, %ss 
-	addr32 lgdtl (.Lmptramp_gdt32_desc)	# load flat descriptor table 
-	movl	%cr0, %eax	# get cr0 
-	orl	$0x1, %eax	# enable protected mode 
-	movl	%eax, %cr0	# doit 
+	movw	%ax, %es
+	movw	%ax, %ss
+	addr32 lgdtl (.Lmptramp_gdt32_desc)	# load flat descriptor table
+	movl	%cr0, %eax	# get cr0
+	orl	$CR0_PE, %eax	# enable protected mode
+	movl	%eax, %cr0	# doit
 	ljmpl	$0x8, $.Lmp_startup

_TRMP_LABEL(.Lmp_startup)
 	.code32

-	movl	$0x10, %eax	# data segment 
-	movw	%ax, %ds 
-	movw	%ax, %ss 
-	movw	%ax, %es 
-	movw	%ax, %fs 
-	movw	%ax, %gs 
+	movl	$0x10, %eax	# data segment
+	movw	%ax, %ds
+	movw	%ax, %ss
+	movw	%ax, %es
+	movw	%ax, %fs
+	movw	%ax, %gs

-	movl	$(MP_TRAMP_DATA + NBPG - 16),%esp	# bootstrap stack end, 
+	movl	$(MP_TRAMP_DATA + NBPG - 16),%esp	# bootstrap stack end,
 						# with scratch space..

 	/* First, reset the PSL. */
-	pushl	$PSL_MBO 
+	pushl	$PSL_MBO
 	popfl

 	movl	%cr4,%eax
@@ -166,20 +166,20 @@ _TRMP_LABEL(.Lmp_startup)

 	movl	$mp_pdirpa, %ecx
 	movl	(%ecx), %ecx
-	movl	%ecx,%cr3	# load ptd addr into mmu 
+	movl	%ecx,%cr3	# load ptd addr into mmu

-	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax	#switch to new segment 
-	movl	%eax,%ds 
-	movl	%eax,%es 
-	movl	%eax,%ss 
+	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax	#switch to new segment
+	movl	%eax,%ds
+	movl	%eax,%es
+	movl	%eax,%ss

 	movl	$.Lmptramp_gdt64_desc,%eax
 	lgdt	(%eax)

 	movl	$.Lmptramp_jmp64,%eax

-	movl	%cr0,%ecx	# get control word 
-	orl	$(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP),%ecx
+	movl	%cr0,%ecx	# get control word
+	orl	$CR0_DEFAULT,%ecx
 	movl	%ecx, %cr0

 	ljmp	*(%eax)
@@ -189,21 +189,21 @@ _TRMP_LABEL(.Lmptramp_longmode)
 	movabsq	$_C_LABEL(cpu_spinup_trampoline_end),%rax
 	jmp	*%rax
-	
+

_C_LABEL(cpu_spinup_trampoline_end):	#end of code copied to MP_TRAMPOLINE
-	.globl _C_LABEL(x2apic_enabled) 
+	.globl _C_LABEL(x2apic_enabled)
 	movl	x2apic_enabled,%eax
 	testl	%eax,%eax
 	jz	1f
-	mov	$MSR_APICBASE,%ecx 
-	mov	$0,%edx 
+	mov	$MSR_APICBASE,%ecx
+	mov	$0,%edx
 	rdmsr
-	orl	$APICBASE_ENABLE_X2APIC,%eax 
+	orl	$APICBASE_ENABLE_X2APIC,%eax
 	wrmsr
-	mov	$MSR_X2APIC_ID,%ecx 
+	mov	$MSR_X2APIC_ID,%ecx
 	rdmsr
-	andl	$X2APIC_ID_MASK,%eax 
+	andl	$X2APIC_ID_MASK,%eax
 	jmp	2f
 1:
 	movl	_C_LABEL(local_apic)+LAPIC_ID,%eax
@@ -230,16 +230,16 @@ _C_LABEL(cpu_spinup_trampoline_end):	#end of code copied to MP_TRAMPOLINE
 	/* Switch address space. */
 	movq	PCB_CR3(%rsi),%rax
 	movq	%rax,%cr3
-	movl	PCB_CR0(%rsi),%eax 
-	movq	%rax,%cr0 
+	movl	PCB_CR0(%rsi),%eax
+	movq	%rax,%cr0
 	call	_C_LABEL(cpu_hatch)
 	/* NOTREACHED */

 	.section .rodata
_C_LABEL(mp_tramp_data_start):
_TRMP_DATA_LABEL(.Lmptramp_jmp64)
-	.long	.Lmptramp_longmode 
-	.word	GSEL(GCODE_SEL, SEL_KPL) 
+	.long	.Lmptramp_longmode
+	.word	GSEL(GCODE_SEL, SEL_KPL)

 	.global mp_pdirpa
_TRMP_DATA_LABEL(mp_pdirpa)
diff --git a/sys/arch/amd64/include/specialreg.h b/sys/arch/amd64/include/specialreg.h
index 2e8f7c37e8c..ebbd13c76dd 100644
--- a/sys/arch/amd64/include/specialreg.h
+++ b/sys/arch/amd64/include/specialreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: specialreg.h,v 1.70 2018/04/18 06:50:35 pd Exp $ */
+/* $OpenBSD: specialreg.h,v 1.71 2018/05/22 15:55:30 guenther Exp $ */
 /*	$NetBSD: specialreg.h,v 1.1 2003/04/26 18:39:48 fvdl Exp $	*/
 /*	$NetBSD: x86/specialreg.h,v 1.2 2003/04/25 21:54:30 fvdl Exp $	*/

@@ -347,7 +347,7 @@
 #define MSR_BIOS_SIGN	0x08b
 #define MSR_PERFCTR0	0x0c1
 #define MSR_PERFCTR1	0x0c2
-#define MSR_FSB_FREQ	0x0cd	/* Core Duo/Solo only */ 
+#define MSR_FSB_FREQ	0x0cd	/* Core Duo/Solo only */
 #define MSR_MTRRcap	0x0fe
 #define MTRRcap_FIXED	0x100	/* bit 8 - fixed MTRRs supported */
 #define MTRRcap_WC	0x400	/* bit 10 - WC type supported */
@@ -548,7 +548,7 @@
 * NCRx+0: A31-A24 of starting address
 * NCRx+1: A23-A16 of starting address
 * NCRx+2: A15-A12 of starting address | NCR_SIZE_xx.
- * 
+ *
 * The non-cacheable region's starting address must be aligned to the
 * size indicated by the NCR_SIZE_xx field.
 */
@@ -1185,7 +1185,7 @@
 #define VMCS_GUEST_IA32_SYSENTER_EIP	0x6826

 /* Natural-width host state fields */
-#define VMCS_HOST_IA32_CR0	0x6C00 
+#define VMCS_HOST_IA32_CR0	0x6C00
 #define VMCS_HOST_IA32_CR3	0x6C02
 #define VMCS_HOST_IA32_CR4	0x6C04
 #define VMCS_HOST_IA32_FS_BASE	0x6C06
@@ -1396,3 +1396,8 @@
 #define PAT_WB		0x6UL
 #define PAT_UCMINUS	0x7UL
+
+/*
+ * Default cr0 flags.
+ */
+#define CR0_DEFAULT	(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP)