author	Philip Guenther <guenther@cvs.openbsd.org>	2010-09-28 03:53:15 +0000
committer	Philip Guenther <guenther@cvs.openbsd.org>	2010-09-28 03:53:15 +0000
commit	5748e6aa24962647ed31865b3976f89bb472de18 (patch)
tree	492d74dda1abe387db92c060e2ea9c113ea35d53
parent	b93e0adfcd7e935651416edab680477908e15a83 (diff)
Correct the handling of GS.base when iretq faults: the fault happens
with CPL == 0 but the user's GS.base, so the normal INTRENTRY handling
won't work.  Contrawise, the asm that trap() redirects us to when that
happens (resume_iret) sees a trapframe showing CPL==3 but it's run with
the kernel's GS.base, so INTRENTRY won't work there either.

asm style fixes drahn@ and mikeb@, ok kettenis@
-rw-r--r--	sys/arch/amd64/amd64/locore.S	28
-rw-r--r--	sys/arch/amd64/amd64/trap.c	38
-rw-r--r--	sys/arch/amd64/amd64/vector.S	59
-rw-r--r--	sys/arch/amd64/include/frameasm.h	14
4 files changed, 85 insertions(+), 54 deletions(-)
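The crux of the fix, per the commit message above: when the final iretq
faults, the CPU is back at CPL 0 but GS.base still belongs to the user, so
neither the normal entry path nor the normal recovery path can blindly
swapgs.  A minimal C sketch of the new check in trap() (symbol names come
from the diff below; the stub struct and the return-value convention are
illustrative only, not the kernel's actual interface):

	#include <stdint.h>

	extern char doreti_iret[];	/* label on the shared iretq in locore.S */
	extern char resume_iret[];	/* recovery stub in vector.S */

	struct trapframe { uint64_t tf_rip; /* ...other saved state... */ };

	/* Kernel-mode T_PROTFLT: did the iretq at doreti_iret itself fault? */
	static int
	redirect_iret_fault(struct trapframe *frame)
	{
		if (frame->tf_rip == (uint64_t)doreti_iret) {
			/*
			 * CPL is 0 but GS.base is still the user's; resume at
			 * resume_iret, which rebuilds the frame without swapgs.
			 */
			frame->tf_rip = (uint64_t)resume_iret;
			return 1;	/* handled; the process will get SIGBUS */
		}
		return 0;		/* not ours; fall through to the panic path */
	}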
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index db5ae42f520..8d4459f86c9 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.39 2009/06/09 02:56:38 krw Exp $ */
+/* $OpenBSD: locore.S,v 1.40 2010/09/28 03:53:14 guenther Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
@@ -1049,12 +1049,28 @@ _C_LABEL(osyscall_return):
jmp 2b
iret_return:
-#ifndef DIAGNOSTIC
-1: INTRFASTEXIT
-#else /* DIAGNOSTIC */
-1: cmpl $IPL_NONE,CPUVAR(ILEVEL)
+1:
+#ifdef DIAGNOSTIC
+ cmpl $IPL_NONE,CPUVAR(ILEVEL)
jne 3f
- INTRFASTEXIT
+#endif /* DIAGNOSTIC */
+ .globl intr_fast_exit
+intr_fast_exit:
+ INTR_RESTORE_GPRS
+ testq $SEL_UPL,56(%rsp)
+ je 5f
+ cli
+ swapgs
+ movw 0(%rsp),%gs
+ movw 8(%rsp),%fs
+ movw 16(%rsp),%es
+ movw 24(%rsp),%ds
+5: addq $48,%rsp
+ .globl _C_LABEL(doreti_iret)
+_C_LABEL(doreti_iret):
+ iretq
+
+#ifdef DIAGNOSTIC
3: sti
movabsq $4f, %rdi
xorq %rax,%rax
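The locore.S hunk above hoists the body of INTRFASTEXIT into a single shared
intr_fast_exit, so the final iretq has exactly one address (doreti_iret)
that trap() can compare against.  A hedged C restatement of that exit path;
the struct, the stub functions, and the SEL_UPL define below are stand-ins
for the real kernel symbols, included only so the sketch is self-contained:

	#include <stdint.h>

	#define SEL_UPL	3				/* user RPL bits in %cs */

	struct trapframe_sketch { uint64_t tf_cs; };	/* stand-in, not the kernel struct */

	/* Stubs standing in for the instructions named in the comments. */
	static void restore_gprs(void) { }		/* INTR_RESTORE_GPRS */
	static void disable_interrupts(void) { }	/* cli */
	static void do_swapgs(void) { }			/* swapgs */
	static void restore_user_segments(void) { }	/* movw ...,%gs/%fs/%es/%ds */
	static void do_iretq(void) { }			/* iretq */

	void
	intr_fast_exit_sketch(struct trapframe_sketch *tf)
	{
		restore_gprs();
		if (tf->tf_cs & SEL_UPL) {	/* returning to CPL 3? */
			disable_interrupts();
			do_swapgs();		/* back to the user's GS.base */
			restore_user_segments();
		}
		/* addq $48,%rsp: drop segs/trapno/err, leaving the iretq frame */
		do_iretq();			/* doreti_iret: the address trap() checks */
	}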
diff --git a/sys/arch/amd64/amd64/trap.c b/sys/arch/amd64/amd64/trap.c
index 1dccaf218cf..8f358769fc8 100644
--- a/sys/arch/amd64/amd64/trap.c
+++ b/sys/arch/amd64/amd64/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.18 2010/05/06 21:33:51 nicm Exp $ */
+/* $OpenBSD: trap.c,v 1.19 2010/09/28 03:53:14 guenther Exp $ */
/* $NetBSD: trap.c,v 1.2 2003/05/04 23:51:56 fvdl Exp $ */
/*-
@@ -150,12 +150,10 @@ trap(struct trapframe *frame)
struct proc *p = curproc;
int type = (int)frame->tf_trapno;
struct pcb *pcb;
- extern char resume_iret[], IDTVEC(oosyscall)[];
+ extern char doreti_iret[], resume_iret[], IDTVEC(oosyscall)[];
#if 0
extern char resume_pop_ds[], resume_pop_es[];
#endif
- struct trapframe *vframe;
- void *resume;
caddr_t onfault;
int error;
uint64_t cr2;
@@ -235,34 +233,14 @@ copyfault:
/*
* Check for failure during return to user mode.
- *
- * XXXfvdl check for rex prefix?
- *
- * We do this by looking at the instruction we faulted on. The
- * specific instructions we recognize only happen when
- * returning from a trap, syscall, or interrupt.
- *
- * XXX
- * The heuristic used here will currently fail for the case of
- * one of the 2 pop instructions faulting when returning from a
- * a fast interrupt. This should not be possible. It can be
- * fixed by rearranging the trap frame so that the stack format
- * at this point is the same as on exit from a `slow'
- * interrupt.
+ * We do this by looking at the address of the
+ * instruction that faulted.
*/
- switch (*(u_char *)frame->tf_rip) {
- case 0xcf: /* iret */
- vframe = (void *)((u_int64_t)&frame->tf_rsp - 44);
- resume = resume_iret;
- break;
- default:
- goto we_re_toast;
+ if (frame->tf_rip == (u_int64_t)doreti_iret) {
+ frame->tf_rip = (u_int64_t)resume_iret;
+ return;
}
- if (KERNELMODE(vframe->tf_cs, vframe->tf_rflags))
- goto we_re_toast;
-
- frame->tf_rip = (u_int64_t)resume;
- return;
+ goto we_re_toast;
case T_PROTFLT|T_USER: /* protection fault */
case T_TSSFLT|T_USER:
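The deleted heuristic identified a faulting return by decoding the opcode at
the fault address and rebuilding an inner frame by hand; the replacement
relies on there now being exactly one iretq that can fault on the return
path, at doreti_iret.  A condensed before/after lifted from the hunk above
(sketch only, kernel context assumed):

	/* Before: decode the instruction that faulted. */
	if (*(u_char *)frame->tf_rip == 0xcf)		/* iret/iretq opcode */
		/* reconstruct a nested trapframe by hand */ ;

	/* After: compare the fault address with the one known iretq. */
	if (frame->tf_rip == (u_int64_t)doreti_iret) {
		frame->tf_rip = (u_int64_t)resume_iret;
		return;
	}
	goto we_re_toast;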
diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
index 2f90fb7545e..be7ebb39deb 100644
--- a/sys/arch/amd64/amd64/vector.S
+++ b/sys/arch/amd64/amd64/vector.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: vector.S,v 1.25 2010/04/05 19:04:32 kettenis Exp $ */
+/* $OpenBSD: vector.S,v 1.26 2010/09/28 03:53:14 guenther Exp $ */
/* $NetBSD: vector.S,v 1.5 2004/06/28 09:13:11 fvdl Exp $ */
/*
@@ -137,8 +137,46 @@ IDTVEC(trap0b)
TRAP(T_SEGNPFLT)
IDTVEC(trap0c)
TRAP(T_STKFLT)
+
+ /*
+ * If iretq faults, we'll get a trap at doreti_iret with CPL==0 but
+ * the user's GS.base, which INTRENTRY wouldn't handle correctly
+ * (it would skip the swapgs), so locally expand both it and
+ * INTR_SAVE_GPRS, but add an extra test comparing %rip to doreti_iret
+ * so that we can do the necessary swapgs in that case.
+ */
IDTVEC(trap0d)
- TRAP(T_PROTFLT)
+ subq $TF_ERR,%rsp
+ movl $T_PROTFLT,TF_TRAPNO(%rsp)
+ movq %rdi,TF_RDI(%rsp)
+ leaq _C_LABEL(doreti_iret)(%rip),%rdi
+ cmpq %rdi,TF_RIP(%rsp)
+ je 1f
+ testq $SEL_UPL,TF_CS(%rsp)
+ jz 2f
+1: swapgs
+2: movw %gs,TF_GS(%rsp)
+ movw %fs,TF_FS(%rsp)
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+ movq %r15,TF_R15(%rsp)
+ movq %r14,TF_R14(%rsp)
+ movq %r13,TF_R13(%rsp)
+ movq %r12,TF_R12(%rsp)
+ movq %r11,TF_R11(%rsp)
+ movq %r10,TF_R10(%rsp)
+ movq %r9,TF_R9(%rsp)
+ movq %r8,TF_R8(%rsp)
+ /*movq %rdi,TF_RDI(%rsp) done above */
+ movq %rsi,TF_RSI(%rsp)
+ movq %rbp,TF_RBP(%rsp)
+ movq %rbx,TF_RBX(%rsp)
+ movq %rdx,TF_RDX(%rsp)
+ movq %rcx,TF_RCX(%rsp)
+ movq %rax,TF_RAX(%rsp)
+ sti
+ jmp calltrap
+
IDTVEC(trap0e)
TRAP(T_PAGEFLT)
IDTVEC(intrspurious)
@@ -187,12 +225,21 @@ IDTVEC(exceptions)
/*
* If an error is detected during trap, syscall, or interrupt exit, trap() will
- * change %eip to point to one of these labels. We clean up the stack, if
- * necessary, and resume as if we were handling a general protection fault.
- * This will cause the process to get a SIGBUS.
+ * change %rip to point to this label. At that point, we'll be running with
+ * the kernel GS.base, but the trap frame will be from CPL==3, so we can't
+ * go through INTRENTRY as it would do the swapgs that we don't want/need.
+ * So, locally expand INTRENTRY but without the swapgs: manually
+ * clean up the stack and resume as if we were handling a general
+ * protection fault. This will cause the process to get a SIGBUS.
*/
NENTRY(resume_iret)
- ZTRAP(T_PROTFLT)
+ pushq $0
+ pushq $T_PROTFLT
+ subq $32,%rsp
+ INTR_SAVE_GPRS
+ sti
+ jmp calltrap
+
/*
* All traps go through here. Call the generic trap handler, and
* check for ASTs afterwards.
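The two vector.S additions above are the entry-side mirror of the problem
described in the commit message: trap0d must decide whether to swapgs before
it can safely touch per-CPU data, and resume_iret must not swapgs at all
even though its trapframe shows CPL 3.  A hedged C sketch of the trap0d
decision; doreti_iret is the real label, while the SEL_UPL define and the
function itself are illustrative only:

	#include <stdint.h>

	extern char doreti_iret[];	/* the shared iretq in locore.S */
	#define SEL_UPL	3		/* user RPL bits in %cs */

	/* Mirrors the cmpq/testq sequence added to trap0d above. */
	static int
	entry_needs_swapgs(uint64_t rip, uint64_t cs)
	{
		if (rip == (uint64_t)doreti_iret)
			return 1;		/* iretq faulted: CPL 0 but user GS.base */
		return (cs & SEL_UPL) != 0;	/* trapped directly from user mode */
	}

resume_iret, by contrast, is only reached after trap() has already run with
the kernel's GS.base in place, so it rebuilds the frame and jumps to
calltrap without any swapgs despite the CPL 3 trapframe it sees.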
diff --git a/sys/arch/amd64/include/frameasm.h b/sys/arch/amd64/include/frameasm.h
index c23b0e65a4d..5e1fa6145e1 100644
--- a/sys/arch/amd64/include/frameasm.h
+++ b/sys/arch/amd64/include/frameasm.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: frameasm.h,v 1.1 2004/01/28 01:39:39 mickey Exp $ */
+/* $OpenBSD: frameasm.h,v 1.2 2010/09/28 03:53:14 guenther Exp $ */
/* $NetBSD: frameasm.h,v 1.1 2003/04/26 18:39:40 fvdl Exp $ */
#ifndef _AMD64_MACHINE_FRAMEASM_H
@@ -60,17 +60,7 @@
98: INTR_SAVE_GPRS
#define INTRFASTEXIT \
- INTR_RESTORE_GPRS ; \
- testq $SEL_UPL,56(%rsp) ; \
- je 99f ; \
- cli ; \
- swapgs ; \
- movw 0(%rsp),%gs ; \
- movw 8(%rsp),%fs ; \
- movw 16(%rsp),%es ; \
- movw 24(%rsp),%ds ; \
-99: addq $48,%rsp ; \
- iretq
+ jmp intr_fast_exit
#define INTR_RECURSE_HWFRAME \
movq %rsp,%r10 ; \