summaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorPhilip Guenther <guenther@cvs.openbsd.org>2023-07-10 03:32:11 +0000
committerPhilip Guenther <guenther@cvs.openbsd.org>2023-07-10 03:32:11 +0000
commitc6d5e8a44b5ede8f032b74f1574bea606ea39171 (patch)
treec0604f2ae00fabc2fb22eeab4a38562b87f0e3f1 /sys
parentb02482b1585ad5a4720161152647bc14e2a46b8f (diff)
Enable Indirect Branch Tracking for amd64 userland, using XSAVES/XRSTORS
to save/restore the state and enabling it at exec-time (and for signal handling) if the PS_NOBTCFI flag isn't set.

Note: this changes the format of the sc_fpstate data in the signal context to possibly be in compressed format: starting now we just guarantee that that state is in a format understood by the XRSTOR instruction of the system that is being executed on.

At this time, passing sigreturn a corrupt sc_fpstate now results in the process exiting with no attempt to fix it up or send a T_PROTFLT trap. That may change.

prodding by deraadt@
issues with my original signal handling design identified by kettenis@
lots of base and ports preparation for this by deraadt@ and the libressl and ports teams

ok deraadt@ kettenis@
Diffstat (limited to 'sys')
-rw-r--r--sys/arch/amd64/amd64/cpu.c65
-rw-r--r--sys/arch/amd64/amd64/locore.S54
-rw-r--r--sys/arch/amd64/amd64/machdep.c116
-rw-r--r--sys/arch/amd64/amd64/vmm_machdep.c13
-rw-r--r--sys/arch/amd64/include/codepatch.h3
-rw-r--r--sys/arch/amd64/include/fpu.h24
-rw-r--r--sys/arch/amd64/include/specialreg.h6
7 files changed, 199 insertions, 82 deletions
diff --git a/sys/arch/amd64/amd64/cpu.c b/sys/arch/amd64/amd64/cpu.c
index e2c4067d0e1..7bd566d4e31 100644
--- a/sys/arch/amd64/amd64/cpu.c
+++ b/sys/arch/amd64/amd64/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.169 2023/06/15 22:18:06 cheloha Exp $ */
+/* $OpenBSD: cpu.c,v 1.170 2023/07/10 03:32:10 guenther Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
@@ -162,6 +162,7 @@ int cpu_perf_edx = 0; /* cpuid(0xa).edx */
int cpu_apmi_edx = 0; /* cpuid(0x80000007).edx */
int ecpu_ecxfeature = 0; /* cpuid(0x80000001).ecx */
int cpu_meltdown = 0;
+int cpu_use_xsaves = 0;
void
replacesmap(void)
@@ -699,10 +700,9 @@ cpu_attach(struct device *parent, struct device *self, void *aux)
}
static void
-replacexsave(void)
+replacexsave(int xsave_ext)
{
- extern long _xrstor, _xsave, _xsaveopt;
- u_int32_t eax, ebx, ecx, edx;
+ extern long _xrstor, _xrstors, _xsave, _xsaves, _xsaveopt;
static int replacedone = 0;
int s;
@@ -710,12 +710,13 @@ replacexsave(void)
return;
replacedone = 1;
- /* find out whether xsaveopt is supported */
- CPUID_LEAF(0xd, 1, eax, ebx, ecx, edx);
s = splhigh();
+ codepatch_replace(CPTAG_XRSTORS,
+ (xsave_ext & XSAVE_XSAVES) ? &_xrstors : &_xrstor, 4);
codepatch_replace(CPTAG_XRSTOR, &_xrstor, 4);
codepatch_replace(CPTAG_XSAVE,
- (eax & XSAVE_XSAVEOPT) ? &_xsaveopt : &_xsave, 4);
+ (xsave_ext & XSAVE_XSAVES) ? &_xsaves :
+ (xsave_ext & XSAVE_XSAVEOPT) ? &_xsaveopt : &_xsave, 4);
splx(s);
}
@@ -764,20 +765,46 @@ cpu_init(struct cpu_info *ci)
KASSERT(ebx == fpu_save_len);
}
- replacexsave();
+ /* check for xsaves, xsaveopt, and supervisor features */
+ CPUID_LEAF(0xd, 1, eax, ebx, ecx, edx);
+ /* Disable XSAVES on AMD family 17h due to Erratum 1386 */
+ if (!strcmp(cpu_vendor, "AuthenticAMD") &&
+ ci->ci_family == 0x17) {
+ eax &= ~XSAVE_XSAVES;
+ }
+ if (eax & XSAVE_XSAVES) {
+#ifndef SMALL_KERNEL
+ if (ci->ci_feature_sefflags_edx & SEFF0EDX_IBT)
+ xsave_mask |= ecx & XFEATURE_CET_U;
+#endif
+ if (xsave_mask & XFEATURE_XSS_MASK) {
+ wrmsr(MSR_XSS, xsave_mask & XFEATURE_XSS_MASK);
+ CPUID_LEAF(0xd, 1, eax, ebx, ecx, edx);
+ KASSERT(ebx <= sizeof(struct savefpu));
+ }
+ if (CPU_IS_PRIMARY(ci))
+ cpu_use_xsaves = 1;
+ }
+
+ replacexsave(eax);
}
- /* Give proc0 a clean FPU save area */
- sfp = &proc0.p_addr->u_pcb.pcb_savefpu;
- memset(sfp, 0, fpu_save_len);
- sfp->fp_fxsave.fx_fcw = __INITIAL_NPXCW__;
- sfp->fp_fxsave.fx_mxcsr = __INITIAL_MXCSR__;
- fpureset();
- if (xsave_mask) {
- /* must not use xsaveopt here */
- xsave(sfp, xsave_mask);
- } else
- fxsave(sfp);
+ if (CPU_IS_PRIMARY(ci)) {
+ /* Clean our FPU save area */
+ sfp = fpu_cleandata;
+ memset(sfp, 0, fpu_save_len);
+ sfp->fp_fxsave.fx_fcw = __INITIAL_NPXCW__;
+ sfp->fp_fxsave.fx_mxcsr = __INITIAL_MXCSR__;
+ xrstor_user(sfp, xsave_mask);
+ if (cpu_use_xsaves || !xsave_mask)
+ fpusave(sfp);
+ else {
+ /* must not use xsaveopt here */
+ xsave(sfp, xsave_mask);
+ }
+ } else {
+ fpureset();
+ }
#if NVMM > 0
/* Re-enable VMM if needed */
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index ec41559e6bf..529100d865a 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.135 2023/07/05 18:23:10 anton Exp $ */
+/* $OpenBSD: locore.S,v 1.136 2023/07/10 03:32:10 guenther Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
@@ -342,7 +342,7 @@ switch_exited:
#endif
CODEPATCH_START
fxrstor64 (%rdi)
- CODEPATCH_END(CPTAG_XRSTOR)
+ CODEPATCH_END(CPTAG_XRSTORS)
andl $~CPUPF_USERXSTATE,CPUVAR(PFLAGS)
.Lxstate_reset:
@@ -680,7 +680,7 @@ KUTEXT_PAGE_END
/* untouched state so can't fault */
CODEPATCH_START
fxrstor64 (%rdi)
- CODEPATCH_END(CPTAG_XRSTOR)
+ CODEPATCH_END(CPTAG_XRSTORS)
#if PCB_SAVEFPU != 0
subq $PCB_SAVEFPU,%rdi
#endif
@@ -868,10 +868,14 @@ KTEXT_PAGE_END
#if PCB_SAVEFPU != 0
addq $PCB_SAVEFPU,%rdi
#endif
- movq xsave_mask(%rip),%rsi
- call xrstor_user
- testl %eax,%eax
- jnz .Lintr_xrstor_faulted
+ movq xsave_mask(%rip),%rdx
+ movl %edx,%eax
+ shrq $32, %rdx
+ CODEPATCH_START
+ fxrstor64 (%rdi)
+ CODEPATCH_END(CPTAG_XRSTORS)
+ //testl %eax,%eax
+ //jnz .Lintr_xrstor_faulted
.Lintr_restore_fsbase: /* CPU doesn't have curproc's FS.base */
orl $CPUPF_USERSEGS,CPUVAR(PFLAGS)
movq CPUVAR(CURPCB),%rdx
@@ -894,7 +898,7 @@ KTEXT_PAGE_END
#endif
CODEPATCH_START
fxrstor64 (%rdi)
- CODEPATCH_END(CPTAG_XRSTOR)
+ CODEPATCH_END(CPTAG_XRSTORS)
movq $T_PROTFLT,TF_TRAPNO(%rsp)
jmp recall_trap
@@ -945,7 +949,6 @@ NENTRY(intr_fast_exit)
testq $PSL_I,%rdx
jnz .Lintr_exit_not_blocked
#endif /* DIAGNOSTIC */
- call pku_xonly /* XXX guenther disapproves, but foo3 locks */
movq TF_RDI(%rsp),%rdi
movq TF_RSI(%rsp),%rsi
movq TF_R8(%rsp),%r8
@@ -992,8 +995,14 @@ END(intr_fast_exit)
/*
* FPU/"extended CPU state" handling
+ * void xrstor_kern(sfp, mask)
+ * using first of xrstors/xrstor/fxrstor, load given state
+ * which is assumed to be trusted: i.e., unaltered from
+ * xsaves/xsaveopt/xsave/fxsave by kernel
* int xrstor_user(sfp, mask)
- * load given state, returns 0/1 if okay/it trapped
+ * using first of xrstor/fxrstor, load given state which might
+ * not be trustable: #GP faults will be caught; returns 0/1 if
+ * okay/it trapped.
* void fpusave(sfp)
* save current state, but retain it in the FPU
* void fpusavereset(sfp)
@@ -1002,6 +1011,19 @@ END(intr_fast_exit)
* load specified %xcr# register, returns 0/1 if okay/it trapped
*/
+ENTRY(xrstor_kern)
+ RETGUARD_SETUP(xrstor_kern, r11)
+ movq %rsi, %rdx
+ movl %esi, %eax
+ shrq $32, %rdx
+ CODEPATCH_START
+ fxrstor64 (%rdi)
+ CODEPATCH_END(CPTAG_XRSTORS)
+ RETGUARD_CHECK(xrstor_kern, r11)
+ ret
+ lfence
+END(xrstor_kern)
+
ENTRY(xrstor_user)
RETGUARD_SETUP(xrstor_user, r11)
movq %rsi, %rdx
@@ -1050,7 +1072,7 @@ ENTRY(fpusavereset)
#endif
CODEPATCH_START
fxrstor64 (%rdi)
- CODEPATCH_END(CPTAG_XRSTOR)
+ CODEPATCH_END(CPTAG_XRSTORS)
RETGUARD_CHECK(fpusavereset, r11)
ret
lfence
@@ -1081,9 +1103,17 @@ END(xsetbv_user)
_xrstor:
xrstor64 (%rdi)
+ .globl _xrstors
+_xrstors:
+ xrstors64 (%rdi)
+
.globl _xsave
_xsave:
- xsave64 (%rdi)
+ xsave64 (%rdi)
+
+ .globl _xsaves
+_xsaves:
+ xsaves64 (%rdi)
.globl _xsaveopt
_xsaveopt:
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c
index cec6ee378ed..ddbeedb12b5 100644
--- a/sys/arch/amd64/amd64/machdep.c
+++ b/sys/arch/amd64/amd64/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.284 2022/11/29 21:41:39 guenther Exp $ */
+/* $OpenBSD: machdep.c,v 1.285 2023/07/10 03:32:10 guenther Exp $ */
/* $NetBSD: machdep.c,v 1.3 2003/05/07 22:58:18 fvdl Exp $ */
/*-
@@ -564,6 +564,63 @@ cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
/* NOTREACHED */
}
+static inline void
+maybe_enable_user_cet(struct proc *p)
+{
+#ifndef SMALL_KERNEL
+ /* Enable indirect-branch tracking if present and not disabled */
+ if ((xsave_mask & XFEATURE_CET_U) &&
+ (p->p_p->ps_flags & PS_NOBTCFI) == 0) {
+ uint64_t msr = rdmsr(MSR_U_CET);
+ wrmsr(MSR_U_CET, msr | MSR_CET_ENDBR_EN | MSR_CET_NO_TRACK_EN);
+ }
+#endif
+}
+
+static inline void
+initialize_thread_xstate(struct proc *p)
+{
+ if (cpu_use_xsaves) {
+ xrstors(fpu_cleandata, xsave_mask);
+ maybe_enable_user_cet(p);
+ } else {
+ /* Reset FPU state in PCB */
+ memcpy(&p->p_addr->u_pcb.pcb_savefpu, fpu_cleandata,
+ fpu_save_len);
+
+ if (curcpu()->ci_pflags & CPUPF_USERXSTATE) {
+ /* state in CPU is obsolete; reset it */
+ fpureset();
+ }
+ }
+
+ /* The reset state _is_ the userspace state for this thread now */
+ curcpu()->ci_pflags |= CPUPF_USERXSTATE;
+}
+
+/*
+ * Copy out the FPU state, massaging it to be usable from userspace
+ * and acceptable to xrstor_user()
+ */
+static inline int
+copyoutfpu(struct savefpu *sfp, char *sp, size_t len)
+{
+ uint64_t bvs[2];
+
+ if (copyout(sfp, sp, len))
+ return 1;
+ if (len > offsetof(struct savefpu, fp_xstate.xstate_bv)) {
+ sp += offsetof(struct savefpu, fp_xstate.xstate_bv);
+ len -= offsetof(struct savefpu, fp_xstate.xstate_bv);
+ bvs[0] = sfp->fp_xstate.xstate_bv & XFEATURE_XCR0_MASK;
+ bvs[1] = sfp->fp_xstate.xstate_xcomp_bv &
+ (XFEATURE_XCR0_MASK | XFEATURE_COMPRESSED);
+ if (copyout(bvs, sp, min(len, sizeof bvs)))
+ return 1;
+ }
+ return 0;
+}
+
/*
* Send an interrupt to process.
*
@@ -613,23 +670,22 @@ sendsig(sig_t catcher, int sig, sigset_t mask, const siginfo_t *ksip,
else
sp = tf->tf_rsp - 128;
- sp &= ~15ULL; /* just in case */
- sss = (sizeof(ksc) + 15) & ~15;
+ sp -= fpu_save_len;
+ if (cpu_use_xsaves)
+ sp &= ~63ULL; /* just in case */
+ else
+ sp &= ~15ULL; /* just in case */
/* Save FPU state to PCB if necessary, then copy it out */
- if (curcpu()->ci_pflags & CPUPF_USERXSTATE) {
- curcpu()->ci_pflags &= ~CPUPF_USERXSTATE;
- fpusavereset(&p->p_addr->u_pcb.pcb_savefpu);
- }
- sp -= fpu_save_len;
- ksc.sc_fpstate = (struct fxsave64 *)sp;
- if (copyout(sfp, (void *)sp, fpu_save_len))
+ if (curcpu()->ci_pflags & CPUPF_USERXSTATE)
+ fpusave(&p->p_addr->u_pcb.pcb_savefpu);
+ if (copyoutfpu(sfp, (void *)sp, fpu_save_len))
return 1;
- /* Now reset the FPU state in PCB */
- memcpy(&p->p_addr->u_pcb.pcb_savefpu,
- &proc0.p_addr->u_pcb.pcb_savefpu, fpu_save_len);
+ initialize_thread_xstate(p);
+ ksc.sc_fpstate = (struct fxsave64 *)sp;
+ sss = (sizeof(ksc) + 15) & ~15;
sip = 0;
if (info) {
sip = sp - ((sizeof(*ksip) + 15) & ~15);
@@ -658,9 +714,6 @@ sendsig(sig_t catcher, int sig, sigset_t mask, const siginfo_t *ksip,
tf->tf_rsp = scp;
tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
- /* The reset state _is_ the userspace state for this thread now */
- curcpu()->ci_pflags |= CPUPF_USERXSTATE;
-
return 0;
}
@@ -682,6 +735,7 @@ sys_sigreturn(struct proc *p, void *v, register_t *retval)
} */ *uap = v;
struct sigcontext ksc, *scp = SCARG(uap, sigcntxp);
struct trapframe *tf = p->p_md.md_regs;
+ struct savefpu *sfp = &p->p_addr->u_pcb.pcb_savefpu;
int error;
if (PROC_PC(p) != p->p_p->ps_sigcoderet) {
@@ -706,7 +760,7 @@ sys_sigreturn(struct proc *p, void *v, register_t *retval)
!USERMODE(ksc.sc_cs, ksc.sc_eflags))
return (EINVAL);
- /* Current state is obsolete; toss it and force a reload */
+ /* Current FPU state is obsolete; toss it and force a reload */
if (curcpu()->ci_pflags & CPUPF_USERXSTATE) {
curcpu()->ci_pflags &= ~CPUPF_USERXSTATE;
fpureset();
@@ -714,15 +768,17 @@ sys_sigreturn(struct proc *p, void *v, register_t *retval)
/* Copy in the FPU state to restore */
if (__predict_true(ksc.sc_fpstate != NULL)) {
- struct fxsave64 *fx = &p->p_addr->u_pcb.pcb_savefpu.fp_fxsave;
-
- if ((error = copyin(ksc.sc_fpstate, fx, fpu_save_len)))
- return (error);
- fx->fx_mxcsr &= fpu_mxcsr_mask;
+ if ((error = copyin(ksc.sc_fpstate, sfp, fpu_save_len)))
+ return error;
+ if (xrstor_user(sfp, xsave_mask)) {
+ memcpy(sfp, fpu_cleandata, fpu_save_len);
+ return EINVAL;
+ }
+ maybe_enable_user_cet(p);
+ curcpu()->ci_pflags |= CPUPF_USERXSTATE;
} else {
/* shouldn't happen, but handle it */
- memcpy(&p->p_addr->u_pcb.pcb_savefpu,
- &proc0.p_addr->u_pcb.pcb_savefpu, fpu_save_len);
+ initialize_thread_xstate(p);
}
tf->tf_rdi = ksc.sc_rdi;
@@ -1146,17 +1202,7 @@ setregs(struct proc *p, struct exec_package *pack, u_long stack,
{
struct trapframe *tf;
- /* Reset FPU state in PCB */
- memcpy(&p->p_addr->u_pcb.pcb_savefpu,
- &proc0.p_addr->u_pcb.pcb_savefpu, fpu_save_len);
-
- if (curcpu()->ci_pflags & CPUPF_USERXSTATE) {
- /* state in CPU is obsolete; reset it */
- fpureset();
- } else {
- /* the reset state _is_ the userspace state now */
- curcpu()->ci_pflags |= CPUPF_USERXSTATE;
- }
+ initialize_thread_xstate(p);
/* To reset all registers we have to return via iretq */
p->p_md.md_flags |= MDP_IRET;
diff --git a/sys/arch/amd64/amd64/vmm_machdep.c b/sys/arch/amd64/amd64/vmm_machdep.c
index dc30e35a8be..24a376a8f3b 100644
--- a/sys/arch/amd64/amd64/vmm_machdep.c
+++ b/sys/arch/amd64/amd64/vmm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm_machdep.c,v 1.3 2023/04/26 15:40:51 mlarkin Exp $ */
+/* $OpenBSD: vmm_machdep.c,v 1.4 2023/07/10 03:32:10 guenther Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -3733,13 +3733,8 @@ vmm_fpurestore(struct vcpu *vcpu)
fpusavereset(&curproc->p_addr->u_pcb.pcb_savefpu);
}
- if (vcpu->vc_fpuinited) {
- if (xrstor_user(&vcpu->vc_g_fpu, xsave_mask)) {
- DPRINTF("%s: guest attempted to set invalid %s\n",
- __func__, "xsave/xrstor state");
- return EINVAL;
- }
- }
+ if (vcpu->vc_fpuinited)
+ xrstor_kern(&vcpu->vc_g_fpu, xsave_mask);
if (xsave_mask) {
/* Restore guest %xcr0 */
@@ -3769,7 +3764,7 @@ vmm_fpusave(struct vcpu *vcpu)
vcpu->vc_gueststate.vg_xcr0 = xgetbv(0);
/* Restore host %xcr0 */
- xsetbv(0, xsave_mask);
+ xsetbv(0, xsave_mask & XFEATURE_XCR0_MASK);
}
/*
diff --git a/sys/arch/amd64/include/codepatch.h b/sys/arch/amd64/include/codepatch.h
index a4d8a60959b..50618bddad0 100644
--- a/sys/arch/amd64/include/codepatch.h
+++ b/sys/arch/amd64/include/codepatch.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: codepatch.h,v 1.14 2020/03/11 07:27:08 guenther Exp $ */
+/* $OpenBSD: codepatch.h,v 1.15 2023/07/10 03:32:10 guenther Exp $ */
/*
* Copyright (c) 2014-2015 Stefan Fritsch <sf@sfritsch.de>
*
@@ -65,6 +65,7 @@ void codepatch_disable(void);
#define CPTAG_MDS_VMM 10
#define CPTAG_FENCE_SWAPGS_MIS_TAKEN 11
#define CPTAG_FENCE_NO_SAFE_SMAP 12
+#define CPTAG_XRSTORS 13
/*
* stac/clac SMAP instructions have lfence like semantics. Let's
diff --git a/sys/arch/amd64/include/fpu.h b/sys/arch/amd64/include/fpu.h
index 6f05c9e6ac9..2a024f2dce4 100644
--- a/sys/arch/amd64/include/fpu.h
+++ b/sys/arch/amd64/include/fpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: fpu.h,v 1.18 2023/05/22 00:39:57 guenther Exp $ */
+/* $OpenBSD: fpu.h,v 1.19 2023/07/10 03:32:10 guenther Exp $ */
/* $NetBSD: fpu.h,v 1.1 2003/04/26 18:39:40 fvdl Exp $ */
#ifndef _MACHINE_FPU_H_
@@ -40,6 +40,7 @@ struct savefpu {
struct fxsave64 fp_fxsave; /* see above */
struct xstate_hdr fp_xstate;
u_int64_t fp_ymm[16][2];
+ u_int64_t fp_cet_u[2];
};
/*
@@ -60,6 +61,7 @@ struct cpu_info;
extern size_t fpu_save_len;
extern uint32_t fpu_mxcsr_mask;
extern uint64_t xsave_mask;
+extern int cpu_use_xsaves;
void fpuinit(struct cpu_info *);
int fputrap(int _type);
@@ -68,9 +70,13 @@ void fpusavereset(struct savefpu *);
void fpu_kernel_enter(void);
void fpu_kernel_exit(void);
+/* pointer to fxsave/xsave/xsaves data with everything reset */
+#define fpu_cleandata (&proc0.p_addr->u_pcb.pcb_savefpu)
+
int xrstor_user(struct savefpu *_addr, uint64_t _mask);
+void xrstor_kern(struct savefpu *_addr, uint64_t _mask);
#define fpureset() \
- xrstor_user(&proc0.p_addr->u_pcb.pcb_savefpu, xsave_mask)
+ xrstor_kern(fpu_cleandata, xsave_mask)
int xsetbv_user(uint32_t _reg, uint64_t _mask);
#define fninit() __asm("fninit")
@@ -87,9 +93,17 @@ xsave(struct savefpu *addr, uint64_t mask)
lo = mask;
hi = mask >> 32;
- /* should be xsave64, but where we use this it doesn't matter */
- __asm volatile("xsave %0" : "=m" (*addr) : "a" (lo), "d" (hi) :
- "memory");
+ __asm volatile("xsave64 %0" : "+m" (*addr) : "a" (lo), "d" (hi));
+}
+
+static inline void
+xrstors(const struct savefpu *addr, uint64_t mask)
+{
+ uint32_t lo, hi;
+
+ lo = mask;
+ hi = mask >> 32;
+ __asm volatile("xrstors64 %0" : : "m" (*addr), "a" (lo), "d" (hi));
}
#endif
diff --git a/sys/arch/amd64/include/specialreg.h b/sys/arch/amd64/include/specialreg.h
index e24c5322d08..ca1eb336d9f 100644
--- a/sys/arch/amd64/include/specialreg.h
+++ b/sys/arch/amd64/include/specialreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: specialreg.h,v 1.102 2023/04/22 18:27:28 guenther Exp $ */
+/* $OpenBSD: specialreg.h,v 1.103 2023/07/10 03:32:10 guenther Exp $ */
/* $NetBSD: specialreg.h,v 1.1 2003/04/26 18:39:48 fvdl Exp $ */
/* $NetBSD: x86/specialreg.h,v 1.2 2003/04/25 21:54:30 fvdl Exp $ */
@@ -118,6 +118,9 @@
#define XFEATURE_TILEDATA 0x00040000 /* AMX state */
#define XFEATURE_AMX (XFEATURE_TILECFG | XFEATURE_TILEDATA)
+/* valid only in xcomp_bv field: */
+#define XFEATURE_COMPRESSED (1ULL << 63) /* compressed format */
+
/* which bits are for XCR0 and which for the XSS MSR? */
#define XFEATURE_XCR0_MASK \
(XFEATURE_X87 | XFEATURE_SSE | XFEATURE_AVX | XFEATURE_MPX | \
@@ -525,6 +528,7 @@
#define MSR_MC3_MISC 0x413
#define MSR_U_CET 0x6a0
#define MSR_CET_ENDBR_EN (1 << 2)
+#define MSR_CET_NO_TRACK_EN (1 << 4)
#define MSR_S_CET 0x6a2
#define MSR_PKRS 0x6e1
#define MSR_XSS 0xda0