summary | refs | log | tree | commit | diff
path: root/sys/arch/amd64
diff options
context:
space:
mode:
author	Mike Larkin <mlarkin@cvs.openbsd.org>	2012-10-19 16:38:31 +0000
committer	Mike Larkin <mlarkin@cvs.openbsd.org>	2012-10-19 16:38:31 +0000
commit	14846c424b752562086c94b0f700a45e6bb34454 (patch)
tree	b5aea7538edd9440f706768b400aef33799ccb5f /sys/arch/amd64
parent	533ab861931418af72a197d0f54b609fd572661f (diff)
amd64 hibernate "unpack-time" mmu/pmap code and asm goo. Work in
progress.
Diffstat (limited to 'sys/arch/amd64')
-rw-r--r--	sys/arch/amd64/amd64/acpi_wakecode.S	| 140
-rw-r--r--	sys/arch/amd64/amd64/hibernate_machdep.c	| 192
-rw-r--r--	sys/arch/amd64/amd64/machdep.c	| 11
-rw-r--r--	sys/arch/amd64/include/hibernate_var.h	| 75
4 files changed, 362 insertions(+), 56 deletions(-)
diff --git a/sys/arch/amd64/amd64/acpi_wakecode.S b/sys/arch/amd64/amd64/acpi_wakecode.S
index d81619850f7..4b9bdfc1d92 100644
--- a/sys/arch/amd64/amd64/acpi_wakecode.S
+++ b/sys/arch/amd64/amd64/acpi_wakecode.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: acpi_wakecode.S,v 1.12 2010/11/18 21:15:13 miod Exp $ */
+/* $OpenBSD: acpi_wakecode.S,v 1.13 2012/10/19 16:38:30 mlarkin Exp $ */
/*
* Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
* Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
@@ -45,6 +45,9 @@
#include "assym.h"
#include <machine/asm.h>
+#ifdef HIBERNATE
+#include <machine/hibernate_var.h>
+#endif /* HIBERNATE */
#include <machine/specialreg.h>
#include <machine/param.h>
#include <machine/segments.h>
@@ -84,6 +87,7 @@
.global _C_LABEL(acpi_pdirpa)
.global _C_LABEL(do_real_mode_post)
_C_LABEL(acpi_real_mode_resume):
+_ACPI_TRMP_OFFSET(acpi_s3_vector_real)
nop
cli
cld
@@ -374,6 +378,89 @@ _C_LABEL(acpi_long_mode_resume):
xorq %rax, %rax
jmp *acpi_saved_ret
+#ifdef HIBERNATE
+ /*
+ * hibernate_resume_machdep drops to real mode and
+ * restarts the OS using the saved S3 resume vector
+ */
+ .code64
+NENTRY(hibernate_resume_machdep)
+ cli
+ /* Jump to the identity mapped version of ourself */
+ mov $hibernate_resume_vector_2, %rax
+ jmp *%rax
+_ACPI_TRMP_LABEL(hibernate_resume_vector_2)
+
+ /* Exit long mode */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ andl $(~EFER_LME), %eax
+ wrmsr
+
+ .code32
+ /* Get out of 32 bit CS */
+ lgdt gdt_16
+ ljmp $0x8, $hibernate_resume_vector_3
+
+_ACPI_TRMP_LABEL(hibernate_resume_vector_3)
+ .code16
+
+ movl %cr0, %eax
+ /* Disable CR0.PG - no paging */
+ andl $(~CR0_PG), %eax
+ /* Disable CR0.PE - real mode */
+ andl $(~CR0_PE), %eax
+ movl %eax, %cr0
+
+ /* Flush TLB */
+ xorl %eax, %eax
+ movl %eax, %cr3
+
+ /* Set up real mode segment selectors */
+ movw $0x0400, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+ movl $0x0FFE, %esp
+ lidtl clean_idt
+
+ /* Jump to the S3 resume vector */
+ ljmp $0x0400, $acpi_s3_vector_real
+
+ .code32
+ /* Switch to hibernate resume pagetable */
+NENTRY(hibernate_activate_resume_pt_machdep)
+ /* Enable large pages */
+ movl %cr4, %eax
+ orl $(CR4_PSE), %eax
+ movl %eax, %cr4
+
+ movl $HIBERNATE_PML4_PAGE, %eax
+ movl %eax, %cr3
+ jmp 1f
+
+1: nop
+ ret
+
+ /*
+ * Switch to the private resume-time hibernate stack
+ */
+NENTRY(hibernate_switch_stack_machdep)
+ movl (%esp), %eax
+ movl %eax, HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET
+ movl $(HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET), %eax
+ movl %eax, %esp
+
+ /* On our own stack from here onward */
+ ret
+
+NENTRY(hibernate_flush)
+ wbinvd
+ invlpg HIBERNATE_INFLATE_PAGE
+ invlpg HIBERNATE_COPY_PAGE
+ ret
+#endif /* HIBERNATE */
.align 8
_ACPI_TRMP_OFFSET(tmp_gdt)
.word tmp_gdt_end - tmp_gdtable
@@ -425,6 +512,56 @@ _ACPI_TRMP_OFFSET(clean_idt)
.long 0
.word 0
+ /*
+ * gdt_16 is the gdt used when returning to real mode for bios
+ * reads/writes (sets up a 16 bit segment)
+ */
+ .align 8
+_ACPI_TRMP_LABEL(gdt_16)
+ .word gdt_16_end - gdt_16_table
+ .long gdt_16_table
+
+ .align 8
+_ACPI_TRMP_LABEL(gdt_16_table)
+ /*
+ * null
+ */
+ .word 0, 0
+ .byte 0, 0, 0, 0
+ /*
+ * Code
+ * Limit: 0xffffffff
+ * Base: 0x00000000
+ * Descriptor Type: Code
+ * Segment Type: CRA
+ * Present: True
+ * Priv: 0
+ * AVL: False
+ * 64-bit: False
+ * 32-bit: False
+ *
+ */
+ .word 0xffff, 0
+ .byte 0, 0x9f, 0x8f, 0
+
+ /*
+ * Data
+ * Limit: 0xffffffff
+ * Base: 0x00000000
+ * Descriptor Type:
+ * Segment Type: W
+ * Present: True
+ * Priv: 0
+ * AVL: False
+ * 64-bit: False
+ * 32-bit: False
+ *
+ */
+ .word 0xffff, 0
+ .byte 0, 0x93, 0x8f, 0
+
+_ACPI_TRMP_LABEL(gdt_16_end)
+
.align 8
_ACPI_TRMP_LABEL(tmp_gdt64)
.word tmp_gdt64_end - tmp_gdtable64
@@ -539,6 +676,7 @@ _C_LABEL(acpi_resume_end):
* for use during the ACPI suspend/resume process.
*/
+ .code64
NENTRY(acpi_savecpu)
movq (%rsp), %rax
movq %rax, acpi_saved_ret
diff --git a/sys/arch/amd64/amd64/hibernate_machdep.c b/sys/arch/amd64/amd64/hibernate_machdep.c
index c793aca1287..fd846c24324 100644
--- a/sys/arch/amd64/amd64/hibernate_machdep.c
+++ b/sys/arch/amd64/amd64/hibernate_machdep.c
@@ -52,7 +52,7 @@
/* Hibernate support */
void hibernate_enter_resume_4k_pte(vaddr_t, paddr_t);
void hibernate_enter_resume_4k_pde(vaddr_t);
-void hibernate_enter_resume_4m_pde(vaddr_t, paddr_t);
+void hibernate_enter_resume_2m_pde(vaddr_t, paddr_t);
extern void hibernate_resume_machdep(void);
extern void hibernate_flush(void);
@@ -62,7 +62,7 @@ extern phys_ram_seg_t mem_clusters[];
extern struct hibernate_state *hibernate_state;
/*
- * i386 MD Hibernate functions
+ * amd64 MD Hibernate functions
*/
/*
@@ -140,28 +140,69 @@ get_hibernate_info_md(union hibernate_info *hiber_info)
* the specified size.
*
* size : 0 if a 4KB mapping is desired
- * 1 if a 4MB mapping is desired
+ * 1 if a 2MB mapping is desired
*/
void
hibernate_enter_resume_mapping(vaddr_t va, paddr_t pa, int size)
{
if (size)
- return hibernate_enter_resume_4m_pde(va, pa);
+ return hibernate_enter_resume_2m_pde(va, pa);
else
return hibernate_enter_resume_4k_pte(va, pa);
}
/*
- * Enter a 4MB PDE mapping for the supplied VA/PA into the resume-time pmap
+ * Enter a 2MB PDE mapping for the supplied VA/PA into the resume-time pmap
*/
void
-hibernate_enter_resume_4m_pde(vaddr_t va, paddr_t pa)
+hibernate_enter_resume_2m_pde(vaddr_t va, paddr_t pa)
{
pt_entry_t *pde, npde;
- pde = s4pde_4m(va);
- npde = (pa & PMAP_PA_MASK_4M) | PG_RW | PG_V | PG_u | PG_M | PG_PS;
- *pde = npde;
+ if (pa < HIBERNATE_512GB) {
+ if (pa < HIBERNATE_1GB) {
+ pde = s4pde_2m_low(va);
+ npde = (pa & PMAP_PA_MASK_2M) |
+ PG_RW | PG_V | PG_u | PG_M | PG_PS;
+ *pde = npde;
+ } else {
+ /*
+ * pa in first 512GB, but not first 1GB - first map
+ * the page's 1GB containing region
+ */
+ pde = s4pde_1g_low2(va);
+ npde = (pa & PMAP_PA_MASK_1G) |
+ PG_RW | PG_V | PG_u | PG_PS;
+ *pde = npde;
+
+ /* Finally, map the page's region (2MB) */
+ pde = s4pde_2m_low2(va);
+ npde = (pa & PMAP_PA_MASK_2M) |
+ PG_RW | PG_V | PG_u | PG_M | PG_PS;
+ *pde = npde;
+ }
+ } else {
+ /*
+ * pa not in first 512GB - first map the page's 512GB
+ * containing region
+ */
+ pde = s4pde_512g(va);
+ npde = (pa & PMAP_PA_MASK_512G) |
+ PG_RW | PG_V | PG_u;
+ *pde = npde;
+
+ /* Next, map the page's 1GB containing region */
+ pde = s4pde_1g_hi(va);
+ npde = (pa & PMAP_PA_MASK_1G) |
+ PG_RW | PG_V | PG_u | PG_PS;
+ *pde = npde;
+
+ /* Finally, map the page's region (2MB) */
+ pde = s4pde_2m_hi(va);
+ npde = (pa & PMAP_PA_MASK_2M) |
+ PG_RW | PG_V | PG_u | PG_M | PG_PS;
+ *pde = npde;
+ }
}
/*
@@ -170,23 +211,84 @@ hibernate_enter_resume_4m_pde(vaddr_t va, paddr_t pa)
void
hibernate_enter_resume_4k_pte(vaddr_t va, paddr_t pa)
{
- pt_entry_t *pte, npte;
+ pt_entry_t *pde, npde;
- pte = s4pte_4k(va);
- npte = (pa & PMAP_PA_MASK) | PG_RW | PG_V | PG_u | PG_M;
- *pte = npte;
+ if (pa < HIBERNATE_512GB) {
+ if (pa < HIBERNATE_1GB) {
+ /* Map the 2MB region containing the page */
+ pde = s4pde_2m_low(va);
+ npde = (pa & PMAP_PA_MASK_2M) |
+ PG_RW | PG_V | PG_u | PG_M | PG_PS;
+ *pde = npde;
+
+ /* Map the page */
+ pde = s4pte_4k_low(va);
+ npde = (pa & PMAP_PA_MASK) |
+ PG_RW | PG_V | PG_u | PG_M | PG_PS;
+ *pde = npde;
+ } else {
+ /*
+ * pa in first 512GB, but not first 1GB - first map
+ * the page's 1GB containing region
+ */
+ pde = s4pde_1g_low2(va);
+ npde = (pa & PMAP_PA_MASK_1G) |
+ PG_RW | PG_V | PG_u | PG_PS;
+ *pde = npde;
+
+ /* Next, map the page's region (2MB) */
+ pde = s4pde_2m_low2(va);
+ npde = (pa & PMAP_PA_MASK_2M) |
+ PG_RW | PG_V | PG_u | PG_M | PG_PS;
+ *pde = npde;
+
+ /* Finally, map the page */
+ pde = s4pte_4k_low2(va);
+ npde = (pa & PMAP_PA_MASK) |
+ PG_RW | PG_V | PG_u | PG_M | PG_PS;
+ *pde = npde;
+ }
+ } else {
+ /*
+ * pa not in first 512GB - first map the page's 512GB
+ * containing region
+ */
+ pde = s4pde_512g(va);
+ npde = (pa & PMAP_PA_MASK_512G) |
+ PG_RW | PG_V | PG_u;
+ *pde = npde;
+
+ /* Next, map the page's 1GB containing region */
+ pde = s4pde_1g_hi(va);
+ npde = (pa & PMAP_PA_MASK_1G) |
+ PG_RW | PG_V | PG_u | PG_PS;
+ *pde = npde;
+
+ /* Next, map the page's region (2MB) */
+ pde = s4pde_2m_hi(va);
+ npde = (pa & PMAP_PA_MASK_2M) |
+ PG_RW | PG_V | PG_u | PG_M | PG_PS;
+ *pde = npde;
+
+ /* Finally, map the page */
+ pde = s4pte_4k_hi(va);
+ npde = (pa & PMAP_PA_MASK) |
+ PG_RW | PG_V | PG_u | PG_M | PG_PS;
+ *pde = npde;
+ }
}
/*
* Enter a 4KB PDE mapping for the supplied VA into the resume-time pmap.
+ * Note - on amd64, this is only used for low pages (< 2MB phys)
*/
void
hibernate_enter_resume_4k_pde(vaddr_t va)
{
pt_entry_t *pde, npde;
- pde = s4pde_4k(va);
- npde = (HIBERNATE_PT_PAGE & PMAP_PA_MASK) | PG_RW | PG_V | PG_u | PG_M;
+ pde = s4pte_4k_low(va);
+ npde = (HIBERNATE_PDE_LOW & PMAP_PA_MASK) | PG_RW | PG_V | PG_u | PG_M;
*pde = npde;
}
@@ -202,16 +304,30 @@ hibernate_populate_resume_pt(union hibernate_info *hib_info,
{
int phys_page_number, i;
paddr_t pa, piglet_start, piglet_end;
- vaddr_t kern_start_4m_va, kern_end_4m_va, page;
-
- /* Identity map PD, PT, and stack pages */
- pmap_kenter_pa(HIBERNATE_PT_PAGE, HIBERNATE_PT_PAGE, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_PD_PAGE, HIBERNATE_PD_PAGE, VM_PROT_ALL);
+ vaddr_t kern_start_2m_va, kern_end_2m_va, page;
+
+ /* Identity map MMU and stack pages */
+ pmap_kenter_pa(HIBERNATE_PML4_PAGE, HIBERNATE_PML4_PAGE, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_PML4E_LOW, HIBERNATE_PML4E_LOW, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_PML4E_HI, HIBERNATE_PML4E_HI, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_PDPTE_LOW, HIBERNATE_PDPTE_LOW, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_PDPTE_LOW2, HIBERNATE_PDPTE_LOW2, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_PDPTE_HI, HIBERNATE_PDPTE_HI, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_PDE_LOW, HIBERNATE_PDE_LOW, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_PDE_LOW2, HIBERNATE_PDE_LOW2, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_PDE_HI, HIBERNATE_PDE_HI, VM_PROT_ALL);
pmap_kenter_pa(HIBERNATE_STACK_PAGE, HIBERNATE_STACK_PAGE, VM_PROT_ALL);
pmap_activate(curproc);
- bzero((caddr_t)HIBERNATE_PT_PAGE, PAGE_SIZE);
- bzero((caddr_t)HIBERNATE_PD_PAGE, PAGE_SIZE);
+ bzero((caddr_t)HIBERNATE_PML4_PAGE, PAGE_SIZE);
+ bzero((caddr_t)HIBERNATE_PML4E_LOW, PAGE_SIZE);
+ bzero((caddr_t)HIBERNATE_PML4E_HI, PAGE_SIZE);
+ bzero((caddr_t)HIBERNATE_PDPTE_LOW, PAGE_SIZE);
+ bzero((caddr_t)HIBERNATE_PDPTE_LOW2, PAGE_SIZE);
+ bzero((caddr_t)HIBERNATE_PDPTE_HI, PAGE_SIZE);
+ bzero((caddr_t)HIBERNATE_PDE_LOW, PAGE_SIZE);
+ bzero((caddr_t)HIBERNATE_PDE_LOW2, PAGE_SIZE);
+ bzero((caddr_t)HIBERNATE_PDE_HI, PAGE_SIZE);
bzero((caddr_t)HIBERNATE_STACK_PAGE, PAGE_SIZE);
/* PDE for low pages */
@@ -226,41 +342,41 @@ hibernate_populate_resume_pt(union hibernate_info *hib_info,
}
/*
- * Map current kernel VA range using 4M pages
+ * Map current kernel VA range using 2M pages
*/
- kern_start_4m_va = (paddr_t)&start & ~(PAGE_MASK_4M);
- kern_end_4m_va = (paddr_t)&end & ~(PAGE_MASK_4M);
+ kern_start_2m_va = (paddr_t)&start & ~(PAGE_MASK_2M);
+ kern_end_2m_va = (paddr_t)&end & ~(PAGE_MASK_2M);
phys_page_number = 0;
- for (page = kern_start_4m_va; page <= kern_end_4m_va;
- page += NBPD, phys_page_number++) {
- pa = (paddr_t)(phys_page_number * NBPD);
+ for (page = kern_start_2m_va; page <= kern_end_2m_va;
+ page += NBPD_L2, phys_page_number++) {
+ pa = (paddr_t)(phys_page_number * NBPD_L2);
hibernate_enter_resume_mapping(page, pa, 1);
}
/*
* Identity map the image (pig) area
*/
- phys_page_number = image_start / NBPD;
- image_start &= ~(PAGE_MASK_4M);
- image_end &= ~(PAGE_MASK_4M);
+ phys_page_number = image_start / NBPD_L2;
+ image_start &= ~(PAGE_MASK_2M);
+ image_end &= ~(PAGE_MASK_2M);
for (page = image_start; page <= image_end ;
- page += NBPD, phys_page_number++) {
- pa = (paddr_t)(phys_page_number * NBPD);
+ page += NBPD_L2, phys_page_number++) {
+ pa = (paddr_t)(phys_page_number * NBPD_L2);
hibernate_enter_resume_mapping(page, pa, 1);
}
/*
* Map the piglet
*/
- phys_page_number = hib_info->piglet_pa / NBPD;
+ phys_page_number = hib_info->piglet_pa / NBPD_L2;
piglet_start = hib_info->piglet_va;
piglet_end = piglet_start + HIBERNATE_CHUNK_SIZE * 3;
- piglet_start &= ~(PAGE_MASK_4M);
- piglet_end &= ~(PAGE_MASK_4M);
+ piglet_start &= ~(PAGE_MASK_2M);
+ piglet_end &= ~(PAGE_MASK_2M);
for (page = piglet_start; page <= piglet_end ;
- page += NBPD, phys_page_number++) {
- pa = (paddr_t)(phys_page_number * NBPD);
+ page += NBPD_L2, phys_page_number++) {
+ pa = (paddr_t)(phys_page_number * NBPD_L2);
hibernate_enter_resume_mapping(page, pa, 1);
}
}
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c
index 61bd68d98a1..636b066b12f 100644
--- a/sys/arch/amd64/amd64/machdep.c
+++ b/sys/arch/amd64/amd64/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.157 2012/10/09 12:58:07 jsing Exp $ */
+/* $OpenBSD: machdep.c,v 1.158 2012/10/19 16:38:30 mlarkin Exp $ */
/* $NetBSD: machdep.c,v 1.3 2003/05/07 22:58:18 fvdl Exp $ */
/*-
@@ -145,6 +145,10 @@ extern int db_console;
#include <dev/softraidvar.h>
#endif
+#ifdef HIBERNATE
+#include <machine/hibernate_var.h>
+#endif /* HIBERNATE */
+
/* the following is used externally (sysctl_hw) */
char machine[] = MACHINE;
@@ -1291,6 +1295,11 @@ init_x86_64(paddr_t first_avail)
avail_start = ACPI_TRAMPOLINE + PAGE_SIZE;
#endif
+#ifdef HIBERNATE
+ if (avail_start < HIBERNATE_HIBALLOC_PAGE + PAGE_SIZE)
+ avail_start = HIBERNATE_HIBALLOC_PAGE + PAGE_SIZE;
+#endif /* HIBERNATE */
+
/*
* We need to go through the BIOS memory map given, and
* fill out mem_clusters and mem_cluster_cnt stuff, taking
diff --git a/sys/arch/amd64/include/hibernate_var.h b/sys/arch/amd64/include/hibernate_var.h
index c27bec0e6f2..7e17415360a 100644
--- a/sys/arch/amd64/include/hibernate_var.h
+++ b/sys/arch/amd64/include/hibernate_var.h
@@ -16,20 +16,52 @@
/* amd64 hibernate support definitions */
-#define NBPD 4194304
+#define PAGE_MASK_2M (NBPD_L2 - 1)
+#define PMAP_PA_MASK_2M ~((paddr_t)PAGE_MASK_2M)
-#define PAGE_SHIFT_4M 22
-#define PAGE_MASK_4M (NBPD - 1)
-#define PMAP_PA_MASK_4M ~((paddr_t)PAGE_MASK_4M)
+#define PAGE_MASK_1G (NBPD_L3 - 1)
+#define PMAP_PA_MASK_1G ~((paddr_t)PAGE_MASK_1G)
-#define PIGLET_PAGE_MASK ~((paddr_t)PAGE_MASK_4M)
+#define PAGE_MASK_512G (NBPD_L4 - 1)
+#define PMAP_PA_MASK_512G ~((paddr_t)PAGE_MASK_512G)
-#define HIBERNATE_PD_PAGE (PAGE_SIZE * 5)
-#define HIBERNATE_PT_PAGE (PAGE_SIZE * 6)
-#define HIBERNATE_STACK_PAGE (PAGE_SIZE * 8)
-#define HIBERNATE_INFLATE_PAGE (PAGE_SIZE * 9)
-#define HIBERNATE_COPY_PAGE (PAGE_SIZE * 10)
-#define HIBERNATE_HIBALLOC_PAGE (PAGE_SIZE * 11)
+#define HIBERNATE_512GB ((paddr_t)1 << (paddr_t)39)
+#define HIBERNATE_1GB ((paddr_t)1 << (paddr_t)30)
+
+#define PIGLET_PAGE_MASK ~((paddr_t)PAGE_MASK_2M)
+
+/*
+ * amd64 uses a fixed PML4E to map the first 512GB phys mem plus one more
+ * to map any ranges of phys mem past 512GB (if needed)
+ */
+#define HIBERNATE_PML4_PAGE (PAGE_SIZE * 5)
+#define HIBERNATE_PML4E_LOW (PAGE_SIZE * 6)
+#define HIBERNATE_PML4E_HI (PAGE_SIZE * 7)
+
+/*
+ * amd64 uses one fixed PDPTE to map the first 1GB phys mem plus one more
+ * to map any other 1GB ranges within the first 512GB phys, plus one more to
+ * map any 1GB range in any subsequent 512GB range
+ */
+#define HIBERNATE_PDPTE_LOW (PAGE_SIZE * 8)
+#define HIBERNATE_PDPTE_LOW2 (PAGE_SIZE * 9)
+#define HIBERNATE_PDPTE_HI (PAGE_SIZE * 10)
+
+/*
+ * amd64 uses one fixed PDE to map the first 2MB phys mem plus one more
+ * to map any other 2MB range within the first 1GB, plus one more to map any
+ * 2MB range in any subsequent 512GB range. These PDEs point to 512 PTEs each
+ * (4KB pages) or may directly map a 2MB range
+ */
+#define HIBERNATE_PDE_LOW (PAGE_SIZE * 11)
+#define HIBERNATE_PDE_LOW2 (PAGE_SIZE * 12)
+#define HIBERNATE_PDE_HI (PAGE_SIZE * 13)
+
+#define HIBERNATE_STACK_PAGE (PAGE_SIZE * 14)
+#define HIBERNATE_INFLATE_PAGE (PAGE_SIZE * 15)
+#define HIBERNATE_COPY_PAGE (PAGE_SIZE * 16)
+/* HIBERNATE_HIBALLOC_PAGE must be the last stolen page (see machdep.c) */
+#define HIBERNATE_HIBALLOC_PAGE (PAGE_SIZE * 17)
/* Use 4MB hibernation chunks */
#define HIBERNATE_CHUNK_SIZE 0x400000
@@ -38,8 +70,19 @@
#define HIBERNATE_STACK_OFFSET 0x0F00
-#define atop_4m(x) ((x) >> PAGE_SHIFT_4M)
-#define atop_4k(x) ((x) >> PAGE_SHIFT)
-#define s4pde_4m(va) ((pt_entry_t *)HIBERNATE_PD_PAGE + atop_4m(va))
-#define s4pde_4k(va) ((pt_entry_t *)HIBERNATE_PD_PAGE + atop_4k(va))
-#define s4pte_4k(va) ((pt_entry_t *)HIBERNATE_PT_PAGE + atop_4k(va))
+#define atop_4k(x) ((x) >> L1_SHIFT)
+#define atop_2m(x) ((x) >> L2_SHIFT)
+#define atop_1g(x) ((x) >> L3_SHIFT)
+#define atop_512g(x) ((x) >> L4_SHIFT)
+
+#define s4pte_4k_low(va) ((pt_entry_t *)HIBERNATE_PDE_LOW + atop_4k(va))
+#define s4pte_4k_low2(va) ((pt_entry_t *)HIBERNATE_PDE_LOW2 + atop_4k(va))
+#define s4pte_4k_hi(va) ((pt_entry_t *)HIBERNATE_PDE_HI + atop_4k(va))
+
+#define s4pde_2m_low(va) ((pt_entry_t *)HIBERNATE_PDPTE_LOW + atop_2m(va))
+#define s4pde_2m_low2(va) ((pt_entry_t *)HIBERNATE_PDPTE_LOW2 + atop_2m(va))
+#define s4pde_2m_hi(va) ((pt_entry_t *)HIBERNATE_PDPTE_HI + atop_2m(va))
+#define s4pde_1g_low(va) ((pt_entry_t *)HIBERNATE_PML4E_LOW + atop_1g(va))
+#define s4pde_1g_low2(va) ((pt_entry_t *)HIBERNATE_PML4E_LOW + atop_1g(va))
+#define s4pde_1g_hi(va) ((pt_entry_t *)HIBERNATE_PML4E_HI + atop_1g(va))
+#define s4pde_512g(va) ((pt_entry_t *)HIBERNATE_PML4_PAGE + atop_512g(va))