author    Mike Larkin <mlarkin@cvs.openbsd.org>    2014-05-29 08:00:25 +0000
committer Mike Larkin <mlarkin@cvs.openbsd.org>    2014-05-29 08:00:25 +0000
commit    1969382c3468542142aa467212267b3f1d40b719 (patch)
tree      33cd83d25d78a9d95a6b36ef8159f3304d9adf14 /sys
parent    551b1952d9013d00f458416a1d85fe206688fcfb (diff)
Read MAXPHYS bytes at a time in the hibernate image read, instead of
PAGE_SIZE bytes. Completes the MAXPHYS optimizations in the read path.
With input from guenther@, who suggested a version that was a little
easier to understand. Tested on i386 and amd64.
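In outline, the change replaces per-page block I/O with transfers of up
to MAXPHYS bytes each. A minimal standalone sketch of that loop pattern
(illustrative only: do_block_io(), read_image(), and the buffer/total
names are hypothetical stand-ins, not the kernel's interfaces):

/*
 * Sketch of the per-transfer pattern this commit adopts: issue up to
 * MAXPHYS bytes per block I/O instead of one PAGE_SIZE read per call.
 */
#include <stddef.h>

#define MAXPHYS (64 * 1024)	/* assumed value, for illustration */

static void
do_block_io(char *dst, size_t len)
{
	/* stand-in for hibernate_block_io(); real code reads from disk */
	(void)dst;
	(void)len;
}

static void
read_image(char *buf, size_t total)
{
	size_t processed = 0, read_size;

	while (processed < total) {
		/* cap each transfer at MAXPHYS, as the new loop does */
		if (total - processed >= MAXPHYS)
			read_size = MAXPHYS;
		else
			read_size = total - processed;
		do_block_io(buf + processed, read_size);
		processed += read_size;
	}
}

Fewer, larger transfers mean fewer pmap_kenter_pa()/pmap_kremove()
round trips and fewer calls into the block I/O layer per image chunk.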
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/subr_hibernate.c | 44
1 file changed, 30 insertions(+), 14 deletions(-)
diff --git a/sys/kern/subr_hibernate.c b/sys/kern/subr_hibernate.c
index 1916ee88b85..9d928096b12 100644
--- a/sys/kern/subr_hibernate.c
+++ b/sys/kern/subr_hibernate.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_hibernate.c,v 1.90 2014/05/21 02:26:49 mlarkin Exp $ */
+/* $OpenBSD: subr_hibernate.c,v 1.91 2014/05/29 08:00:24 mlarkin Exp $ */
/*
* Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
@@ -1653,6 +1653,7 @@ hibernate_read_chunks(union hibernate_info *hib, paddr_t pig_start,
daddr_t blkctr;
size_t processed, compressed_size, read_size;
int overlap, found, nchunks, nochunks = 0, nfchunks = 0, npchunks = 0;
+ int num_io_pages;
short *ochunks, *pchunks, *fchunks, i, j;
vaddr_t tempva = (vaddr_t)NULL, hibernate_fchunk_area = (vaddr_t)NULL;
@@ -1665,7 +1666,8 @@ hibernate_read_chunks(union hibernate_info *hib, paddr_t pig_start,
 * used only during image read. They disappear from existence
* when the suspended kernel is unpacked on top of us.
*/
- tempva = (vaddr_t)km_alloc(2*PAGE_SIZE, &kv_any, &kp_none, &kd_nowait);
+ tempva = (vaddr_t)km_alloc(MAXPHYS + PAGE_SIZE, &kv_any, &kp_none,
+ &kd_nowait);
if (!tempva)
return (1);
hibernate_fchunk_area = (vaddr_t)km_alloc(24*PAGE_SIZE, &kv_any,
@@ -1683,11 +1685,10 @@ hibernate_read_chunks(union hibernate_info *hib, paddr_t pig_start,
ochunks = (short *)(hibernate_fchunk_area + (16*PAGE_SIZE));
/* Map the chunk ordering region */
- for(i=0; i<24 ; i++) {
+ for(i=0; i<24 ; i++)
pmap_kenter_pa(hibernate_fchunk_area + (i*PAGE_SIZE),
piglet_base + ((4+i)*PAGE_SIZE), VM_PROT_ALL);
- pmap_update(pmap_kernel());
- }
+ pmap_update(pmap_kernel());
nchunks = hib->chunk_ctr;
@@ -1803,23 +1804,38 @@ hibernate_read_chunks(union hibernate_info *hib, paddr_t pig_start,
compressed_size = chunks[fchunks[i]].compressed_size;
while (processed < compressed_size) {
- pmap_kenter_pa(tempva, img_cur, VM_PROT_ALL);
- pmap_kenter_pa(tempva + PAGE_SIZE, img_cur+PAGE_SIZE,
- VM_PROT_ALL);
- pmap_update(pmap_kernel());
-
- if (compressed_size - processed >= PAGE_SIZE)
- read_size = PAGE_SIZE;
+ if (compressed_size - processed >= MAXPHYS)
+ read_size = MAXPHYS;
else
read_size = compressed_size - processed;
+ /*
+ * We're reading read_size bytes, offset from the
+ * start of a page by img_cur % PAGE_SIZE, so the
+ * end will be read_size + (img_cur % PAGE_SIZE)
+ * from the start of the first page. Round that
+ * up to the next page size.
+ */
+ num_io_pages = (read_size + (img_cur % PAGE_SIZE)
+ + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ KASSERT(num_io_pages <= MAXPHYS/PAGE_SIZE + 1);
+
+ /* Map pages for this read */
+ for (j = 0; j < num_io_pages; j ++)
+ pmap_kenter_pa(tempva + j * PAGE_SIZE,
+ img_cur + j * PAGE_SIZE, VM_PROT_ALL);
+
+ pmap_update(pmap_kernel());
+
hibernate_block_io(hib, blkctr, read_size,
tempva + (img_cur & PAGE_MASK), 0);
blkctr += (read_size / DEV_BSIZE);
- pmap_kremove(tempva, PAGE_SIZE);
- pmap_kremove(tempva + PAGE_SIZE, PAGE_SIZE);
+ pmap_kremove(tempva, num_io_pages * PAGE_SIZE);
+ pmap_update(pmap_kernel());
+
processed += read_size;
img_cur += read_size;
}
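
The subtlest part of the new code is sizing the temporary mapping:
since img_cur need not be page-aligned, a MAXPHYS-byte transfer can
span MAXPHYS/PAGE_SIZE + 1 pages, which is why tempva is now allocated
as MAXPHYS + PAGE_SIZE bytes instead of 2*PAGE_SIZE. A small standalone
check of the num_io_pages rounding, assuming PAGE_SIZE of 4096 and
MAXPHYS of 65536 (illustrative values, not the kernel build
environment):

/*
 * Standalone check of the num_io_pages arithmetic from the diff,
 * with assumed constants; compile and run in userland.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define MAXPHYS   65536

int
main(void)
{
	unsigned long img_cur = 0x1234;	/* arbitrary unaligned offset */
	unsigned long read_size = MAXPHYS;
	int num_io_pages;

	/* Same rounding as the diff: bytes spanned, rounded up to pages. */
	num_io_pages = (read_size + (img_cur % PAGE_SIZE)
	    + PAGE_SIZE - 1) / PAGE_SIZE;

	/* Matches the KASSERT bound in the new code. */
	assert(num_io_pages <= MAXPHYS / PAGE_SIZE + 1);
	printf("num_io_pages = %d\n", num_io_pages);	/* prints 17 */
	return (0);
}

With read_size = 65536 and an in-page offset of 564 bytes (0x1234 mod
4096), the transfer touches 17 pages, exactly the MAXPHYS/PAGE_SIZE + 1
worst case the KASSERT in the diff guards against.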