summaryrefslogtreecommitdiff
path: root/sys/kern
diff options
context:
space:
mode:
authorAriane van der Steldt <ariane@cvs.openbsd.org>2011-07-08 21:00:54 +0000
committerAriane van der Steldt <ariane@cvs.openbsd.org>2011-07-08 21:00:54 +0000
commit8da15dd837b06c6c3806f4ff006ed07de54071e3 (patch)
treeec3c3a8d3a82e090ca0f9a9aef93555e77092aa5 /sys/kern
parent9e05c7d3b2cef8835e34352f177adf48ff4992ed (diff)
Ensure all pages in pmemrange can be marked as dirty.
It'd be a very bad idea to hand out dirty pages as zeroed, just because we came back from hibernate. No callers at the moment, will be called on hibernate resume path.
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/subr_hibernate.c43
1 file changed, 42 insertions(+), 1 deletion(-)
diff --git a/sys/kern/subr_hibernate.c b/sys/kern/subr_hibernate.c
index c622e1a2cb8..7ac3bf26277 100644
--- a/sys/kern/subr_hibernate.c
+++ b/sys/kern/subr_hibernate.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_hibernate.c,v 1.5 2011/07/08 18:34:46 ariane Exp $ */
+/* $OpenBSD: subr_hibernate.c,v 1.6 2011/07/08 21:00:53 ariane Exp $ */
/*
* Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
@@ -264,6 +264,47 @@ uvm_pmr_zero_everything(void)
}
/*
+ * Mark all memory as dirty.
+ *
+ * Used to inform the system that the clean memory isn't clean for some
+ * reason, for example because we just came back from hibernate.
+ */
+void
+uvm_pmr_dirty_everything(void)
+{
+	struct uvm_pmemrange *pmr;
+	struct vm_page *pg;
+	int i;
+
+	uvm_lock_fpageq();
+	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
+		/* Dirty single pages. */
+		while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_ZERO]))
+		    != NULL) {
+			uvm_pmr_remove(pmr, pg);
+			uvm_pagezero(pg);
+			atomic_clearbits_int(&pg->pg_flags, PG_ZERO);
+			uvm_pmr_insert(pmr, pg, 0);
+		}
+
+		/* Dirty multi page ranges. */
+		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_ZERO]))
+		    != NULL) {
+			pg--; /* Size tree always has second page. */
+			uvm_pmr_remove(pmr, pg);
+			for (i = 0; i < pg->fpgsz; i++) {
+				uvm_pagezero(&pg[i]);
+				atomic_clearbits_int(&pg[i].pg_flags, PG_ZERO);
+			}
+			uvm_pmr_insert(pmr, pg, 0);
+		}
+	}
+
+	uvmexp.zeropages = 0;
+	uvm_unlock_fpageq();
+}
+
+/*
* Allocate the highest address that can hold sz.
*
* sz in bytes.