summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorClaudio Jeker <claudio@cvs.openbsd.org>2023-08-18 09:18:53 +0000
committerClaudio Jeker <claudio@cvs.openbsd.org>2023-08-18 09:18:53 +0000
commit7bbd927f317b7b5aece83993602bd9edf8198194 (patch)
tree32e06cd8e105ccd5e2deaa0b9d78ca79998285f0
parent32da8f5b8fbb23a4d32a689ec398dab6567bbe0d (diff)
Move the loadavg calculation to sched_bsd.c as update_loadavg()
With this uvm_meter() is no more and update_loadavg() uses a simple timeout instead of getting called via schedcpu(). OK deraadt@ mpi@ cheloha@
-rw-r--r--sys/kern/sched_bsd.c50
-rw-r--r--sys/uvm/uvm_extern.h3
-rw-r--r--sys/uvm/uvm_meter.c50
3 files changed, 51 insertions, 52 deletions
diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
index ae38ac60a34..e06aa801d98 100644
--- a/sys/kern/sched_bsd.c
+++ b/sys/kern/sched_bsd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.81 2023/08/14 08:33:24 mpi Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.82 2023/08/18 09:18:52 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -61,9 +61,24 @@ int lbolt; /* once a second sleep address */
struct __mp_lock sched_lock;
#endif
+void update_loadavg(void *);
void schedcpu(void *);
uint32_t decay_aftersleep(uint32_t, uint32_t);
+extern struct cpuset sched_idle_cpus;
+
+/*
+ * constants for averages over 1, 5, and 15 minutes when sampling at
+ * 5 second intervals.
+ */
+static const fixpt_t cexp[3] = {
+ 0.9200444146293232 * FSCALE, /* exp(-1/12) */
+ 0.9834714538216174 * FSCALE, /* exp(-1/60) */
+ 0.9944598480048967 * FSCALE, /* exp(-1/180) */
+};
+
+struct loadavg averunnable;
+
/*
* Force switch among equal priority processes every 100ms.
*/
@@ -95,6 +110,34 @@ roundrobin(struct clockintr *cl, void *cf)
need_resched(ci);
}
+
+
+/*
+ * update_loadav: compute a tenex style load average of a quantity on
+ * 1, 5, and 15 minute intervals.
+ */
+void
+update_loadavg(void *arg)
+{
+ struct timeout *to = (struct timeout *)arg;
+ CPU_INFO_ITERATOR cii;
+ struct cpu_info *ci;
+ u_int i, nrun = 0;
+
+ CPU_INFO_FOREACH(cii, ci) {
+ if (!cpuset_isset(&sched_idle_cpus, ci))
+ nrun++;
+ nrun += ci->ci_schedstate.spc_nrun;
+ }
+
+ for (i = 0; i < 3; i++) {
+ averunnable.ldavg[i] = (cexp[i] * averunnable.ldavg[i] +
+ nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
+ }
+
+ timeout_add_sec(to, 5);
+}
+
/*
* Constants for digital decay and forget:
* 90% of (p_estcpu) usage in 5 * loadav time
@@ -236,7 +279,6 @@ schedcpu(void *arg)
}
SCHED_UNLOCK(s);
}
- uvm_meter();
wakeup(&lbolt);
timeout_add_sec(to, 1);
}
@@ -691,6 +733,7 @@ void
scheduler_start(void)
{
static struct timeout schedcpu_to;
+ static struct timeout loadavg_to;
/*
* We avoid polluting the global namespace by keeping the scheduler
@@ -699,7 +742,10 @@ scheduler_start(void)
* its job.
*/
timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
+ timeout_set(&loadavg_to, update_loadavg, &loadavg_to);
+
schedcpu(&schedcpu_to);
+ update_loadavg(&loadavg_to);
#ifndef SMALL_KERNEL
if (perfpolicy == PERFPOL_AUTO)
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index a6fc5f04121..53461acb3d4 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.170 2023/06/21 21:16:21 cheloha Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.171 2023/08/18 09:18:52 claudio Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -414,7 +414,6 @@ void uvmspace_free(struct vmspace *);
struct vmspace *uvmspace_share(struct process *);
int uvm_share(vm_map_t, vaddr_t, vm_prot_t,
vm_map_t, vaddr_t, vsize_t);
-void uvm_meter(void);
int uvm_sysctl(int *, u_int, void *, size_t *,
void *, size_t, struct proc *);
struct vm_page *uvm_pagealloc(struct uvm_object *,
diff --git a/sys/uvm/uvm_meter.c b/sys/uvm/uvm_meter.c
index e0c5d41ae8d..25bc9126d9d 100644
--- a/sys/uvm/uvm_meter.c
+++ b/sys/uvm/uvm_meter.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_meter.c,v 1.48 2023/08/03 16:12:08 claudio Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.49 2023/08/18 09:18:52 claudio Exp $ */
/* $NetBSD: uvm_meter.c,v 1.21 2001/07/14 06:36:03 matt Exp $ */
/*
@@ -63,58 +63,12 @@
#define MAXSLP 20
int maxslp = MAXSLP; /* patchable ... */
-struct loadavg averunnable;
-/*
- * constants for averages over 1, 5, and 15 minutes when sampling at
- * 5 second intervals.
- */
-
-static const fixpt_t cexp[3] = {
- 0.9200444146293232 * FSCALE, /* exp(-1/12) */
- 0.9834714538216174 * FSCALE, /* exp(-1/60) */
- 0.9944598480048967 * FSCALE, /* exp(-1/180) */
-};
+extern struct loadavg averunnable;
-
-static void uvm_loadav(struct loadavg *);
void uvm_total(struct vmtotal *);
void uvmexp_read(struct uvmexp *);
-/*
- * uvm_meter: calculate load average
- */
-void
-uvm_meter(void)
-{
- if ((gettime() % 5) == 0)
- uvm_loadav(&averunnable);
-}
-
-/*
- * uvm_loadav: compute a tenex style load average of a quantity on
- * 1, 5, and 15 minute intervals.
- */
-static void
-uvm_loadav(struct loadavg *avg)
-{
- extern struct cpuset sched_idle_cpus;
- CPU_INFO_ITERATOR cii;
- struct cpu_info *ci;
- u_int i, nrun = 0;
-
- CPU_INFO_FOREACH(cii, ci) {
- if (!cpuset_isset(&sched_idle_cpus, ci))
- nrun++;
- nrun += ci->ci_schedstate.spc_nrun;
- }
-
- for (i = 0; i < 3; i++) {
- avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
- nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
- }
-}
-
char malloc_conf[16];
/*