summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChristiano F. Haesbaert <haesbaert@cvs.openbsd.org>2012-03-10 22:02:33 +0000
committerChristiano F. Haesbaert <haesbaert@cvs.openbsd.org>2012-03-10 22:02:33 +0000
commitf2dbf1f4bd912a7f5772a1c24f52454eb41b08b7 (patch)
tree435072c11618381e70270304b97c94541dfebe0d
parent905de215ee721696a80f0bc2e514186742bf1985 (diff)
Account for sched_noidle and document the scheduler variables.
ok tedu@
-rw-r--r--sys/kern/kern_sched.c24
1 file changed, 13 insertions, 11 deletions
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index b83b5ba0857..1c2c30f924a 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.24 2011/10/12 18:30:09 miod Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.25 2012/03/10 22:02:32 haesbaert Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -45,6 +45,16 @@ struct cpuset sched_queued_cpus;
struct cpuset sched_all_cpus;
/*
+ * Some general scheduler counters.
+ */
+uint64_t sched_nmigrations; /* Cpu migration counter */
+uint64_t sched_nomigrations; /* Cpu no migration counter */
+uint64_t sched_noidle; /* Times we didn't pick the idle task */
+uint64_t sched_stolen; /* Times we stole proc from other cpus */
+uint64_t sched_choose; /* Times we chose a cpu */
+uint64_t sched_wasidle; /* Times we came out of idle */
+
+/*
* A few notes about cpu_switchto that is implemented in MD code.
*
* cpu_switchto takes two arguments, the old proc and the proc
@@ -275,6 +285,7 @@ again:
queue = ffs(spc->spc_whichqs) - 1;
p = TAILQ_FIRST(&spc->spc_qs[queue]);
remrunqueue(p);
+ sched_noidle++;
KASSERT(p->p_stat == SRUN);
} else if ((p = sched_steal_proc(curcpu())) == NULL) {
p = spc->spc_idleproc;
@@ -301,14 +312,6 @@ again:
return (p);
}
-uint64_t sched_nmigrations;
-uint64_t sched_noidle;
-uint64_t sched_stolen;
-
-uint64_t sched_choose;
-uint64_t sched_wasidle;
-uint64_t sched_nomigrations;
-
struct cpu_info *
sched_choosecpu_fork(struct proc *parent, int flags)
{
@@ -517,9 +520,8 @@ sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p)
sched_cost_priority;
cost += sched_cost_runnable;
}
- if (cpuset_isset(&sched_queued_cpus, ci)) {
+ if (cpuset_isset(&sched_queued_cpus, ci))
cost += spc->spc_nrun * sched_cost_runnable;
- }
/*
* Higher load on the destination means we don't want to go there.