author		Mark Kettenis <kettenis@cvs.openbsd.org>	2010-05-25 19:59:36 +0000
committer	Mark Kettenis <kettenis@cvs.openbsd.org>	2010-05-25 19:59:36 +0000
commit		fa6c36de7e6cd7c0c10c3dd3fe982de6c0ce3ff8 (patch)
tree		72a9ebe8f9ae612ed87a377879aebadaed304a43 /sys
parent		331281ea533954a7a7e639b03604836764f4dd29 (diff)
Actively remove processes from the runqueues of a CPU when we stop it.
Also make sure not to take the scheduler lock once we have stopped a CPU, so that we can safely take it away without having to worry about deadlock because it happened to own the scheduler lock. Fixes issues with suspend on SMP machines. ok mlarkin@, marco@, art@, deraadt@
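
The first half of the fix is visible in the sched_chooseproc() hunk below: when a CPU is flagged SPCF_SHOULDHALT, any processes still queued on it are unlinked, given a new CPU via sched_choosecpu(), and requeued there. The following userspace model is a minimal sketch of that drain pattern under simplified assumptions; struct task, struct cpu, pick_other_cpu() and drain_runqueues() are illustrative stand-ins, not the kernel's types or API.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>          /* same TAILQ macro family the kernel queues use */

#define NCPUS   4
#define NQS     4               /* run queues per CPU, standing in for SCHED_NQS */

struct task {
        int                     id;
        TAILQ_ENTRY(task)       t_runq;
};
TAILQ_HEAD(taskq, task);

struct cpu {
        int             halting;        /* stands in for SPCF_SHOULDHALT */
        struct taskq    qs[NQS];
};

static struct cpu cpus[NCPUS];

/* Illustrative stand-in for sched_choosecpu(): any CPU not being halted. */
static struct cpu *
pick_other_cpu(void)
{
        for (int i = 0; i < NCPUS; i++)
                if (!cpus[i].halting)
                        return &cpus[i];
        return NULL;
}

/*
 * The drain pattern from the patch: walk every queue of a halting CPU,
 * unlink each task and requeue it on a CPU that stays up, so nothing
 * is left on a runqueue that no one will ever service again.
 */
static void
drain_runqueues(struct cpu *ci)
{
        struct task *t;
        struct cpu *nci;

        for (int q = 0; q < NQS; q++) {
                while ((t = TAILQ_FIRST(&ci->qs[q])) != NULL) {
                        TAILQ_REMOVE(&ci->qs[q], t, t_runq);    /* ~ remrunqueue() */
                        if ((nci = pick_other_cpu()) == NULL)
                                abort();        /* at least one CPU must stay up */
                        TAILQ_INSERT_TAIL(&nci->qs[q], t, t_runq); /* ~ setrunqueue() */
                }
        }
}

int
main(void)
{
        struct task *t;

        for (int i = 0; i < NCPUS; i++)
                for (int q = 0; q < NQS; q++)
                        TAILQ_INIT(&cpus[i].qs[q]);

        /* Park a few tasks on CPU 1, then halt it and drain its queues. */
        for (int n = 0; n < 3; n++) {
                t = malloc(sizeof(*t));
                t->id = n;
                TAILQ_INSERT_TAIL(&cpus[1].qs[n % NQS], t, t_runq);
        }
        cpus[1].halting = 1;
        drain_runqueues(&cpus[1]);

        for (int q = 0; q < NQS; q++)
                TAILQ_FOREACH(t, &cpus[0].qs[q], t_runq)
                        printf("task %d now on cpu0 queue %d\n", t->id, q);
        return 0;
}

The sketch always drains from the queue head with TAILQ_FIRST(), so removal during iteration stays safe; per the diff, after the drain the halting CPU simply hands back its idle proc (spc_idleproc).
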
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/kern_sched.c	27
1 file changed, 22 insertions, 5 deletions
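
The second half of the fix is the handshake in the sched_idle() hunk below: before marking itself SPCF_HALTED, the idle thread now takes the scheduler lock and re-checks spc_whichqs, so a process queued at the last moment is not stranded; once HALTED is set, the stopping side knows the CPU cannot be holding the scheduler lock and can be taken away safely. The following is a minimal userspace model of that handshake; sched_lock, schedflags, whichqs and idle_halt_path() are illustrative stand-ins for SCHED_LOCK, spc_schedflags, spc_whichqs and the sched_idle() loop, not the kernel's API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define SPCF_SHOULDHALT 0x01
#define SPCF_HALTED     0x02

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int schedflags;
static int whichqs;     /* non-zero iff the runqueues are non-empty; only
                         * written with the lock held, mirroring how
                         * setrunqueue() runs under SCHED_LOCK */

/*
 * The idle loop's halt path after the patch: take the scheduler lock
 * and re-check the runqueues before setting SPCF_HALTED. Without the
 * re-check, a process queued between the flag test and the store could
 * be stranded on a CPU that never runs again.
 */
static void
idle_halt_path(void)
{
        int flags = atomic_load(&schedflags);

        if ((flags & SPCF_SHOULDHALT) && !(flags & SPCF_HALTED)) {
                pthread_mutex_lock(&sched_lock);
                atomic_fetch_or(&schedflags,            /* ~ atomic_setbits_int() */
                    whichqs ? 0 : SPCF_HALTED);
                pthread_mutex_unlock(&sched_lock);
                /*
                 * wakeup(spc) would go here. Once SPCF_HALTED is set the
                 * CPU may never release sched_lock again, which is why the
                 * stop path must not take the lock from this point on.
                 */
        }
}

int
main(void)
{
        atomic_fetch_or(&schedflags, SPCF_SHOULDHALT);  /* ~ stop request */
        idle_halt_path();
        printf("halted: %s\n",
            (atomic_load(&schedflags) & SPCF_HALTED) ? "yes" : "no");
        return 0;
}
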
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index 3bee9d98f0c..82f606fdfe5 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.20 2010/05/14 18:47:56 kettenis Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.21 2010/05/25 19:59:35 kettenis Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -42,6 +42,7 @@ struct proc *sched_steal_proc(struct cpu_info *);
*/
struct cpuset sched_idle_cpus;
struct cpuset sched_queued_cpus;
+struct cpuset sched_all_cpus;
/*
* A few notes about cpu_switchto that is implemented in MD code.
@@ -84,6 +85,7 @@ sched_init_cpu(struct cpu_info *ci)
* structures.
*/
cpuset_init_cpu(ci);
+ cpuset_add(&sched_all_cpus, ci);
}
void
@@ -147,9 +149,13 @@ sched_idle(void *v)
cpuset_add(&sched_idle_cpus, ci);
cpu_idle_enter();
while (spc->spc_whichqs == 0) {
- if (spc->spc_schedflags & SPCF_SHOULDHALT) {
+ if (spc->spc_schedflags & SPCF_SHOULDHALT &&
+ (spc->spc_schedflags & SPCF_HALTED) == 0) {
+ cpuset_del(&sched_idle_cpus, ci);
+ SCHED_LOCK(s);
atomic_setbits_int(&spc->spc_schedflags,
- SPCF_HALTED);
+ spc->spc_whichqs ? 0 : SPCF_HALTED);
+ SCHED_UNLOCK(s);
wakeup(spc);
}
cpu_idle_cycle();
@@ -248,6 +254,15 @@ sched_chooseproc(void)
SCHED_ASSERT_LOCKED();
if (spc->spc_schedflags & SPCF_SHOULDHALT) {
+ if (spc->spc_whichqs) {
+ for (queue = 0; queue < SCHED_NQS; queue++) {
+ TAILQ_FOREACH(p, &spc->spc_qs[queue], p_runq) {
+ remrunqueue(p);
+ p->p_cpu = sched_choosecpu(p);
+ setrunqueue(p);
+ }
+ }
+ }
p = spc->spc_idleproc;
KASSERT(p);
p->p_stat = SRUN;
@@ -323,7 +338,7 @@ sched_choosecpu_fork(struct proc *parent, int flags)
*/
cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
if (cpuset_first(&set) == NULL)
- cpuset_add_all(&set);
+ cpuset_copy(&set, &sched_all_cpus);
while ((ci = cpuset_first(&set)) != NULL) {
cpuset_del(&set, ci);
@@ -380,7 +395,7 @@ sched_choosecpu(struct proc *p)
}
if (cpuset_first(&set) == NULL)
- cpuset_add_all(&set);
+ cpuset_copy(&set, &sched_all_cpus);
while ((ci = cpuset_first(&set)) != NULL) {
int cost = sched_proc_to_cpu_cost(ci, p);
@@ -554,6 +569,7 @@ sched_start_secondary_cpus(void)
if (CPU_IS_PRIMARY(ci))
continue;
+ cpuset_add(&sched_all_cpus, ci);
atomic_clearbits_int(&spc->spc_schedflags,
SPCF_SHOULDHALT | SPCF_HALTED);
}
@@ -573,6 +589,7 @@ sched_stop_secondary_cpus(void)
if (CPU_IS_PRIMARY(ci))
continue;
+ cpuset_del(&sched_all_cpus, ci);
atomic_setbits_int(&spc->spc_schedflags, SPCF_SHOULDHALT);
}
CPU_INFO_FOREACH(cii, ci) {