author	Artur Grabowski <art@cvs.openbsd.org>	2011-07-06 01:49:43 +0000
committer	Artur Grabowski <art@cvs.openbsd.org>	2011-07-06 01:49:43 +0000
commit	25bb7d4aa9c94e53d29ef5d4df5d5e815d5671db (patch)
tree	1ad114e7ba96deb7be83fd7cdf4027879685a942	/sys/kern/sched_bsd.c
parent	5ff81261978fb8f156cf84f28dee710ba45ca58e (diff)
Stop using the P_BIGLOCK flag to figure out when we should release the
biglock in mi_switch and just check if we're holding the biglock.

The idea is that the first entry point into the kernel uses
KERNEL_PROC_LOCK and recursive calls use KERNEL_LOCK. This assumption
is violated in at least one place and has been causing confusion for
lots of people.

Initial bug report and analysis from Pedro.
kettenis@ beck@ oga@ thib@ dlg@ ok
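As an illustration of the pattern the change moves to, here is a minimal,
self-contained C sketch (not OpenBSD kernel code): instead of consulting a
per-process flag, the switch path asks the lock itself whether it is held,
releases every recursion level before yielding the CPU, and restores the
same count afterwards. The mock_lock type and the mock_* helpers are
hypothetical stand-ins for __mp_lock and its functions; the real change is
in the diff that follows.

/*
 * Sketch only: a toy recursive lock that remembers how many times it
 * is held, and a mock mi_switch() that drops and restores that depth.
 */
#include <assert.h>
#include <stdio.h>

struct mock_lock {
	int held;			/* recursion depth held by this CPU */
};

static int
mock_lock_held(struct mock_lock *mpl)
{
	return (mpl->held > 0);
}

static int
mock_release_all(struct mock_lock *mpl)
{
	int count = mpl->held;

	mpl->held = 0;
	return (count);			/* caller restores this many levels */
}

static void
mock_acquire_count(struct mock_lock *mpl, int count)
{
	mpl->held += count;
}

static struct mock_lock kernel_lock;	/* stand-in for the real biglock */

static void
mock_mi_switch(void)
{
	int hold_count;

	/* Ask the lock, not a flag, whether we are holding it. */
	if (mock_lock_held(&kernel_lock))
		hold_count = mock_release_all(&kernel_lock);
	else
		hold_count = 0;

	/* ... cpu_switchto() would run another process here ... */

	/* Reacquire exactly as many levels as we gave up, if any. */
	if (hold_count)
		mock_acquire_count(&kernel_lock, hold_count);
}

int
main(void)
{
	/* Enter the "kernel" twice, as a recursive caller would. */
	mock_acquire_count(&kernel_lock, 2);
	mock_mi_switch();
	assert(kernel_lock.held == 2);	/* both levels restored */

	/* Switching without the lock held must leave it untouched. */
	kernel_lock.held = 0;
	mock_mi_switch();
	assert(kernel_lock.held == 0);

	printf("biglock depth preserved across the mock switch\n");
	return (0);
}

The else branch the diff introduces is what keeps this correct when the
biglock is not held: hold_count must be defined as 0 so that the later
reacquire, now keyed on hold_count instead of the P_BIGLOCK flag, is
skipped.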
Diffstat (limited to 'sys/kern/sched_bsd.c')
-rw-r--r--	sys/kern/sched_bsd.c	8
1 files changed, 5 insertions, 3 deletions
diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
index 9290d0105c8..e24b022ee38 100644
--- a/sys/kern/sched_bsd.c
+++ b/sys/kern/sched_bsd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.25 2011/03/07 07:07:13 guenther Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.26 2011/07/06 01:49:42 art Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */

/*-
@@ -366,8 +366,10 @@ mi_switch(void)
* Release the kernel_lock, as we are about to yield the CPU.
*/
sched_count = __mp_release_all_but_one(&sched_lock);
- if (p->p_flag & P_BIGLOCK)
+ if (__mp_lock_held(&kernel_lock))
hold_count = __mp_release_all(&kernel_lock);
+ else
+ hold_count = 0;
#endif

/*
@@ -448,7 +450,7 @@ mi_switch(void)
* released the scheduler lock to avoid deadlock, and before
* we reacquire the interlock and the scheduler lock.
*/
- if (p->p_flag & P_BIGLOCK)
+ if (hold_count)
__mp_acquire_count(&kernel_lock, hold_count);
__mp_acquire_count(&sched_lock, sched_count + 1);
#endif