Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/kern_exit.c | 19
-rw-r--r-- | sys/kern/kern_fork.c | 13
-rw-r--r-- | sys/kern/kern_sig.c  | 77
3 files changed, 64 insertions, 45 deletions
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 2863d1806d0..7dbb63daca8 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_exit.c,v 1.212 2023/08/29 16:19:34 claudio Exp $ */
+/* $OpenBSD: kern_exit.c,v 1.213 2023/09/04 13:18:41 claudio Exp $ */
 /* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
 
 /*
@@ -119,7 +119,7 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
 	struct process *pr, *qr, *nqr;
 	struct rusage *rup;
 	struct timespec ts;
-	int s;
+	int s, wake;
 
 	atomic_setbits_int(&p->p_flag, P_WEXIT);
 
@@ -161,9 +161,17 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
 	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
 	SCHED_UNLOCK(s);
 
+	mtx_enter(&pr->ps_mtx);
+	pr->ps_threadcnt--;
+	wake = (pr->ps_single && pr->ps_singlecnt == pr->ps_threadcnt);
+	mtx_leave(&pr->ps_mtx);
+	if (wake)
+		wakeup(&pr->ps_singlecnt);
+
 	if ((p->p_flag & P_THREAD) == 0) {
 		/* main thread gotta wait because it has the pid, et al */
-		while (pr->ps_threadcnt > 1)
+		/* XXX locking depends on kernel lock here. */
+		while (pr->ps_threadcnt > 0)
 			tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath", INFSLP);
 		if (pr->ps_flags & PS_PROFIL)
 			stopprofclock(pr);
@@ -337,9 +345,8 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
 	/* just a thread? detach it from its process */
 	if (p->p_flag & P_THREAD) {
 		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
-		if (--pr->ps_threadcnt == 1)
+		if (pr->ps_threadcnt == 0)
 			wakeup(&pr->ps_threads);
-		KASSERT(pr->ps_threadcnt > 0);
 	}
 
 	/* Release the thread's read reference of resource limit structure. */
@@ -823,7 +830,7 @@ process_zap(struct process *pr)
 	if (otvp)
 		vrele(otvp);
 
-	KASSERT(pr->ps_threadcnt == 1);
+	KASSERT(pr->ps_threadcnt == 0);
 	if (pr->ps_ptstat != NULL)
 		free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
 	pool_put(&rusage_pool, pr->ps_ru);
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 8d1f245935a..6d515011b11 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_fork.c,v 1.249 2023/08/14 08:33:24 mpi Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.250 2023/09/04 13:18:41 claudio Exp $ */
 /* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
 
 /*
@@ -543,7 +543,6 @@ thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
 
 	/* other links */
 	p->p_p = pr;
-	pr->ps_threadcnt++;
 
 	/* local copies */
 	p->p_fd		= pr->ps_fd;
@@ -564,16 +563,18 @@ thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
 
 	SCHED_LOCK(s);
 	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
+	SCHED_UNLOCK(s);
+
+	mtx_enter(&pr->ps_mtx);
+	pr->ps_threadcnt++;
 
 	/*
 	 * if somebody else wants to take us to single threaded mode,
 	 * count ourselves in.
 	 */
-	if (pr->ps_single) {
-		atomic_inc_int(&pr->ps_singlecount);
+	if (pr->ps_single)
 		atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
-	}
-	SCHED_UNLOCK(s);
+	mtx_leave(&pr->ps_mtx);
 
 	/*
 	 * Return tid to parent thread and copy it out to userspace
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index af0e0d7f4eb..75f48bb680b 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sig.c,v 1.313 2023/08/16 07:55:52 claudio Exp $ */
+/* $OpenBSD: kern_sig.c,v 1.314 2023/09/04 13:18:41 claudio Exp $ */
 /* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
 
 /*
@@ -2008,11 +2008,12 @@ userret(struct proc *p)
 }
 
 int
-single_thread_check_locked(struct proc *p, int deep, int s)
+single_thread_check_locked(struct proc *p, int deep)
 {
 	struct process *pr = p->p_p;
+	int s, wake;
 
-	SCHED_ASSERT_LOCKED();
+	MUTEX_ASSERT_LOCKED(&pr->ps_mtx);
 
 	if (pr->ps_single == NULL || pr->ps_single == p)
 		return (0);
@@ -2026,19 +2027,24 @@ single_thread_check_locked(struct proc *p, int deep, int s)
 			return (EINTR);
 		}
 
-		if (atomic_dec_int_nv(&pr->ps_singlecount) == 0)
-			wakeup(&pr->ps_singlecount);
-
 		if (pr->ps_flags & PS_SINGLEEXIT) {
-			SCHED_UNLOCK(s);
+			mtx_leave(&pr->ps_mtx);
 			KERNEL_LOCK();
 			exit1(p, 0, 0, EXIT_THREAD_NOCHECK);
 			/* NOTREACHED */
 		}
 
 		/* not exiting and don't need to unwind, so suspend */
+		wake = (++pr->ps_singlecnt == pr->ps_threadcnt);
+		mtx_leave(&pr->ps_mtx);
+		if (wake)
+			wakeup(&pr->ps_singlecnt);
+
+		SCHED_LOCK(s);
 		p->p_stat = SSTOP;
 		mi_switch();
+		SCHED_UNLOCK(s);
+		mtx_enter(&pr->ps_mtx);
 	} while (pr->ps_single != NULL);
 
 	return (0);
@@ -2047,11 +2053,11 @@ single_thread_check_locked(struct proc *p, int deep, int s)
 int
 single_thread_check(struct proc *p, int deep)
 {
-	int s, error;
+	int error;
 
-	SCHED_LOCK(s);
-	error = single_thread_check_locked(p, deep, s);
-	SCHED_UNLOCK(s);
+	mtx_enter(&p->p_p->ps_mtx);
+	error = single_thread_check_locked(p, deep);
+	mtx_leave(&p->p_p->ps_mtx);
 
 	return error;
 }
@@ -2071,13 +2077,14 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
 	struct process *pr = p->p_p;
 	struct proc *q;
 	int error, s;
+	u_int count = 0;
 
 	KASSERT(curproc == p);
 
-	SCHED_LOCK(s);
-	error = single_thread_check_locked(p, (mode == SINGLE_UNWIND), s);
+	mtx_enter(&pr->ps_mtx);
+	error = single_thread_check_locked(p, (mode == SINGLE_UNWIND));
 	if (error) {
-		SCHED_UNLOCK(s);
+		mtx_leave(&pr->ps_mtx);
 		return error;
 	}
 
@@ -2096,26 +2103,24 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
 		panic("single_thread_mode = %d", mode);
 #endif
 	}
-	pr->ps_singlecount = 0;
-	membar_producer();
+	pr->ps_singlecnt = 1;	/* count ourselfs in already */
 	pr->ps_single = p;
+	mtx_leave(&pr->ps_mtx);
+
+	SCHED_LOCK(s);
 	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
 		if (q == p)
 			continue;
 		if (q->p_flag & P_WEXIT) {
-			if (mode == SINGLE_EXIT) {
-				if (q->p_stat == SSTOP) {
-					setrunnable(q);
-					atomic_inc_int(&pr->ps_singlecount);
-				}
-			}
+			if (mode == SINGLE_EXIT && q->p_stat == SSTOP)
+				setrunnable(q);
 			continue;
 		}
 		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
 		switch (q->p_stat) {
 		case SIDL:
+		case SDEAD:
 		case SRUN:
-			atomic_inc_int(&pr->ps_singlecount);
 			break;
 		case SSLEEP:
 			/* if it's not interruptible, then just have to wait */
@@ -2123,29 +2128,31 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
 				/* merely need to suspend?  just stop it */
 				if (mode == SINGLE_SUSPEND) {
 					q->p_stat = SSTOP;
+					count++;
 					break;
 				}
 				/* need to unwind or exit, so wake it */
 				setrunnable(q);
 			}
-			atomic_inc_int(&pr->ps_singlecount);
 			break;
 		case SSTOP:
 			if (mode == SINGLE_EXIT) {
 				setrunnable(q);
-				atomic_inc_int(&pr->ps_singlecount);
+				break;
 			}
-			break;
-		case SDEAD:
+			count++;
 			break;
 		case SONPROC:
-			atomic_inc_int(&pr->ps_singlecount);
 			signotify(q);
 			break;
 		}
 	}
 	SCHED_UNLOCK(s);
 
+	mtx_enter(&pr->ps_mtx);
+	pr->ps_singlecnt += count;
+	mtx_leave(&pr->ps_mtx);
+
 	if (wait)
 		single_thread_wait(pr, 1);
 
@@ -2163,14 +2170,16 @@ single_thread_wait(struct process *pr, int recheck)
 	int wait;
 
 	/* wait until they're all suspended */
-	wait = pr->ps_singlecount > 0;
+	mtx_enter(&pr->ps_mtx);
+	wait = pr->ps_singlecnt < pr->ps_threadcnt;
 	while (wait) {
-		sleep_setup(&pr->ps_singlecount, PWAIT, "suspend");
-		wait = pr->ps_singlecount > 0;
-		sleep_finish(0, wait);
+		msleep_nsec(&pr->ps_singlecnt, &pr->ps_mtx, PWAIT, "suspend",
+		    INFSLP);
 		if (!recheck)
 			break;
+		wait = pr->ps_singlecnt < pr->ps_threadcnt;
 	}
+	mtx_leave(&pr->ps_mtx);
 
 	return wait;
 }
@@ -2185,9 +2194,11 @@ single_thread_clear(struct proc *p, int flag)
 	KASSERT(pr->ps_single == p);
 	KASSERT(curproc == p);
 
-	SCHED_LOCK(s);
+	/* can do this without holding pr->ps_mtx since no concurrency */
 	pr->ps_single = NULL;
 	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
+
+	SCHED_LOCK(s);
 	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
 		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
 			continue;
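
The commit replaces the atomically managed pr->ps_singlecount (counted down toward zero) with pr->ps_singlecnt, counted up under the per-process ps_mtx mutex: single_thread_set() starts at 1 to count the suspending thread itself, every other thread adds itself when it parks or is found already stopped, exiting threads drop ps_threadcnt, and single_thread_wait() sleeps on &pr->ps_singlecnt until ps_singlecnt == ps_threadcnt. Below is a minimal userland sketch of that counting protocol, not the kernel code: pthread mutex/condvars stand in for ps_mtx, msleep_nsec() and wakeup(), and all names are illustrative.

/*
 * Userland sketch of the counting protocol (illustrative only).
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

#define NWORKERS	4

static pthread_mutex_t ps_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t singlecnt_cv = PTHREAD_COND_INITIALIZER;	/* ~wakeup(&ps_singlecnt) */
static pthread_cond_t resume_cv = PTHREAD_COND_INITIALIZER;	/* ~release from SSTOP */
static unsigned int threadcnt = NWORKERS + 1;	/* ~pr->ps_threadcnt, incl. suspender */
static unsigned int singlecnt;			/* ~pr->ps_singlecnt */
static int single;				/* ~pr->ps_single != NULL */

/* ~single_thread_check(): count ourselves in, then park until cleared. */
static void
check_single_thread(void)
{
	pthread_mutex_lock(&ps_mtx);
	if (single) {
		if (++singlecnt == threadcnt)	/* last one in wakes the suspender */
			pthread_cond_signal(&singlecnt_cv);
		while (single)			/* ~sitting in SSTOP */
			pthread_cond_wait(&resume_cv, &ps_mtx);
	}
	pthread_mutex_unlock(&ps_mtx);
}

/* ~the new exit1() hook: drop the thread count, wake the suspender if counts meet. */
static void
thread_exit_hook(void)
{
	int wake;

	pthread_mutex_lock(&ps_mtx);
	threadcnt--;
	wake = (single && singlecnt == threadcnt);
	pthread_mutex_unlock(&ps_mtx);
	if (wake)
		pthread_cond_signal(&singlecnt_cv);
}

static void *
worker(void *arg)
{
	for (int i = 0; i < 100000; i++)
		check_single_thread();	/* ~userret()/sleep-return check */
	thread_exit_hook();
	return NULL;
}

int
main(void)
{
	pthread_t tids[NWORKERS];
	int i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tids[i], NULL, worker, NULL);

	/* ~single_thread_set(): count ourselves in already with singlecnt = 1. */
	pthread_mutex_lock(&ps_mtx);
	single = 1;
	singlecnt = 1;
	/* ~single_thread_wait(): sleep until everyone is counted in or has exited. */
	while (singlecnt < threadcnt)
		pthread_cond_wait(&singlecnt_cv, &ps_mtx);
	printf("single threaded: %u of %u other threads parked\n",
	    singlecnt - 1, threadcnt - 1);
	/* ~single_thread_clear(): release everybody. */
	single = 0;
	singlecnt = 0;
	pthread_cond_broadcast(&resume_cv);
	pthread_mutex_unlock(&ps_mtx);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tids[i], NULL);
	return 0;
}

Counting up toward ps_threadcnt under one mutex keeps the condition (ps_singlecnt == ps_threadcnt) re-checkable by both suspending and exiting threads, which is why exit1() in the diff can simply decrement ps_threadcnt and wake the waiter when the counts meet.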