author:    Scott Soule Cheloha <cheloha@cvs.openbsd.org>    2024-01-24 19:23:40 +0000
committer: Scott Soule Cheloha <cheloha@cvs.openbsd.org>    2024-01-24 19:23:40 +0000
commit:    8cb898c555527cf989801c7e83938ec6f7e32288 (patch)
tree:      32acb5bd373d406406213baca20ec260702dce12 /sys/kern
parent:    3dee2e31d0d48fb3d7aab4917b155daa51330768 (diff)
clockintr: switch from callee- to caller-allocated clockintr structs
Currently, clockintr_establish() calls malloc(9) to allocate a
clockintr struct on behalf of the caller. mpi@ says this behavior is
incompatible with dt(4). In particular, calling malloc(9) during the
initialization of a PCB outside of dt_pcb_alloc() (a) is awkward and
(b) may conflict with future changes/optimizations to PCB allocation.
To side-step the problem, this patch changes the clockintr subsystem
to use caller-allocated clockintr structs instead of callee-allocated
structs.
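For illustration, here is a minimal standalone C sketch of the two
allocation styles. The timer_establish/timer_bind names and struct pcb
below are hypothetical stand-ins, not the kernel's actual interfaces:

```c
#include <stdlib.h>
#include <string.h>

struct timer {
        void (*t_func)(void *);
        void *t_arg;
};

/*
 * Callee-allocated style: the subsystem allocates on the caller's
 * behalf, so the call can fail and every caller must handle NULL.
 */
struct timer *
timer_establish(void (*func)(void *), void *arg)
{
        struct timer *t;

        t = calloc(1, sizeof(*t));      /* stand-in for malloc(9) */
        if (t == NULL)
                return NULL;
        t->t_func = func;
        t->t_arg = arg;
        return t;
}

/*
 * Caller-allocated style: the caller passes storage it already owns,
 * typically embedded in a larger structure, so binding cannot fail
 * and no hidden allocation happens during initialization.
 */
void
timer_bind(struct timer *t, void (*func)(void *), void *arg)
{
        memset(t, 0, sizeof(*t));
        t->t_func = func;
        t->t_arg = arg;
}

/* The caller embeds the timer; one allocation covers both. */
struct pcb {
        struct timer pcb_timer;
};

static void
tick(void *arg)
{
        (void)arg;
}

int
main(void)
{
        struct pcb pcb;

        timer_bind(&pcb.pcb_timer, tick, NULL); /* no failure path */
        return 0;
}
```

With caller-allocated structs, the clockintr can be embedded in the PCB
itself, so PCB initialization no longer requires a separate malloc(9)
call that might fail or constrain how PCBs are allocated.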
clockintr_establish() is named after softintr_establish(), which uses
malloc(9) internally to create softintr objects. Now that the clockintr
subsystem no longer uses malloc(9), the "establish" name is no longer apt.
To avoid confusion, this patch also renames "clockintr_establish" to
"clockintr_bind".
Requested by mpi@. Tweaked by mpi@.
Thread: https://marc.info/?l=openbsd-tech&m=170597126103504&w=2
ok claudio@ mlarkin@ mpi@
Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/kern_clockintr.c | 49
-rw-r--r-- | sys/kern/kern_fork.c      |  6
-rw-r--r-- | sys/kern/kern_sched.c     | 22
-rw-r--r-- | sys/kern/sched_bsd.c      | 10
-rw-r--r-- | sys/kern/subr_prof.c      | 17
5 files changed, 43 insertions, 61 deletions
```diff
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index 6e771a043ba..f52ddc37f68 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.63 2024/01/15 01:15:37 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.64 2024/01/24 19:23:38 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -62,11 +62,9 @@ clockintr_cpu_init(const struct intrclock *ic)
                 clockqueue_intrclock_install(cq, ic);
 
         /* TODO: Remove this from struct clockintr_queue. */
-        if (cq->cq_hardclock == NULL) {
-                cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock,
+        if (cq->cq_hardclock.cl_expiration == 0) {
+                clockintr_bind(&cq->cq_hardclock, ci, clockintr_hardclock,
                     NULL);
-                if (cq->cq_hardclock == NULL)
-                        panic("%s: failed to establish hardclock", __func__);
         }
 
         /*
@@ -96,16 +94,16 @@ clockintr_cpu_init(const struct intrclock *ic)
          * behalf.
          */
         if (CPU_IS_PRIMARY(ci)) {
-                if (cq->cq_hardclock->cl_expiration == 0)
-                        clockintr_schedule(cq->cq_hardclock, 0);
+                if (cq->cq_hardclock.cl_expiration == 0)
+                        clockintr_schedule(&cq->cq_hardclock, 0);
                 else
-                        clockintr_advance(cq->cq_hardclock, hardclock_period);
+                        clockintr_advance(&cq->cq_hardclock, hardclock_period);
         } else {
-                if (cq->cq_hardclock->cl_expiration == 0) {
-                        clockintr_stagger(cq->cq_hardclock, hardclock_period,
+                if (cq->cq_hardclock.cl_expiration == 0) {
+                        clockintr_stagger(&cq->cq_hardclock, hardclock_period,
                             multiplier, MAXCPUS);
                 }
-                clockintr_advance(cq->cq_hardclock, hardclock_period);
+                clockintr_advance(&cq->cq_hardclock, hardclock_period);
         }
 
         /*
@@ -113,30 +111,30 @@ clockintr_cpu_init(const struct intrclock *ic)
          * stagger a randomized statclock.
          */
         if (!statclock_is_randomized) {
-                if (spc->spc_statclock->cl_expiration == 0) {
-                        clockintr_stagger(spc->spc_statclock, statclock_avg,
+                if (spc->spc_statclock.cl_expiration == 0) {
+                        clockintr_stagger(&spc->spc_statclock, statclock_avg,
                             multiplier, MAXCPUS);
                 }
         }
-        clockintr_advance(spc->spc_statclock, statclock_avg);
+        clockintr_advance(&spc->spc_statclock, statclock_avg);
 
         /*
          * XXX Need to find a better place to do this.  We can't do it in
          * sched_init_cpu() because initclocks() runs after it.
          */
-        if (spc->spc_itimer->cl_expiration == 0) {
-                clockintr_stagger(spc->spc_itimer, hardclock_period,
+        if (spc->spc_itimer.cl_expiration == 0) {
+                clockintr_stagger(&spc->spc_itimer, hardclock_period,
                     multiplier, MAXCPUS);
         }
-        if (spc->spc_profclock->cl_expiration == 0) {
-                clockintr_stagger(spc->spc_profclock, profclock_period,
+        if (spc->spc_profclock.cl_expiration == 0) {
+                clockintr_stagger(&spc->spc_profclock, profclock_period,
                     multiplier, MAXCPUS);
         }
-        if (spc->spc_roundrobin->cl_expiration == 0) {
-                clockintr_stagger(spc->spc_roundrobin, hardclock_period,
+        if (spc->spc_roundrobin.cl_expiration == 0) {
+                clockintr_stagger(&spc->spc_roundrobin, hardclock_period,
                     multiplier, MAXCPUS);
         }
-        clockintr_advance(spc->spc_roundrobin, roundrobin_period);
+        clockintr_advance(&spc->spc_roundrobin, roundrobin_period);
 
         if (reset_cq_intrclock)
                 SET(cq->cq_flags, CQ_INTRCLOCK);
@@ -337,16 +335,12 @@ clockintr_cancel(struct clockintr *cl)
         mtx_leave(&cq->cq_mtx);
 }
 
-struct clockintr *
-clockintr_establish(struct cpu_info *ci,
+void
+clockintr_bind(struct clockintr *cl, struct cpu_info *ci,
     void (*func)(struct clockrequest *, void *, void *), void *arg)
 {
-        struct clockintr *cl;
         struct clockintr_queue *cq = &ci->ci_queue;
 
-        cl = malloc(sizeof *cl, M_DEVBUF, M_NOWAIT | M_ZERO);
-        if (cl == NULL)
-                return NULL;
         cl->cl_arg = arg;
         cl->cl_func = func;
         cl->cl_queue = cq;
@@ -354,7 +348,6 @@ clockintr_establish(struct cpu_info *ci,
         mtx_enter(&cq->cq_mtx);
         TAILQ_INSERT_TAIL(&cq->cq_all, cl, cl_alink);
         mtx_leave(&cq->cq_mtx);
-        return cl;
 }
 
 void
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index b7b93d86556..90acfdc09b2 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_fork.c,v 1.256 2024/01/19 01:43:26 bluhm Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.257 2024/01/24 19:23:38 cheloha Exp $ */
 /* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
 
 /*
@@ -704,11 +704,11 @@ proc_trampoline_mi(void)
         /* Start any optional clock interrupts needed by the thread. */
         if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
                 atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-                clockintr_advance(spc->spc_itimer, hardclock_period);
+                clockintr_advance(&spc->spc_itimer, hardclock_period);
         }
         if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
                 atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-                clockintr_advance(spc->spc_profclock, profclock_period);
+                clockintr_advance(&spc->spc_profclock, profclock_period);
         }
 
         nanouptime(&spc->spc_runtime);
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index b10a64c5e80..731c615284d 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.93 2023/10/24 13:20:11 claudio Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.94 2024/01/24 19:23:38 cheloha Exp $ */
 /*
  * Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
  *
@@ -88,18 +88,10 @@ sched_init_cpu(struct cpu_info *ci)
 
         spc->spc_idleproc = NULL;
 
-        spc->spc_itimer = clockintr_establish(ci, itimer_update, NULL);
-        if (spc->spc_itimer == NULL)
-                panic("%s: clockintr_establish itimer_update", __func__);
-        spc->spc_profclock = clockintr_establish(ci, profclock, NULL);
-        if (spc->spc_profclock == NULL)
-                panic("%s: clockintr_establish profclock", __func__);
-        spc->spc_roundrobin = clockintr_establish(ci, roundrobin, NULL);
-        if (spc->spc_roundrobin == NULL)
-                panic("%s: clockintr_establish roundrobin", __func__);
-        spc->spc_statclock = clockintr_establish(ci, statclock, NULL);
-        if (spc->spc_statclock == NULL)
-                panic("%s: clockintr_establish statclock", __func__);
+        clockintr_bind(&spc->spc_itimer, ci, itimer_update, NULL);
+        clockintr_bind(&spc->spc_profclock, ci, profclock, NULL);
+        clockintr_bind(&spc->spc_roundrobin, ci, roundrobin, NULL);
+        clockintr_bind(&spc->spc_statclock, ci, statclock, NULL);
 
         kthread_create_deferred(sched_kthreads_create, ci);
 
@@ -244,11 +236,11 @@ sched_toidle(void)
 
         if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
                 atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-                clockintr_cancel(spc->spc_itimer);
+                clockintr_cancel(&spc->spc_itimer);
         }
         if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
                 atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-                clockintr_cancel(spc->spc_profclock);
+                clockintr_cancel(&spc->spc_profclock);
         }
 
         atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);
diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
index 82fb73f6d62..89d58c6528a 100644
--- a/sys/kern/sched_bsd.c
+++ b/sys/kern/sched_bsd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.89 2023/10/17 00:04:02 cheloha Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.90 2024/01/24 19:23:38 cheloha Exp $ */
 /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
 
 /*-
@@ -396,11 +396,11 @@ mi_switch(void)
         /* Stop any optional clock interrupts. */
         if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
                 atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-                clockintr_cancel(spc->spc_itimer);
+                clockintr_cancel(&spc->spc_itimer);
         }
         if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
                 atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-                clockintr_cancel(spc->spc_profclock);
+                clockintr_cancel(&spc->spc_profclock);
         }
 
         /*
@@ -451,11 +451,11 @@ mi_switch(void)
         /* Start any optional clock interrupts needed by the thread. */
         if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
                 atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-                clockintr_advance(spc->spc_itimer, hardclock_period);
+                clockintr_advance(&spc->spc_itimer, hardclock_period);
         }
         if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
                 atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-                clockintr_advance(spc->spc_profclock, profclock_period);
+                clockintr_advance(&spc->spc_profclock, profclock_period);
         }
 
         nanouptime(&spc->spc_runtime);
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 19eb3cc6fdd..906c15d9706 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_prof.c,v 1.40 2023/10/17 00:04:02 cheloha Exp $ */
+/* $OpenBSD: subr_prof.c,v 1.41 2024/01/24 19:23:38 cheloha Exp $ */
 /* $NetBSD: subr_prof.c,v 1.12 1996/04/22 01:38:50 christos Exp $ */
 
 /*-
@@ -101,19 +101,16 @@ prof_init(void)
 
         /* Allocate and initialize one profiling buffer per CPU. */
         CPU_INFO_FOREACH(cii, ci) {
-                ci->ci_gmonclock = clockintr_establish(ci, gmonclock, NULL);
-                if (ci->ci_gmonclock == NULL) {
-                        printf("%s: clockintr_establish gmonclock\n", __func__);
-                        return;
-                }
-                clockintr_stagger(ci->ci_gmonclock, profclock_period,
-                    CPU_INFO_UNIT(ci), MAXCPUS);
                 cp = km_alloc(round_page(size), &kv_any, &kp_zero, &kd_nowait);
                 if (cp == NULL) {
                         printf("No memory for profiling.\n");
                         return;
                 }
 
+                clockintr_bind(&ci->ci_gmonclock, ci, gmonclock, NULL);
+                clockintr_stagger(&ci->ci_gmonclock, profclock_period,
+                    CPU_INFO_UNIT(ci), MAXCPUS);
+
                 p = (struct gmonparam *)cp;
                 cp += sizeof(*p);
                 p->tos = (struct tostruct *)cp;
@@ -159,7 +156,7 @@ prof_state_toggle(struct cpu_info *ci, int oldstate)
                 if (error == 0) {
                         if (++gmon_cpu_count == 1)
                                 startprofclock(&process0);
-                        clockintr_advance(ci->ci_gmonclock, profclock_period);
+                        clockintr_advance(&ci->ci_gmonclock, profclock_period);
                 }
                 break;
         default:
@@ -167,7 +164,7 @@ prof_state_toggle(struct cpu_info *ci, int oldstate)
                 gp->state = GMON_PROF_OFF;
                 /* FALLTHROUGH */
         case GMON_PROF_OFF:
-                clockintr_cancel(ci->ci_gmonclock);
+                clockintr_cancel(&ci->ci_gmonclock);
                 if (--gmon_cpu_count == 0)
                         stopprofclock(&process0);
 #if !defined(GPROF)
```