author      Artur Grabowski <art@cvs.openbsd.org>    2002-01-10 14:16:54 +0000
committer   Artur Grabowski <art@cvs.openbsd.org>    2002-01-10 14:16:54 +0000
commit      3e39404a4299dbc88be1b70f0a7fa9888b7ac3cc (patch)
tree        e5aa16c9eeb68327705eced1f281857a0d60910f /sys/kern/subr_pool.c
parent      f2f820a9756c09fc30222845334818800e7f00da (diff)
Convert some for-loops into TAILQ_FOREACH.
Diffstat (limited to 'sys/kern/subr_pool.c')
-rw-r--r--    sys/kern/subr_pool.c | 33
1 file changed, 11 insertions, 22 deletions
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index fa985f0e10c..15c207c6bd9 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: subr_pool.c,v 1.14 2001/11/06 19:53:20 miod Exp $	*/
+/*	$OpenBSD: subr_pool.c,v 1.15 2002/01/10 14:16:53 art Exp $	*/
 /*	$NetBSD: subr_pool.c,v 1.59 2001/06/05 18:51:04 thorpej Exp $	*/
 
 /*-
@@ -353,8 +353,7 @@ pr_rmpage(struct pool *pp, struct pool_item_header *ph)
 		 * Start search from the page head, to increase the
 		 * chance for "high water" pages to be freed.
 		 */
-		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
-		     ph = TAILQ_NEXT(ph, ph_pagelist))
+		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
 				break;
 
@@ -805,8 +804,7 @@ pool_get(struct pool *pp, int flags)
 		 */
 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
 		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
-		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
-		     ph = TAILQ_NEXT(ph, ph_pagelist))
+		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
 				break;
 
@@ -939,8 +937,7 @@ pool_do_put(struct pool *pp, void *v)
 		 * page with the fewest available items, to minimize
 		 * fragmentation?
 		 */
-		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
-		     ph = TAILQ_NEXT(ph, ph_pagelist))
+		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
 				break;
 
@@ -1286,8 +1283,7 @@ pool_reclaim(struct pool *pp)
 	/*
 	 * Reclaim items from the pool's caches.
 	 */
-	for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
-	     pc = TAILQ_NEXT(pc, pc_poollist))
+	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
 		pool_cache_reclaim(pc);
 
 	s = splclock();
@@ -1420,8 +1416,7 @@ pool_print1(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
 		    (u_long)ph->ph_time.tv_sec,
 		    (u_long)ph->ph_time.tv_usec);
 #ifdef DIAGNOSTIC
-		for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
-		     pi = TAILQ_NEXT(pi, pi_list)) {
+		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
 			if (pi->pi_magic != PI_MAGIC) {
 				(*pr)("\t\t\titem %p, magic 0x%x\n",
 				    pi, pi->pi_magic);
@@ -1450,14 +1445,12 @@ pool_print1(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
 	if (print_cache == 0)
 		goto skip_cache;
 
-	for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
-	     pc = TAILQ_NEXT(pc, pc_poollist)) {
+	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
 		    pc->pc_allocfrom, pc->pc_freeto);
 		(*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
-		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
-		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
+		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
 			for (i = 0; i < PCG_NOBJECTS; i++)
 				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
@@ -1477,9 +1470,7 @@ pool_chk(struct pool *pp, const char *label)
 
 	simple_lock(&pp->pr_slock);
 
-	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
-	     ph = TAILQ_NEXT(ph, ph_pagelist)) {
-
+	TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
 		struct pool_item *pi;
 		int n;
 		caddr_t page;
@@ -1637,8 +1628,7 @@ pool_cache_get(struct pool_cache *pc, int flags)
 	simple_lock(&pc->pc_slock);
 
 	if ((pcg = pc->pc_allocfrom) == NULL) {
-		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
-		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
+		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
 			if (pcg->pcg_avail != 0) {
 				pc->pc_allocfrom = pcg;
 				goto have_group;
@@ -1689,8 +1679,7 @@ pool_cache_put(struct pool_cache *pc, void *object)
 	simple_lock(&pc->pc_slock);
 
 	if ((pcg = pc->pc_freeto) == NULL) {
-		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
-		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
+		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != PCG_NOBJECTS) {
				pc->pc_freeto = pcg;
				goto have_group;
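The TAILQ_FOREACH(var, head, field) macro from <sys/queue.h> expands to the same TAILQ_FIRST/TAILQ_NEXT for-loop that each hunk above removes, so behaviour is unchanged: after the loop the iterator is NULL unless an early break left it pointing at a matching element. Below is a minimal standalone sketch of the conversion pattern; the struct item list and the find_nonzero_* helpers are hypothetical examples for illustration, not code from subr_pool.c.

/*
 * Illustrative only: the open-coded-loop -> TAILQ_FOREACH conversion
 * applied to a made-up tail queue of "struct item" entries.
 */
#include <sys/queue.h>
#include <stdio.h>

struct item {
	int			value;
	TAILQ_ENTRY(item)	entries;	/* linkage within the tail queue */
};

TAILQ_HEAD(itemlist, item);

/* Before: traversal spelled out with TAILQ_FIRST/TAILQ_NEXT. */
static struct item *
find_nonzero_old(struct itemlist *head)
{
	struct item *it;

	for (it = TAILQ_FIRST(head); it != NULL;
	     it = TAILQ_NEXT(it, entries))
		if (it->value != 0)
			break;
	return (it);		/* NULL if no entry matched */
}

/* After: the equivalent loop written with TAILQ_FOREACH. */
static struct item *
find_nonzero_new(struct itemlist *head)
{
	struct item *it;

	TAILQ_FOREACH(it, head, entries)
		if (it->value != 0)
			break;
	return (it);		/* NULL if no entry matched */
}

int
main(void)
{
	struct itemlist head = TAILQ_HEAD_INITIALIZER(head);
	struct item a = { .value = 0 }, b = { .value = 7 };

	TAILQ_INSERT_TAIL(&head, &a, entries);
	TAILQ_INSERT_TAIL(&head, &b, entries);

	/* Both traversals find the same element (b, value 7). */
	printf("old: %d, new: %d\n",
	    find_nonzero_old(&head)->value,
	    find_nonzero_new(&head)->value);
	return (0);
}

Both helpers return the first entry with a non-zero value (or NULL); the macro form only drops the hand-written iteration boilerplate, which is the entire point of the commit.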