| field | value | date |
|---|---|---|
| author | David Gwynne <dlg@cvs.openbsd.org> | 2022-11-09 10:41:19 +0000 |
| committer | David Gwynne <dlg@cvs.openbsd.org> | 2022-11-09 10:41:19 +0000 |
| commit | 0403d056180bfd7ceb95274ea923550bf8abc6a8 (patch) | |
| tree | a85376aeaf5f45cc24468077c27074d49bf03ca8 /sys/net/if.c | |
| parent | 92c8ab13f6e3199e31f53186cdcbb36ff5dfdaf0 (diff) | |
replace SRP with SMR in the if_idxmap.
when i first wrote if_idxmap i didn't realise (and no one thought
to tell me) that index 0 is special and means "no interface". since
slot 0 can never hold an interface pointer, use it to store the
length of the map instead of prepending the map with a separate
length field.
if_get() now special-cases index 0 and returns NULL directly. this
also means the size of the map is now always a power of 2, which
is a nicer fit with what the kernel malloc provides.
tweaks and ok visa@
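
As a reading aid, here is a minimal userland sketch of the layout described above: the map is a plain array of interface pointers, slot 0 holds the array length, and index 0 always resolves to "no interface". The names (map_alloc, map_limit, map_get) and the stripped-down struct ifnet are hypothetical stand-ins; the real kernel map is SMR-protected and reference-counted, as the diff below shows.

```c
#include <stdint.h>
#include <stdlib.h>

struct ifnet { unsigned int if_index; };	/* stand-in, illustration only */

/* slot 0 stores the length, so the whole allocation is a power of 2 */
static struct ifnet **
map_alloc(unsigned int limit)
{
	struct ifnet **map = calloc(limit, sizeof(*map));

	if (map != NULL)
		map[0] = (struct ifnet *)(uintptr_t)limit;
	return map;
}

static unsigned int
map_limit(struct ifnet **map)
{
	return (uintptr_t)map[0];
}

/* index 0 means "no interface": return NULL without touching the map */
static struct ifnet *
map_get(struct ifnet **map, unsigned int index)
{
	if (index == 0 || index >= map_limit(map))
		return NULL;
	return map[index];
}
```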
Diffstat (limited to 'sys/net/if.c')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | sys/net/if.c | 213 |

1 file changed, 83 insertions, 130 deletions
diff --git a/sys/net/if.c b/sys/net/if.c
index 297eac6afeb..9d4dc8cfc89 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: if.c,v 1.672 2022/11/08 21:07:33 kn Exp $	*/
+/*	$OpenBSD: if.c,v 1.673 2022/11/09 10:41:18 dlg Exp $	*/
 /*	$NetBSD: if.c,v 1.35 1996/05/07 05:26:04 thorpej Exp $	*/
 
 /*
@@ -87,6 +87,7 @@
 #include <sys/proc.h>
 #include <sys/stdint.h>	/* uintptr_t */
 #include <sys/rwlock.h>
+#include <sys/smr.h>
 
 #include <net/if.h>
 #include <net/if_dl.h>
@@ -196,29 +197,20 @@ void	if_map_dtor(void *, void *);
 struct ifnet *if_ref(struct ifnet *);
 
 /*
- * struct if_map
- *
- * bounded array of ifnet srp pointers used to fetch references of live
- * interfaces with if_get().
- */
-
-struct if_map {
-	unsigned long limit;
-	/* followed by limit ifnet srp pointers */
-};
-
-/*
  * struct if_idxmap
  *
  * infrastructure to manage updates and accesses to the current if_map.
+ *
+ * interface index 0 is special and represents "no interface", so we
+ * use the 0th slot in map to store the length of the array.
  */
 
 struct if_idxmap {
-	unsigned int serial;
-	unsigned int count;
-	struct srp map;
-	struct rwlock lock;
-	unsigned char *usedidx; /* bitmap of indices in use */
+	unsigned int	 serial;
+	unsigned int	 count;
+	struct ifnet	**map;		/* SMR protected */
+	struct rwlock	 lock;
+	unsigned char	*usedidx;	/* bitmap of indices in use */
 };
 
 void	if_idxmap_init(unsigned int);
@@ -265,7 +257,7 @@ ifinit(void)
 	 * most machines boot with 4 or 5 interfaces, so size the initial map
 	 * to accommodate this
 	 */
-	if_idxmap_init(8);
+	if_idxmap_init(8); /* 8 is a nice power of 2 for malloc */
 
 	for (i = 0; i < NET_TASKQ; i++) {
 		nettqmp[i] = taskq_create("softnet", 1, IPL_NET, TASKQ_MPSAFE);
@@ -274,48 +266,41 @@ ifinit(void)
 	}
 }
 
-static struct if_idxmap if_idxmap = {
-	0,
-	0,
-	SRP_INITIALIZER(),
-	RWLOCK_INITIALIZER("idxmaplk"),
-	NULL,
-};
-
-struct srp_gc if_ifp_gc = SRP_GC_INITIALIZER(if_ifp_dtor, NULL);
-struct srp_gc if_map_gc = SRP_GC_INITIALIZER(if_map_dtor, NULL);
+static struct if_idxmap if_idxmap;
 
 struct ifnet_head ifnetlist = TAILQ_HEAD_INITIALIZER(ifnetlist);
 
+static inline unsigned int
+if_idxmap_limit(struct ifnet **if_map)
+{
+	return ((uintptr_t)if_map[0]);
+}
+
 void
 if_idxmap_init(unsigned int limit)
 {
-	struct if_map *if_map;
-	struct srp *map;
-	unsigned int i;
+	struct ifnet **if_map;
 
-	if_idxmap.serial = 1; /* skip ifidx 0 so it can return NULL */
+	rw_init(&if_idxmap.lock, "idxmaplk");
+	if_idxmap.serial = 1; /* skip ifidx 0 */
 
-	if_map = malloc(sizeof(*if_map) + limit * sizeof(*map),
-	    M_IFADDR, M_WAITOK);
+	if_map = mallocarray(limit, sizeof(*if_map), M_IFADDR,
+	    M_WAITOK | M_ZERO);
 
-	if_map->limit = limit;
-	map = (struct srp *)(if_map + 1);
-	for (i = 0; i < limit; i++)
-		srp_init(&map[i]);
+	if_map[0] = (struct ifnet *)(uintptr_t)limit;
 
 	if_idxmap.usedidx = malloc(howmany(limit, NBBY),
 	    M_IFADDR, M_WAITOK | M_ZERO);
 
 	/* this is called early so there's nothing to race with */
-	srp_update_locked(&if_map_gc, &if_idxmap.map, if_map);
+	SMR_PTR_SET_LOCKED(&if_idxmap.map, if_map);
 }
 
 void
 if_idxmap_alloc(struct ifnet *ifp)
 {
-	struct if_map *if_map;
-	struct srp *map;
+	struct ifnet **if_map, **oif_map = NULL;
+	unsigned int limit, olimit;
 	unsigned int index, i;
 
 	refcnt_init(&ifp->if_refcnt);
@@ -325,49 +310,41 @@ if_idxmap_alloc(struct ifnet *ifp)
 	if (++if_idxmap.count >= USHRT_MAX)
 		panic("too many interfaces");
 
-	if_map = srp_get_locked(&if_idxmap.map);
-	map = (struct srp *)(if_map + 1);
+	if_map = SMR_PTR_GET_LOCKED(&if_idxmap.map);
+	limit = if_idxmap_limit(if_map);
 
 	index = if_idxmap.serial++ & USHRT_MAX;
 
-	if (index >= if_map->limit) {
-		struct if_map *nif_map;
-		struct srp *nmap;
-		unsigned int nlimit;
-		struct ifnet *nifp;
+	if (index >= limit) {
 		unsigned char *nusedidx;
 
-		nlimit = if_map->limit * 2;
-		nif_map = malloc(sizeof(*nif_map) + nlimit * sizeof(*nmap),
-		    M_IFADDR, M_WAITOK);
-		nmap = (struct srp *)(nif_map + 1);
-
-		nif_map->limit = nlimit;
-		for (i = 0; i < if_map->limit; i++) {
-			srp_init(&nmap[i]);
-			nifp = srp_get_locked(&map[i]);
-			if (nifp != NULL) {
-				srp_update_locked(&if_ifp_gc, &nmap[i],
-				    if_ref(nifp));
-			}
-		}
+		oif_map = if_map;
+		olimit = limit;
+
+		limit = olimit * 2;
+		if_map = mallocarray(limit, sizeof(*if_map), M_IFADDR,
+		    M_WAITOK | M_ZERO);
+		if_map[0] = (struct ifnet *)(uintptr_t)limit;
+
+		for (i = 1; i < olimit; i++) {
+			struct ifnet *oifp = SMR_PTR_GET_LOCKED(&oif_map[i]);
+			if (oifp == NULL)
+				continue;
 
-		while (i < nlimit) {
-			srp_init(&nmap[i]);
-			i++;
+			/*
+			 * nif_map isn't visible yet, so don't need
+			 * SMR_PTR_SET_LOCKED and its membar.
+			 */
+			if_map[i] = if_ref(oifp);
 		}
 
-		nusedidx = malloc(howmany(nlimit, NBBY),
+		nusedidx = malloc(howmany(limit, NBBY),
 		    M_IFADDR, M_WAITOK | M_ZERO);
-		memcpy(nusedidx, if_idxmap.usedidx,
-		    howmany(if_map->limit, NBBY));
-		free(if_idxmap.usedidx, M_IFADDR,
-		    howmany(if_map->limit, NBBY));
+		memcpy(nusedidx, if_idxmap.usedidx, howmany(olimit, NBBY));
+		free(if_idxmap.usedidx, M_IFADDR, howmany(olimit, NBBY));
 		if_idxmap.usedidx = nusedidx;
 
-		srp_update_locked(&if_map_gc, &if_idxmap.map, nif_map);
-		if_map = nif_map;
-		map = nmap;
+		SMR_PTR_SET_LOCKED(&if_idxmap.map, if_map);
 	}
 
 	/* pick the next free index */
@@ -377,32 +354,40 @@ if_idxmap_alloc(struct ifnet *ifp)
 		index = if_idxmap.serial++ & USHRT_MAX;
 	}
 
-	KASSERT(index != 0 && index < if_map->limit);
+	KASSERT(index != 0 && index < limit);
 	KASSERT(isclr(if_idxmap.usedidx, index));
 	setbit(if_idxmap.usedidx, index);
 
 	ifp->if_index = index;
 
 	rw_exit_write(&if_idxmap.lock);
+
+	if (oif_map != NULL) {
+		smr_barrier();
+		for (i = 1; i < olimit; i++)
+			if_put(oif_map[i]);
+		free(oif_map, M_IFADDR, olimit * sizeof(*oif_map));
+	}
 }
 
 void
 if_idxmap_insert(struct ifnet *ifp)
 {
-	struct if_map *if_map;
-	struct srp *map;
+	struct ifnet **if_map;
 	unsigned int index = ifp->if_index;
 
 	rw_enter_write(&if_idxmap.lock);
 
-	if_map = srp_get_locked(&if_idxmap.map);
-	map = (struct srp *)(if_map + 1);
+	if_map = SMR_PTR_GET_LOCKED(&if_idxmap.map);
 
-	KASSERT(index != 0 && index < if_map->limit);
+	KASSERTMSG(index != 0 && index < if_idxmap_limit(if_map),
+	    "%s(%p) index %u vs limit %u", ifp->if_xname, ifp, index,
+	    if_idxmap_limit(if_map));
+	KASSERT(SMR_PTR_GET_LOCKED(&if_map[index]) == NULL);
 	KASSERT(isset(if_idxmap.usedidx, index));
 
 	/* commit */
-	srp_update_locked(&if_ifp_gc, &map[index], if_ref(ifp));
+	SMR_PTR_SET_LOCKED(&if_map[index], if_ref(ifp));
 
 	rw_exit_write(&if_idxmap.lock);
 }
@@ -410,53 +395,29 @@ if_idxmap_insert(struct ifnet *ifp)
 void
 if_idxmap_remove(struct ifnet *ifp)
 {
-	struct if_map *if_map;
-	struct srp *map;
-	unsigned int index;
-
-	index = ifp->if_index;
+	struct ifnet **if_map;
+	unsigned int index = ifp->if_index;
 
 	rw_enter_write(&if_idxmap.lock);
 
-	if_map = srp_get_locked(&if_idxmap.map);
-	KASSERT(index < if_map->limit);
+	if_map = SMR_PTR_GET_LOCKED(&if_idxmap.map);
+
+	KASSERT(index != 0 && index < if_idxmap_limit(if_map));
+	KASSERT(SMR_PTR_GET_LOCKED(&if_map[index]) == ifp);
+	KASSERT(isset(if_idxmap.usedidx, index));
 
-	map = (struct srp *)(if_map + 1);
-	KASSERT(ifp == (struct ifnet *)srp_get_locked(&map[index]));
+	SMR_PTR_SET_LOCKED(&if_map[index], NULL);
 
-	srp_update_locked(&if_ifp_gc, &map[index], NULL);
 	if_idxmap.count--;
-
-	KASSERT(isset(if_idxmap.usedidx, index));
 	clrbit(if_idxmap.usedidx, index);
 	/* end of if_idxmap modifications */
 
 	rw_exit_write(&if_idxmap.lock);
-}
 
-void
-if_ifp_dtor(void *null, void *ifp)
-{
+	smr_barrier();
 	if_put(ifp);
 }
 
-void
-if_map_dtor(void *null, void *m)
-{
-	struct if_map *if_map = m;
-	struct srp *map = (struct srp *)(if_map + 1);
-	unsigned int i;
-
-	/*
-	 * dont need to serialize the use of update_locked since this is
-	 * the last reference to this map. there's nothing to race against.
-	 */
-	for (i = 0; i < if_map->limit; i++)
-		srp_update_locked(&if_ifp_gc, &map[i], NULL);
-
-	free(if_map, M_IFADDR, sizeof(*if_map) + if_map->limit * sizeof(*map));
-}
-
 /*
  * Attach an interface to the
  * list of "active" interfaces.
@@ -970,14 +931,6 @@ if_netisr(void *unused)
 		t |= n;
 	}
 
-#if NPFSYNC > 0
-	if (t & (1 << NETISR_PFSYNC)) {
-		KERNEL_LOCK();
-		pfsyncintr();
-		KERNEL_UNLOCK();
-	}
-#endif
-
 	NET_UNLOCK();
 }
 
@@ -1780,22 +1733,22 @@ if_unit(const char *name)
 struct ifnet *
 if_get(unsigned int index)
 {
-	struct srp_ref sr;
-	struct if_map *if_map;
-	struct srp *map;
+	struct ifnet **if_map;
 	struct ifnet *ifp = NULL;
 
-	if_map = srp_enter(&sr, &if_idxmap.map);
-	if (index < if_map->limit) {
-		map = (struct srp *)(if_map + 1);
+	if (index == 0)
+		return (NULL);
 
-		ifp = srp_follow(&sr, &map[index]);
+	smr_read_enter();
+	if_map = SMR_PTR_GET(&if_idxmap.map);
+	if (index < if_idxmap_limit(if_map)) {
+		ifp = SMR_PTR_GET(&if_map[index]);
 		if (ifp != NULL) {
			KASSERT(ifp->if_index == index);
 			if_ref(ifp);
 		}
 	}
-	srp_leave(&sr);
+	smr_read_leave();
 
 	return (ifp);
 }
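
The subtle part of the conversion is the update side: the new map (or a cleared slot) has to be published before the old interface references are dropped, and smr_barrier() is what separates the two steps. Below is a condensed sketch of the grow path in if_idxmap_alloc() as shown in the diff; the function name is made up and the body only illustrates the ordering, it is not code that exists in the tree.

```c
/*
 * Sketch: the caller has filled the new map and holds if_idxmap.lock
 * as a writer. Publish the new map first, then wait out SMR readers
 * that may still be walking the old map, then drop the old references.
 */
static void
idxmap_replace_sketch(struct ifnet **if_map, struct ifnet **oif_map,
    unsigned int olimit)
{
	unsigned int i;

	SMR_PTR_SET_LOCKED(&if_idxmap.map, if_map);	/* publish new map */
	rw_exit_write(&if_idxmap.lock);

	smr_barrier();			/* no reader can still see oif_map */
	for (i = 1; i < olimit; i++)
		if_put(oif_map[i]);	/* if_put(NULL) is a no-op */
	free(oif_map, M_IFADDR, olimit * sizeof(*oif_map));
}
```

if_idxmap_remove() follows the same discipline for a single slot: clear the pointer under the lock, rw_exit_write(), smr_barrier(), and only then if_put() the interface.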