path: root/sys/kern
author    Alexander Bluhm <bluhm@cvs.openbsd.org>  2024-11-06 14:37:46 +0000
committer Alexander Bluhm <bluhm@cvs.openbsd.org>  2024-11-06 14:37:46 +0000
commit    be67c1623e5cff64b38112356f5fa4175040881d (patch)
tree      dd13dc0c7b0d8a5c83c84c794ffb7e2497fff924 /sys/kern
parent    98e8bc8aba248755245c6d81042ae5eeec1f45b9 (diff)
Use atomic load and store operations for sbchecklowmem().
Use atomic operations to make explicit where access from multiple CPUs happens. Add a comment explaining why sbchecklowmem() is sufficiently MP safe without locks. OK mvs@ claudio@
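A minimal userland sketch of the pattern this commit applies, written with
C11 stdatomic instead of the kernel's <sys/atomic.h> primitives
(atomic_store_long()/atomic_load_long()); the names mem_limit, set_limit()
and get_limit() below are illustrative, not kernel symbols:

#include <stdatomic.h>

static _Atomic unsigned long mem_limit;	/* [a] shared memory limit */

/* writer side, cf. nmbclust_update(): publish a new limit */
void
set_limit(unsigned long clusters, unsigned long clbytes)
{
	atomic_store_explicit(&mem_limit, clusters * clbytes,
	    memory_order_relaxed);
}

/* reader side, cf. m_pool_alloc(): observe the current limit */
unsigned long
get_limit(void)
{
	return atomic_load_explicit(&mem_limit, memory_order_relaxed);
}

Relaxed ordering matches the kernel primitives here: the point is a single
untorn load or store, not ordering against other memory accesses.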
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/uipc_mbuf.c    | 14
-rw-r--r--  sys/kern/uipc_socket2.c | 16
2 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 34ce9390635..1151a179fb5 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_mbuf.c,v 1.294 2024/09/10 14:52:42 bluhm Exp $ */
+/* $OpenBSD: uipc_mbuf.c,v 1.295 2024/11/06 14:37:45 bluhm Exp $ */
/* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
/*
@@ -129,8 +129,8 @@ struct mutex m_extref_mtx = MUTEX_INITIALIZER(IPL_NET);
void m_extfree(struct mbuf *);
void m_zero(struct mbuf *);
-unsigned long mbuf_mem_limit; /* how much memory can be allocated */
-unsigned long mbuf_mem_alloc; /* how much memory has been allocated */
+unsigned long mbuf_mem_limit; /* [a] how much memory can be allocated */
+unsigned long mbuf_mem_alloc; /* [a] how much memory has been allocated */
void *m_pool_alloc(struct pool *, int, int *);
void m_pool_free(struct pool *, void *);
@@ -219,7 +219,7 @@ nmbclust_update(long newval)
return ERANGE;
/* update the global mbuf memory limit */
nmbclust = newval;
- mbuf_mem_limit = nmbclust * MCLBYTES;
+ atomic_store_long(&mbuf_mem_limit, nmbclust * MCLBYTES);
pool_wakeup(&mbpool);
for (i = 0; i < nitems(mclsizes); i++)
@@ -1458,7 +1458,8 @@ m_pool_alloc(struct pool *pp, int flags, int *slowdown)
{
void *v;
- if (atomic_add_long_nv(&mbuf_mem_alloc, pp->pr_pgsize) > mbuf_mem_limit)
+ if (atomic_add_long_nv(&mbuf_mem_alloc, pp->pr_pgsize) >
+ atomic_load_long(&mbuf_mem_limit))
goto fail;
v = (*pool_allocator_multi.pa_alloc)(pp, flags, slowdown);
@@ -1488,7 +1489,8 @@ m_pool_init(struct pool *pp, u_int size, u_int align, const char *wmesg)
u_int
m_pool_used(void)
{
- return ((mbuf_mem_alloc * 100) / mbuf_mem_limit);
+ return ((atomic_load_long(&mbuf_mem_alloc) * 100) /
+ atomic_load_long(&mbuf_mem_limit));
}
#ifdef DDB
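The uipc_mbuf.c hunks combine such loads with an atomic increment. A hedged
C11 sketch of that accounting scheme (assumed names reserve() and
used_percent(); note that C11 atomic_fetch_add returns the old value where
the kernel's atomic_add_long_nv() returns the new one):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long mem_alloc;	/* [a] bytes accounted */
extern _Atomic unsigned long mem_limit;	/* see the sketch above */

/* cf. m_pool_alloc(): account the allocation, then check the limit */
bool
reserve(unsigned long size)
{
	unsigned long nv;

	/* fetch_add yields the old value; add size to get the new one */
	nv = atomic_fetch_add_explicit(&mem_alloc, size,
	    memory_order_relaxed) + size;
	return nv <= atomic_load_explicit(&mem_limit, memory_order_relaxed);
}

/* cf. m_pool_used(): snapshot both counters once, then compute percent */
unsigned int
used_percent(void)
{
	unsigned long alloc, limit;

	alloc = atomic_load_explicit(&mem_alloc, memory_order_relaxed);
	limit = atomic_load_explicit(&mem_limit, memory_order_relaxed);
	return (unsigned int)((alloc * 100) / limit);
}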
diff --git a/sys/kern/uipc_socket2.c b/sys/kern/uipc_socket2.c
index 2855feb61c7..91f32710478 100644
--- a/sys/kern/uipc_socket2.c
+++ b/sys/kern/uipc_socket2.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_socket2.c,v 1.158 2024/07/12 19:50:35 bluhm Exp $ */
+/* $OpenBSD: uipc_socket2.c,v 1.159 2024/11/06 14:37:45 bluhm Exp $ */
/* $NetBSD: uipc_socket2.c,v 1.11 1996/02/04 02:17:55 christos Exp $ */
/*
@@ -684,14 +684,20 @@ int
sbchecklowmem(void)
{
static int sblowmem;
- unsigned int used = m_pool_used();
+ unsigned int used;
+ /*
+ * m_pool_used() is thread safe. Global variable sblowmem is updated
+ * by multiple CPUs, but most times with the same value. And even
+ * if the value is not correct for a short time, it does not matter.
+ */
+ used = m_pool_used();
if (used < 60)
- sblowmem = 0;
+ atomic_store_int(&sblowmem, 0);
else if (used > 80)
- sblowmem = 1;
+ atomic_store_int(&sblowmem, 1);
- return (sblowmem);
+ return (atomic_load_int(&sblowmem));
}
/*
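The committed comment argues that a racy flag is acceptable here. A userland
sketch of that hysteresis (assumed names; the thresholds 60 and 80 are taken
from the diff): the flag flips off below 60% usage, on above 80%, and keeps
its last value in between, so a briefly stale read on another CPU is
harmless.

#include <stdatomic.h>

static _Atomic int lowmem;	/* [a] racy but benign flag */

int
check_lowmem(unsigned int used_percent)
{
	if (used_percent < 60)
		atomic_store_explicit(&lowmem, 0, memory_order_relaxed);
	else if (used_percent > 80)
		atomic_store_explicit(&lowmem, 1, memory_order_relaxed);
	return atomic_load_explicit(&lowmem, memory_order_relaxed);
}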