author     Claudio Jeker <claudio@cvs.openbsd.org>  2019-07-24 20:54:55 +0000
committer  Claudio Jeker <claudio@cvs.openbsd.org>  2019-07-24 20:54:55 +0000
commit     1fbf90bcaf4eb348812e9004193c30900357eb2a (patch)
tree       3ba1459832587c27352c786516e22b64e6bf28ea
parent     efaf08bc8ff476af9c9d9b1944a67d2ca61d4b89 (diff)
rib_dump_terminate() and rib_dump_abort() need to release any possibly
locked entry (rib or prefix) before removing the context, otherwise those
elements may remain locked for the rest of the run time (which amounts to
a memory leak). OK benno@
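Below is a condensed, stand-alone sketch of the locking pattern this change
relies on; it is not the bgpd sources themselves. The point is that the lock
helpers now return the object they lock, so the dump routines can remember
and lock the resume point in one expression, and the abort/terminate paths
can unlock it again before the context is freed. The struct layout, flag
value and errx() calls are placeholders for the real definitions and
fatalx() in the bgpd sources.

/*
 * Illustrative sketch only: simplified stand-ins for the bgpd types.
 */
#include <err.h>
#include <stdio.h>

#define PREFIX_FLAG_LOCKED	0x01	/* placeholder bit, not bgpd's value */

struct prefix {
	unsigned int	 flags;
};

/* lock helpers return the prefix so "ctx->ctx_p = prefix_lock(p);" works */
static struct prefix *
prefix_lock(struct prefix *p)
{
	if (p->flags & PREFIX_FLAG_LOCKED)
		errx(1, "%s: locking locked prefix", __func__);
	p->flags |= PREFIX_FLAG_LOCKED;
	return p;
}

static struct prefix *
prefix_unlock(struct prefix *p)
{
	if ((p->flags & PREFIX_FLAG_LOCKED) == 0)
		errx(1, "%s: unlocking unlocked prefix", __func__);
	p->flags &= ~PREFIX_FLAG_LOCKED;
	return p;
}

int
main(void)
{
	struct prefix	 p = { 0 };
	struct prefix	*saved;

	/* as in prefix_dump_r(): store and lock the resume point in one step */
	saved = prefix_lock(&p);

	/* as in rib_dump_abort()/rib_dump_terminate(): unlock before freeing */
	prefix_unlock(saved);
	printf("flags after unlock: 0x%x\n", p.flags);
	return 0;
}

With the helpers chained like this, the unlock-and-cleanup step in
rib_dump_abort() and rib_dump_terminate() collapses into a single
condition, as the hunks below show.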
-rw-r--r--   usr.sbin/bgpd/rde_rib.c   111
1 file changed, 59 insertions(+), 52 deletions(-)
diff --git a/usr.sbin/bgpd/rde_rib.c b/usr.sbin/bgpd/rde_rib.c
index 7fde1d38a8c..e6dd9473df2 100644
--- a/usr.sbin/bgpd/rde_rib.c
+++ b/usr.sbin/bgpd/rde_rib.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rde_rib.c,v 1.200 2019/07/23 14:19:17 claudio Exp $ */
+/* $OpenBSD: rde_rib.c,v 1.201 2019/07/24 20:54:54 claudio Exp $ */
/*
* Copyright (c) 2003, 2004 Claudio Jeker <claudio@openbsd.org>
@@ -97,6 +97,36 @@ re_is_locked(struct rib_entry *re)
return (re->lock != 0);
}
+static inline struct prefix *
+prefix_lock(struct prefix *p)
+{
+ if (p->flags & PREFIX_FLAG_LOCKED)
+ fatalx("%s: locking locked prefix", __func__);
+ p->flags |= PREFIX_FLAG_LOCKED;
+ return p;
+}
+
+static inline struct prefix *
+prefix_unlock(struct prefix *p)
+{
+ if ((p->flags & PREFIX_FLAG_LOCKED) == 0)
+ fatalx("%s: unlocking unlocked prefix", __func__);
+ p->flags &= ~PREFIX_FLAG_LOCKED;
+ return p;
+}
+
+static inline int
+prefix_is_locked(struct prefix *p)
+{
+ return (p->flags & PREFIX_FLAG_LOCKED) != 0;
+}
+
+static inline int
+prefix_is_dead(struct prefix *p)
+{
+ return (p->flags & PREFIX_FLAG_DEAD) != 0;
+}
+
static inline struct rib_tree *
rib_tree(struct rib *rib)
{
@@ -349,8 +379,7 @@ rib_restart(struct rib_context *ctx)
{
struct rib_entry *re;
- re = ctx->ctx_re;
- re_unlock(re);
+ re = re_unlock(ctx->ctx_re);
/* find first non empty element */
while (re && rib_empty(re))
@@ -390,8 +419,7 @@ rib_dump_r(struct rib_context *ctx)
if (ctx->ctx_count && i++ >= ctx->ctx_count &&
!re_is_locked(re)) {
/* store and lock last element */
- ctx->ctx_re = re;
- re_lock(re);
+ ctx->ctx_re = re_lock(re);
return;
}
ctx->ctx_rib_call(re, ctx->ctx_arg);
@@ -442,6 +470,29 @@ rib_dump_abort(u_int16_t id)
continue;
if (ctx->ctx_done)
ctx->ctx_done(ctx->ctx_arg, ctx->ctx_aid);
+ if (ctx->ctx_re && rib_empty(re_unlock(ctx->ctx_re)))
+ rib_remove(ctx->ctx_re);
+ if (ctx->ctx_p && prefix_is_dead(prefix_unlock(ctx->ctx_p)))
+ prefix_adjout_destroy(ctx->ctx_p);
+ LIST_REMOVE(ctx, entry);
+ free(ctx);
+ }
+}
+
+void
+rib_dump_terminate(void *arg)
+{
+ struct rib_context *ctx, *next;
+
+ LIST_FOREACH_SAFE(ctx, &rib_dumps, entry, next) {
+ if (ctx->ctx_arg != arg)
+ continue;
+ if (ctx->ctx_done)
+ ctx->ctx_done(ctx->ctx_arg, ctx->ctx_aid);
+ if (ctx->ctx_re && rib_empty(re_unlock(ctx->ctx_re)))
+ rib_remove(ctx->ctx_re);
+ if (ctx->ctx_p && prefix_is_dead(prefix_unlock(ctx->ctx_p)))
+ prefix_adjout_destroy(ctx->ctx_p);
LIST_REMOVE(ctx, entry);
free(ctx);
}
@@ -473,21 +524,6 @@ rib_dump_new(u_int16_t id, u_int8_t aid, unsigned int count, void *arg,
return 0;
}
-void
-rib_dump_terminate(void *arg)
-{
- struct rib_context *ctx, *next;
-
- LIST_FOREACH_SAFE(ctx, &rib_dumps, entry, next) {
- if (ctx->ctx_arg != arg)
- continue;
- if (ctx->ctx_done)
- ctx->ctx_done(ctx->ctx_arg, ctx->ctx_aid);
- LIST_REMOVE(ctx, entry);
- free(ctx);
- }
-}
-
/* path specific functions */
static struct rde_aspath *path_lookup(struct rde_aspath *);
@@ -1208,41 +1244,12 @@ prefix_withdraw(struct rde_peer *peer, struct bgpd_addr *prefix, int prefixlen)
return (1);
}
-static inline void
-prefix_lock(struct prefix *p)
-{
- if (p->flags & PREFIX_FLAG_LOCKED)
- fatalx("%s: locking locked prefix", __func__);
- p->flags |= PREFIX_FLAG_LOCKED;
-}
-
-static inline void
-prefix_unlock(struct prefix *p)
-{
- if ((p->flags & PREFIX_FLAG_LOCKED) == 0)
- fatalx("%s: unlocking unlocked prefix", __func__);
- p->flags &= ~PREFIX_FLAG_LOCKED;
-}
-
-static inline int
-prefix_is_locked(struct prefix *p)
-{
- return (p->flags & PREFIX_FLAG_LOCKED) != 0;
-}
-
-static inline int
-prefix_is_dead(struct prefix *p)
-{
- return (p->flags & PREFIX_FLAG_DEAD) != 0;
-}
-
static struct prefix *
prefix_restart(struct rib_context *ctx)
{
struct prefix *p;
- p = ctx->ctx_p;
- prefix_unlock(p);
+ p = prefix_unlock(ctx->ctx_p);
if (prefix_is_dead(p)) {
struct prefix *next;
@@ -1251,6 +1258,7 @@ prefix_restart(struct rib_context *ctx)
prefix_adjout_destroy(p);
p = next;
}
+ ctx->ctx_p = NULL;
return p;
}
@@ -1322,8 +1330,7 @@ prefix_dump_r(struct rib_context *ctx)
if (ctx->ctx_count && i++ >= ctx->ctx_count &&
!prefix_is_locked(p)) {
/* store and lock last element */
- ctx->ctx_p = p;
- prefix_lock(p);
+ ctx->ctx_p = prefix_lock(p);
return;
}
ctx->ctx_prefix_call(p, ctx->ctx_arg);