summaryrefslogtreecommitdiff
path: root/usr.sbin/bgpd/rde_rib.c
diff options
context:
space:
mode:
authorClaudio Jeker <claudio@cvs.openbsd.org>2009-06-01 22:49:07 +0000
committerClaudio Jeker <claudio@cvs.openbsd.org>2009-06-01 22:49:07 +0000
commit276317b86ebb47f8e781ac4d05a5f61fb9d04b92 (patch)
tree663d1e99af600b38c629a74eb776aa8418f2df0f /usr.sbin/bgpd/rde_rib.c
parent4ce3e7a39b85475a8527adeb543af1be496ca7db (diff)
Holy simplification batman. Use the per rib entry flags to lock entries
when interrupting rib dumps and now we no longer need evil RB magic to find the next entry on restart.
Diffstat (limited to 'usr.sbin/bgpd/rde_rib.c')
-rw-r--r--usr.sbin/bgpd/rde_rib.c75
1 file changed, 24 insertions, 51 deletions
diff --git a/usr.sbin/bgpd/rde_rib.c b/usr.sbin/bgpd/rde_rib.c
index aa3b2d1eaaf..80d4ab293c2 100644
--- a/usr.sbin/bgpd/rde_rib.c
+++ b/usr.sbin/bgpd/rde_rib.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rde_rib.c,v 1.104 2009/06/01 21:20:17 claudio Exp $ */
+/* $OpenBSD: rde_rib.c,v 1.105 2009/06/01 22:49:06 claudio Exp $ */
/*
* Copyright (c) 2003, 2004 Claudio Jeker <claudio@openbsd.org>
@@ -181,6 +181,10 @@ rib_remove(struct rib_entry *re)
if (!rib_empty(re))
fatalx("rib_remove: entry not empty");
+ if (re->flags & F_RIB_ENTRYLOCK)
+ /* entry is locked, don't free it. */
+ return;
+
pt_unref(re->prefix);
if (pt_empty(re->prefix))
pt_remove(re->prefix);
@@ -218,7 +222,7 @@ rib_dump_r(struct rib_context *ctx)
struct rib_entry *re;
unsigned int i;
- if (ctx->ctx_p == NULL) {
+ if (ctx->ctx_re == NULL) {
re = RB_MIN(rib_tree, &ctx->ctx_rib->rib);
LIST_INSERT_HEAD(&ctx->ctx_rib->ctxts, ctx, entry);
} else
@@ -227,13 +231,14 @@ rib_dump_r(struct rib_context *ctx)
for (i = 0; re != NULL; re = RB_NEXT(rib_tree, unused, re)) {
if (ctx->ctx_af != AF_UNSPEC && ctx->ctx_af != re->prefix->af)
continue;
- if (ctx->ctx_count && i++ >= ctx->ctx_count) {
- /* store next start point */
- ctx->ctx_p = re->prefix;
- pt_ref(ctx->ctx_p);
+ ctx->ctx_upcall(re, ctx->ctx_arg);
+ if (ctx->ctx_count && i++ >= ctx->ctx_count &&
+ (re->flags & F_RIB_ENTRYLOCK) == 0) {
+ /* store and lock last element */
+ ctx->ctx_re = re;
+ re->flags |= F_RIB_ENTRYLOCK;
return;
}
- ctx->ctx_upcall(re, ctx->ctx_arg);
}
LIST_REMOVE(ctx, entry);
@@ -244,52 +249,20 @@ rib_dump_r(struct rib_context *ctx)
struct rib_entry *
rib_restart(struct rib_context *ctx)
{
- struct rib_entry *tmp, *prev = NULL;
- int comp;
-
- /* first check if the table is still around */
- if (ctx->ctx_rib == NULL)
- goto done;
-
- /* then try to find the element */
- tmp = RB_ROOT(&ctx->ctx_rib->rib);
- while (tmp) {
- prev = tmp;
- comp = pt_prefix_cmp(ctx->ctx_p, tmp->prefix);
- if (comp < 0)
- tmp = RB_LEFT(tmp, rib_e);
- else if (comp > 0)
- tmp = RB_RIGHT(tmp, rib_e);
- else
- goto done;
- }
+ struct rib_entry *re;
- /* no match, empty tree */
- if (prev == NULL)
- goto done;
+ re = ctx->ctx_re;
+ re->flags &= ~F_RIB_ENTRYLOCK;
- /*
- * no perfect match
- * if last element was bigger use that as new start point
- */
- if (comp < 0)
- goto done;
-
- /* backtrack until parent is bigger */
- do {
- prev = RB_PARENT(prev, rib_e);
- if (prev == NULL)
- /* all elements in the tree are smaller */
- goto done;
- comp = pt_prefix_cmp(ctx->ctx_p, prev->prefix);
- } while (comp > 0);
-
-done:
- /* unref the prefix and cleanup if needed. */
- pt_unref(ctx->ctx_p);
- if (pt_empty(ctx->ctx_p))
- pt_remove(ctx->ctx_p);
- return (prev);
+ /* find first non empty element */
+ while (rib_empty(re))
+ re = RB_NEXT(rib_tree, unused, re);
+
+ /* free the previously locked rib element if empty */
+ if (rib_empty(ctx->ctx_re))
+ rib_remove(ctx->ctx_re);
+ ctx->ctx_re = NULL;
+ return (re);
}
/* used to bump correct prefix counters */