path: root/usr.sbin/unbound/iterator
Diffstat (limited to 'usr.sbin/unbound/iterator')
-rw-r--r--  usr.sbin/unbound/iterator/iter_fwd.c    119
-rw-r--r--  usr.sbin/unbound/iterator/iter_fwd.h     60
-rw-r--r--  usr.sbin/unbound/iterator/iter_hints.c   95
-rw-r--r--  usr.sbin/unbound/iterator/iter_hints.h   54
-rw-r--r--  usr.sbin/unbound/iterator/iter_utils.c   62
-rw-r--r--  usr.sbin/unbound/iterator/iter_utils.h    6
-rw-r--r--  usr.sbin/unbound/iterator/iterator.c    295
7 files changed, 521 insertions(+), 170 deletions(-)
diff --git a/usr.sbin/unbound/iterator/iter_fwd.c b/usr.sbin/unbound/iterator/iter_fwd.c
index c4b2411297e..b9d42553a8b 100644
--- a/usr.sbin/unbound/iterator/iter_fwd.c
+++ b/usr.sbin/unbound/iterator/iter_fwd.c
@@ -71,6 +71,7 @@ forwards_create(void)
sizeof(struct iter_forwards));
if(!fwd)
return NULL;
+ lock_rw_init(&fwd->lock);
return fwd;
}
@@ -100,6 +101,7 @@ forwards_delete(struct iter_forwards* fwd)
{
if(!fwd)
return;
+ lock_rw_destroy(&fwd->lock);
fwd_del_tree(fwd);
free(fwd);
}
@@ -332,45 +334,64 @@ make_stub_holes(struct iter_forwards* fwd, struct config_file* cfg)
int
forwards_apply_cfg(struct iter_forwards* fwd, struct config_file* cfg)
{
+ if(fwd->tree) {
+ lock_unprotect(&fwd->lock, fwd->tree);
+ }
fwd_del_tree(fwd);
fwd->tree = rbtree_create(fwd_cmp);
if(!fwd->tree)
return 0;
+ lock_protect(&fwd->lock, fwd->tree, sizeof(*fwd->tree));
+ lock_rw_wrlock(&fwd->lock);
/* read forward zones */
- if(!read_forwards(fwd, cfg))
+ if(!read_forwards(fwd, cfg)) {
+ lock_rw_unlock(&fwd->lock);
return 0;
- if(!make_stub_holes(fwd, cfg))
+ }
+ if(!make_stub_holes(fwd, cfg)) {
+ lock_rw_unlock(&fwd->lock);
return 0;
+ }
fwd_init_parents(fwd);
+ lock_rw_unlock(&fwd->lock);
return 1;
}
struct delegpt*
-forwards_find(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
+forwards_find(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass,
+ int nolock)
{
- rbnode_type* res = NULL;
+ struct iter_forward_zone* res;
struct iter_forward_zone key;
+ int has_dp;
key.node.key = &key;
key.dclass = qclass;
key.name = qname;
key.namelabs = dname_count_size_labels(qname, &key.namelen);
- res = rbtree_search(fwd->tree, &key);
- if(res) return ((struct iter_forward_zone*)res)->dp;
- return NULL;
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&fwd->lock); }
+ res = (struct iter_forward_zone*)rbtree_search(fwd->tree, &key);
+ has_dp = res && res->dp;
+ if(!has_dp && !nolock) { lock_rw_unlock(&fwd->lock); }
+ return has_dp?res->dp:NULL;
}
struct delegpt*
-forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
+forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass,
+ int nolock)
{
/* lookup the forward zone in the tree */
rbnode_type* res = NULL;
struct iter_forward_zone *result;
struct iter_forward_zone key;
+ int has_dp;
key.node.key = &key;
key.dclass = qclass;
key.name = qname;
key.namelabs = dname_count_size_labels(qname, &key.namelen);
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&fwd->lock); }
if(rbtree_find_less_equal(fwd->tree, &key, &res)) {
/* exact */
result = (struct iter_forward_zone*)res;
@@ -378,8 +399,10 @@ forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
/* smaller element (or no element) */
int m;
result = (struct iter_forward_zone*)res;
- if(!result || result->dclass != qclass)
+ if(!result || result->dclass != qclass) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return NULL;
+ }
/* count number of labels matched */
(void)dname_lab_cmp(result->name, result->namelabs, key.name,
key.namelabs, &m);
@@ -389,20 +412,22 @@ forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
result = result->parent;
}
}
- if(result)
- return result->dp;
- return NULL;
+ has_dp = result && result->dp;
+ if(!has_dp && !nolock) { lock_rw_unlock(&fwd->lock); }
+ return has_dp?result->dp:NULL;
}
struct delegpt*
-forwards_lookup_root(struct iter_forwards* fwd, uint16_t qclass)
+forwards_lookup_root(struct iter_forwards* fwd, uint16_t qclass, int nolock)
{
uint8_t root = 0;
- return forwards_lookup(fwd, &root, qclass);
+ return forwards_lookup(fwd, &root, qclass, nolock);
}
-int
-forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
+/* Finds next root item in forwards lookup tree.
+ * Caller needs to handle locking of the forwards structure. */
+static int
+next_root_locked(struct iter_forwards* fwd, uint16_t* dclass)
{
struct iter_forward_zone key;
rbnode_type* n;
@@ -419,7 +444,7 @@ forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
}
/* root not first item? search for higher items */
*dclass = p->dclass + 1;
- return forwards_next_root(fwd, dclass);
+ return next_root_locked(fwd, dclass);
}
/* find class n in tree, we may get a direct hit, or if we don't
* this is the last item of the previous class so rbtree_next() takes
@@ -447,10 +472,21 @@ forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
}
/* not a root node, return next higher item */
*dclass = p->dclass+1;
- return forwards_next_root(fwd, dclass);
+ return next_root_locked(fwd, dclass);
}
}
+int
+forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass, int nolock)
+{
+ int ret;
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&fwd->lock); }
+ ret = next_root_locked(fwd, dclass);
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
+ return ret;
+}
+
size_t
forwards_get_mem(struct iter_forwards* fwd)
{
@@ -458,10 +494,12 @@ forwards_get_mem(struct iter_forwards* fwd)
size_t s;
if(!fwd)
return 0;
+ lock_rw_rdlock(&fwd->lock);
s = sizeof(*fwd) + sizeof(*fwd->tree);
RBTREE_FOR(p, struct iter_forward_zone*, fwd->tree) {
s += sizeof(*p) + p->namelen + delegpt_get_mem(p->dp);
}
+ lock_rw_unlock(&fwd->lock);
return s;
}
@@ -477,49 +515,78 @@ fwd_zone_find(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
}
int
-forwards_add_zone(struct iter_forwards* fwd, uint16_t c, struct delegpt* dp)
+forwards_add_zone(struct iter_forwards* fwd, uint16_t c, struct delegpt* dp,
+ int nolock)
{
struct iter_forward_zone *z;
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&fwd->lock); }
if((z=fwd_zone_find(fwd, c, dp->name)) != NULL) {
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
}
- if(!forwards_insert(fwd, c, dp))
+ if(!forwards_insert(fwd, c, dp)) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 0;
+ }
fwd_init_parents(fwd);
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1;
}
void
-forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
+forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm,
+ int nolock)
{
struct iter_forward_zone *z;
- if(!(z=fwd_zone_find(fwd, c, nm)))
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&fwd->lock); }
+ if(!(z=fwd_zone_find(fwd, c, nm))) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* nothing to do */
+ }
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
fwd_init_parents(fwd);
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
}
int
-forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
+forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm,
+ int nolock)
{
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&fwd->lock); }
+ if(fwd_zone_find(fwd, c, nm) != NULL) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
+ return 1; /* already a stub zone there */
+ }
if(!fwd_add_stub_hole(fwd, c, nm)) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 0;
}
fwd_init_parents(fwd);
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1;
}
void
-forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
+forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c,
+ uint8_t* nm, int nolock)
{
struct iter_forward_zone *z;
- if(!(z=fwd_zone_find(fwd, c, nm)))
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&fwd->lock); }
+ if(!(z=fwd_zone_find(fwd, c, nm))) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* nothing to do */
- if(z->dp != NULL)
+ }
+ if(z->dp != NULL) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* not a stub hole */
+ }
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
fwd_init_parents(fwd);
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
}
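
The read-side convention introduced above (forwards_find/forwards_lookup take the read lock themselves when nolock is 0, and leave it held only when a delegation point is returned) can be shown with a minimal caller sketch. This is not part of the patch; it only assumes unbound's internal headers, and query_is_forwarded is a hypothetical helper name.

#include <stdint.h>
#include "iterator/iter_fwd.h"   /* struct iter_forwards, forwards_lookup */
#include "util/locks.h"          /* lock_rw_unlock */

/* sketch: look up a forward zone, then release the read lock ourselves */
static int query_is_forwarded(struct iter_forwards* fwd,
	uint8_t* qname, uint16_t qclass)
{
	int nolock = 0; /* let forwards_lookup take the read lock */
	struct delegpt* dp = forwards_lookup(fwd, qname, qclass, nolock);
	if(!dp)
		return 0; /* no match: the lock was released inside the call */
	/* dp points into the shared tree; use or copy it while the read
	 * lock is still held, then unlock. */
	lock_rw_unlock(&fwd->lock);
	return 1;
}
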
diff --git a/usr.sbin/unbound/iterator/iter_fwd.h b/usr.sbin/unbound/iterator/iter_fwd.h
index e90b74c16a5..4527d899c79 100644
--- a/usr.sbin/unbound/iterator/iter_fwd.h
+++ b/usr.sbin/unbound/iterator/iter_fwd.h
@@ -43,6 +43,7 @@
#ifndef ITERATOR_ITER_FWD_H
#define ITERATOR_ITER_FWD_H
#include "util/rbtree.h"
+#include "util/locks.h"
struct config_file;
struct delegpt;
@@ -50,6 +51,11 @@ struct delegpt;
* Iterator forward zones structure
*/
struct iter_forwards {
+ /** lock on the forwards tree.
+ * When grabbing both this lock and the anchors.lock, this lock
+ * is grabbed first. When grabbing both this lock and the hints.lock
+ * this lock is grabbed first. */
+ lock_rw_type lock;
/**
* Zones are stored in this tree. Sort order is specially chosen.
* first sorted on qclass. Then on dname in nsec-like order, so that
@@ -106,47 +112,65 @@ int forwards_apply_cfg(struct iter_forwards* fwd, struct config_file* cfg);
/**
* Find forward zone exactly by name
+ * The return value is contents of the forwards structure.
+ * Caller should lock and unlock a readlock on the forwards structure if nolock
+ * is set.
+ * Otherwise caller should unlock the readlock on the forwards structure if a
+ * value was returned.
* @param fwd: forward storage.
* @param qname: The qname of the query.
* @param qclass: The qclass of the query.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point or null.
*/
struct delegpt* forwards_find(struct iter_forwards* fwd, uint8_t* qname,
- uint16_t qclass);
+ uint16_t qclass, int nolock);
/**
* Find forward zone information
* For this qname/qclass find forward zone information, returns delegation
* point with server names and addresses, or NULL if no forwarding is needed.
+ * The return value is contents of the forwards structure.
+ * Caller should lock and unlock a readlock on the forwards structure if nolock
+ * is set.
+ * Otherwise caller should unlock the readlock on the forwards structure if a
+ * value was returned.
*
* @param fwd: forward storage.
* @param qname: The qname of the query.
* @param qclass: The qclass of the query.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point if the query has to be forwarded to that list,
* otherwise null.
*/
-struct delegpt* forwards_lookup(struct iter_forwards* fwd,
- uint8_t* qname, uint16_t qclass);
+struct delegpt* forwards_lookup(struct iter_forwards* fwd,
+ uint8_t* qname, uint16_t qclass, int nolock);
/**
* Same as forwards_lookup, but for the root only
* @param fwd: forward storage.
* @param qclass: The qclass of the query.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point if root forward exists, otherwise null.
*/
-struct delegpt* forwards_lookup_root(struct iter_forwards* fwd,
- uint16_t qclass);
+struct delegpt* forwards_lookup_root(struct iter_forwards* fwd,
+ uint16_t qclass, int nolock);
/**
* Find next root item in forwards lookup tree.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a readlock on the forwards structure.
* @param fwd: the forward storage
* @param qclass: class to look at next, or higher.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return false if none found, or if true stored in qclass.
*/
-int forwards_next_root(struct iter_forwards* fwd, uint16_t* qclass);
+int forwards_next_root(struct iter_forwards* fwd, uint16_t* qclass,
+ int nolock);
/**
* Get memory in use by forward storage
+ * Locks and unlocks the structure.
* @param fwd: forward storage.
* @return bytes in use
*/
@@ -158,42 +182,56 @@ int fwd_cmp(const void* k1, const void* k2);
/**
* Add zone to forward structure. For external use since it recalcs
* the tree parents.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param dp: delegation point with name and target nameservers for new
* forward zone. malloced.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
-int forwards_add_zone(struct iter_forwards* fwd, uint16_t c,
- struct delegpt* dp);
+int forwards_add_zone(struct iter_forwards* fwd, uint16_t c,
+ struct delegpt* dp, int nolock);
/**
* Remove zone from forward structure. For external use since it
* recalcs the tree parents.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
+ * @param nolock: Skip locking, locking is handled by the caller.
*/
-void forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm);
+void forwards_delete_zone(struct iter_forwards* fwd, uint16_t c,
+ uint8_t* nm, int nolock);
/**
* Add stub hole (empty entry in forward table, that makes resolution skip
* a forward-zone because the stub zone should override the forward zone).
* Does not add one if not necessary.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
-int forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm);
+int forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c,
+ uint8_t* nm, int nolock);
/**
* Remove stub hole, if one exists.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
+ * @param nolock: Skip locking, locking is handled by the caller.
*/
void forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c,
- uint8_t* nm);
+ uint8_t* nm, int nolock);
#endif /* ITERATOR_ITER_FWD_H */
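
A write-path sketch of the entry points documented above: with nolock set to 0 each call takes and releases the write lock itself. The combination shown (drop a forward zone, then mark a stub hole for the same name) is illustrative only, and LDNS_RR_CLASS_IN is assumed to come from unbound's sldns/rrdef.h.

#include <stdint.h>
#include "iterator/iter_fwd.h"
#include "sldns/rrdef.h"          /* assumed home of LDNS_RR_CLASS_IN */

static int override_forward(struct iter_forwards* fwd, uint8_t* zone_nm)
{
	int nolock = 0; /* the calls lock and unlock for us */
	forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, zone_nm, nolock);
	/* returns 0 on malloc failure */
	return forwards_add_stub_hole(fwd, LDNS_RR_CLASS_IN, zone_nm, nolock);
}
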
diff --git a/usr.sbin/unbound/iterator/iter_hints.c b/usr.sbin/unbound/iterator/iter_hints.c
index 4f86f3676a2..8b168271c7f 100644
--- a/usr.sbin/unbound/iterator/iter_hints.c
+++ b/usr.sbin/unbound/iterator/iter_hints.c
@@ -57,6 +57,8 @@ hints_create(void)
sizeof(struct iter_hints));
if(!hints)
return NULL;
+ lock_rw_init(&hints->lock);
+ lock_protect(&hints->lock, &hints->tree, sizeof(hints->tree));
return hints;
}
@@ -83,6 +85,7 @@ hints_delete(struct iter_hints* hints)
{
if(!hints)
return;
+ lock_rw_destroy(&hints->lock);
hints_del_tree(hints);
free(hints);
}
@@ -438,47 +441,70 @@ read_root_hints_list(struct iter_hints* hints, struct config_file* cfg)
int
hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg)
{
+ int nolock = 1;
+ lock_rw_wrlock(&hints->lock);
hints_del_tree(hints);
name_tree_init(&hints->tree);
-
+
/* read root hints */
- if(!read_root_hints_list(hints, cfg))
+ if(!read_root_hints_list(hints, cfg)) {
+ lock_rw_unlock(&hints->lock);
return 0;
+ }
/* read stub hints */
- if(!read_stubs(hints, cfg))
+ if(!read_stubs(hints, cfg)) {
+ lock_rw_unlock(&hints->lock);
return 0;
+ }
/* use fallback compiletime root hints */
- if(!hints_lookup_root(hints, LDNS_RR_CLASS_IN)) {
+ if(!hints_find_root(hints, LDNS_RR_CLASS_IN, nolock)) {
struct delegpt* dp = compile_time_root_prime(cfg->do_ip4,
cfg->do_ip6);
verbose(VERB_ALGO, "no config, using builtin root hints.");
- if(!dp)
+ if(!dp) {
+ lock_rw_unlock(&hints->lock);
return 0;
- if(!hints_insert(hints, LDNS_RR_CLASS_IN, dp, 0))
+ }
+ if(!hints_insert(hints, LDNS_RR_CLASS_IN, dp, 0)) {
+ lock_rw_unlock(&hints->lock);
return 0;
+ }
}
name_tree_init_parents(&hints->tree);
+ lock_rw_unlock(&hints->lock);
return 1;
}
-struct delegpt*
-hints_lookup_root(struct iter_hints* hints, uint16_t qclass)
+struct delegpt*
+hints_find(struct iter_hints* hints, uint8_t* qname, uint16_t qclass,
+ int nolock)
{
- uint8_t rootlab = 0;
struct iter_hints_stub *stub;
+ size_t len;
+ int has_dp;
+ int labs = dname_count_size_labels(qname, &len);
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&hints->lock); }
stub = (struct iter_hints_stub*)name_tree_find(&hints->tree,
- &rootlab, 1, 1, qclass);
- if(!stub)
- return NULL;
- return stub->dp;
+ qname, len, labs, qclass);
+ has_dp = stub && stub->dp;
+ if(!has_dp && !nolock) { lock_rw_unlock(&hints->lock); }
+ return has_dp?stub->dp:NULL;
+}
+
+struct delegpt*
+hints_find_root(struct iter_hints* hints, uint16_t qclass, int nolock)
+{
+ uint8_t rootlab = 0;
+ return hints_find(hints, &rootlab, qclass, nolock);
}
struct iter_hints_stub*
-hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
- uint16_t qclass, struct delegpt* cache_dp)
+hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
+ uint16_t qclass, struct delegpt* cache_dp, int nolock)
{
size_t len;
int labs;
@@ -486,14 +512,20 @@ hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
/* first lookup the stub */
labs = dname_count_size_labels(qname, &len);
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&hints->lock); }
r = (struct iter_hints_stub*)name_tree_lookup(&hints->tree, qname,
len, labs, qclass);
- if(!r) return NULL;
+ if(!r) {
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
+ return NULL;
+ }
/* If there is no cache (root prime situation) */
if(cache_dp == NULL) {
if(r->dp->namelabs != 1)
return r; /* no cache dp, use any non-root stub */
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
return NULL;
}
@@ -510,12 +542,18 @@ hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
if(dname_strict_subdomain(r->dp->name, r->dp->namelabs,
cache_dp->name, cache_dp->namelabs))
return r; /* need to prime this stub */
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
return NULL;
}
-int hints_next_root(struct iter_hints* hints, uint16_t* qclass)
+int hints_next_root(struct iter_hints* hints, uint16_t* qclass, int nolock)
{
- return name_tree_next_root(&hints->tree, qclass);
+ int ret;
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&hints->lock); }
+ ret = name_tree_next_root(&hints->tree, qclass);
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
+ return ret;
}
size_t
@@ -524,39 +562,52 @@ hints_get_mem(struct iter_hints* hints)
size_t s;
struct iter_hints_stub* p;
if(!hints) return 0;
+ lock_rw_rdlock(&hints->lock);
s = sizeof(*hints);
RBTREE_FOR(p, struct iter_hints_stub*, &hints->tree) {
s += sizeof(*p) + delegpt_get_mem(p->dp);
}
+ lock_rw_unlock(&hints->lock);
return s;
}
int
hints_add_stub(struct iter_hints* hints, uint16_t c, struct delegpt* dp,
- int noprime)
+ int noprime, int nolock)
{
struct iter_hints_stub *z;
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&hints->lock); }
if((z=(struct iter_hints_stub*)name_tree_find(&hints->tree,
dp->name, dp->namelen, dp->namelabs, c)) != NULL) {
(void)rbtree_delete(&hints->tree, &z->node);
hints_stub_free(z);
}
- if(!hints_insert(hints, c, dp, noprime))
+ if(!hints_insert(hints, c, dp, noprime)) {
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
return 0;
+ }
name_tree_init_parents(&hints->tree);
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
return 1;
}
void
-hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm)
+hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm,
+ int nolock)
{
struct iter_hints_stub *z;
size_t len;
int labs = dname_count_size_labels(nm, &len);
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&hints->lock); }
if(!(z=(struct iter_hints_stub*)name_tree_find(&hints->tree,
- nm, len, labs, c)))
+ nm, len, labs, c))) {
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
return; /* nothing to do */
+ }
(void)rbtree_delete(&hints->tree, &z->node);
hints_stub_free(z);
name_tree_init_parents(&hints->tree);
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
}
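
A minimal sketch of the new hints read convention, mirroring what prime_root in iterator.c does further down: copy the delegation point into a region while the read lock is still held, then drop the lock. Not part of the patch; copy_root_hints is a hypothetical helper.

#include <stdint.h>
#include "iterator/iter_hints.h"
#include "iterator/iter_delegpt.h"   /* delegpt_copy */
#include "util/regional.h"
#include "util/locks.h"

static struct delegpt* copy_root_hints(struct iter_hints* hints,
	uint16_t qclass, struct regional* region)
{
	int nolock = 0;
	struct delegpt* dp = hints_find_root(hints, qclass, nolock);
	struct delegpt* copy;
	if(!dp)
		return NULL; /* lock already released by hints_find_root */
	copy = delegpt_copy(dp, region); /* NULL on malloc failure */
	lock_rw_unlock(&hints->lock);
	return copy;
}
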
diff --git a/usr.sbin/unbound/iterator/iter_hints.h b/usr.sbin/unbound/iterator/iter_hints.h
index 06b4b9667d1..26de323c9e9 100644
--- a/usr.sbin/unbound/iterator/iter_hints.h
+++ b/usr.sbin/unbound/iterator/iter_hints.h
@@ -43,6 +43,7 @@
#ifndef ITERATOR_ITER_HINTS_H
#define ITERATOR_ITER_HINTS_H
#include "util/storage/dnstree.h"
+#include "util/locks.h"
struct iter_env;
struct config_file;
struct delegpt;
@@ -51,6 +52,10 @@ struct delegpt;
* Iterator hints structure
*/
struct iter_hints {
+ /** lock on the hints tree.
+ * When grabbing both this lock and the anchors.lock, this lock
+ * is grabbed first. */
+ lock_rw_type lock;
/**
* Hints are stored in this tree. Sort order is specially chosen.
* first sorted on qclass. Then on dname in nsec-like order, so that
@@ -95,42 +100,70 @@ void hints_delete(struct iter_hints* hints);
int hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg);
/**
- * Find root hints for the given class.
+ * Find hints for the given class.
+ * The return value is contents of the hints structure.
+ * Caller should lock and unlock a readlock on the hints structure if nolock
+ * is set.
+ * Otherwise caller should unlock the readlock on the hints structure if a
+ * value was returned.
* @param hints: hint storage.
+ * @param qname: the qname that generated the delegation point.
* @param qclass: class for which root hints are requested. host order.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return: NULL if no hints, or a ptr to stored hints.
*/
-struct delegpt* hints_lookup_root(struct iter_hints* hints, uint16_t qclass);
+struct delegpt* hints_find(struct iter_hints* hints, uint8_t* qname,
+ uint16_t qclass, int nolock);
+
+/**
+ * Same as hints_lookup, but for the root only.
+ * @param hints: hint storage.
+ * @param qclass: class for which root hints are requested. host order.
+ * @param nolock: Skip locking, locking is handled by the caller.
+ * @return: NULL if no hints, or a ptr to stored hints.
+ */
+struct delegpt* hints_find_root(struct iter_hints* hints,
+ uint16_t qclass, int nolock);
/**
* Find next root hints (to cycle through all root hints).
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a readlock on the hints structure.
* @param hints: hint storage
* @param qclass: class for which root hints are sought.
* 0 means give the first available root hints class.
* x means, give class x or a higher class if any.
* returns the found class in this variable.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return true if a root hint class is found.
* false if not root hint class is found (qclass may have been changed).
*/
-int hints_next_root(struct iter_hints* hints, uint16_t* qclass);
+int hints_next_root(struct iter_hints* hints, uint16_t* qclass, int nolock);
/**
* Given a qname/qclass combination, and the delegation point from the cache
* for this qname/qclass, determine if this combination indicates that a
* stub hint exists and must be primed.
+ * The return value is contents of the hints structure.
+ * Caller should lock and unlock a readlock on the hints structure if nolock
+ * is set.
+ * Otherwise caller should unlock the readlock on the hints structure if a
+ * value was returned.
*
* @param hints: hint storage.
* @param qname: The qname that generated the delegation point.
* @param qclass: The qclass that generated the delegation point.
* @param dp: The cache generated delegation point.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return: A priming delegation point if there is a stub hint that must
* be primed, otherwise null.
*/
-struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
- uint8_t* qname, uint16_t qclass, struct delegpt* dp);
+struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
+ uint8_t* qname, uint16_t qclass, struct delegpt* dp, int nolock);
/**
* Get memory in use by hints
+ * Locks and unlocks the structure.
* @param hints: hint storage.
* @return bytes in use
*/
@@ -139,23 +172,30 @@ size_t hints_get_mem(struct iter_hints* hints);
/**
* Add stub to hints structure. For external use since it recalcs
* the tree parents.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the hints structure.
* @param hints: the hints data structure
* @param c: class of zone
* @param dp: delegation point with name and target nameservers for new
* hints stub. malloced.
* @param noprime: set noprime option to true or false on new hint stub.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
int hints_add_stub(struct iter_hints* hints, uint16_t c, struct delegpt* dp,
- int noprime);
+ int noprime, int nolock);
/**
* Remove stub from hints structure. For external use since it
* recalcs the tree parents.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the hints structure.
* @param hints: the hints data structure
* @param c: class of stub zone
* @param nm: name of stub zone (in uncompressed wireformat).
+ * @param nolock: Skip locking, locking is handled by the caller.
*/
-void hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm);
+void hints_delete_stub(struct iter_hints* hints, uint16_t c,
+ uint8_t* nm, int nolock);
#endif /* ITERATOR_ITER_HINTS_H */
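
When several structures have to be read atomically, the nolock form is used with the locks taken by the caller in the documented order (the forwards lock before the hints lock), as iter_get_next_root in iter_utils.c below does. A minimal sketch under those assumptions, not part of the patch:

#include <stdint.h>
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#include "util/locks.h"

static int root_in_both(struct iter_forwards* fwd, struct iter_hints* hints,
	uint16_t qclass)
{
	int nolock = 1; /* we hold the read locks ourselves */
	int infwd, inhints;
	lock_rw_rdlock(&fwd->lock);    /* forwards lock first */
	lock_rw_rdlock(&hints->lock);
	infwd = (forwards_lookup_root(fwd, qclass, nolock) != NULL);
	inhints = (hints_find_root(hints, qclass, nolock) != NULL);
	lock_rw_unlock(&hints->lock);
	lock_rw_unlock(&fwd->lock);
	return infwd && inhints;
}
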
diff --git a/usr.sbin/unbound/iterator/iter_utils.c b/usr.sbin/unbound/iterator/iter_utils.c
index 10a8ec3eb08..f291178d231 100644
--- a/usr.sbin/unbound/iterator/iter_utils.c
+++ b/usr.sbin/unbound/iterator/iter_utils.c
@@ -1284,8 +1284,17 @@ iter_get_next_root(struct iter_hints* hints, struct iter_forwards* fwd,
uint16_t* c)
{
uint16_t c1 = *c, c2 = *c;
- int r1 = hints_next_root(hints, &c1);
- int r2 = forwards_next_root(fwd, &c2);
+ int r1, r2;
+ int nolock = 1;
+
+ /* prelock both forwards and hints for atomic read. */
+ lock_rw_rdlock(&fwd->lock);
+ lock_rw_rdlock(&hints->lock);
+ r1 = hints_next_root(hints, &c1, nolock);
+ r2 = forwards_next_root(fwd, &c2, nolock);
+ lock_rw_unlock(&fwd->lock);
+ lock_rw_unlock(&hints->lock);
+
if(!r1 && !r2) /* got none, end of list */
return 0;
else if(!r1) /* got one, return that */
@@ -1450,15 +1459,21 @@ int iter_dp_cangodown(struct query_info* qinfo, struct delegpt* dp)
int
iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
- uint8_t** retdpname, size_t* retdpnamelen)
+ uint8_t** retdpname, size_t* retdpnamelen, uint8_t* dpname_storage,
+ size_t dpname_storage_len)
{
struct iter_hints_stub *stub;
struct delegpt *dp;
+ int nolock = 1;
/* Check for stub. */
+ /* Lock both forwards and hints for atomic read. */
+ lock_rw_rdlock(&qstate->env->fwds->lock);
+ lock_rw_rdlock(&qstate->env->hints->lock);
stub = hints_lookup_stub(qstate->env->hints, qinf->qname,
- qinf->qclass, NULL);
- dp = forwards_lookup(qstate->env->fwds, qinf->qname, qinf->qclass);
+ qinf->qclass, NULL, nolock);
+ dp = forwards_lookup(qstate->env->fwds, qinf->qname, qinf->qclass,
+ nolock);
/* see if forward or stub is more pertinent */
if(stub && stub->dp && dp) {
@@ -1472,7 +1487,9 @@ iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
/* check stub */
if (stub != NULL && stub->dp != NULL) {
- if(stub->dp->no_cache) {
+ int stub_no_cache = stub->dp->no_cache;
+ lock_rw_unlock(&qstate->env->fwds->lock);
+ if(stub_no_cache) {
char qname[255+1];
char dpname[255+1];
dname_str(qinf->qname, qname);
@@ -1480,15 +1497,27 @@ iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
verbose(VERB_ALGO, "stub for %s %s has no_cache", qname, dpname);
}
if(retdpname) {
- *retdpname = stub->dp->name;
+ if(stub->dp->namelen > dpname_storage_len) {
+ verbose(VERB_ALGO, "no cache stub dpname too long");
+ lock_rw_unlock(&qstate->env->hints->lock);
+ *retdpname = NULL;
+ *retdpnamelen = 0;
+ return stub_no_cache;
+ }
+ memmove(dpname_storage, stub->dp->name,
+ stub->dp->namelen);
+ *retdpname = dpname_storage;
*retdpnamelen = stub->dp->namelen;
}
- return (stub->dp->no_cache);
+ lock_rw_unlock(&qstate->env->hints->lock);
+ return stub_no_cache;
}
/* Check for forward. */
if (dp) {
- if(dp->no_cache) {
+ int dp_no_cache = dp->no_cache;
+ lock_rw_unlock(&qstate->env->hints->lock);
+ if(dp_no_cache) {
char qname[255+1];
char dpname[255+1];
dname_str(qinf->qname, qname);
@@ -1496,11 +1525,22 @@ iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
verbose(VERB_ALGO, "forward for %s %s has no_cache", qname, dpname);
}
if(retdpname) {
- *retdpname = dp->name;
+ if(dp->namelen > dpname_storage_len) {
+ verbose(VERB_ALGO, "no cache dpname too long");
+ lock_rw_unlock(&qstate->env->fwds->lock);
+ *retdpname = NULL;
+ *retdpnamelen = 0;
+ return dp_no_cache;
+ }
+ memmove(dpname_storage, dp->name, dp->namelen);
+ *retdpname = dpname_storage;
*retdpnamelen = dp->namelen;
}
- return (dp->no_cache);
+ lock_rw_unlock(&qstate->env->fwds->lock);
+ return dp_no_cache;
}
+ lock_rw_unlock(&qstate->env->fwds->lock);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(retdpname) {
*retdpname = NULL;
*retdpnamelen = 0;
diff --git a/usr.sbin/unbound/iterator/iter_utils.h b/usr.sbin/unbound/iterator/iter_utils.h
index fa860fa682f..4024629e686 100644
--- a/usr.sbin/unbound/iterator/iter_utils.h
+++ b/usr.sbin/unbound/iterator/iter_utils.h
@@ -407,10 +407,14 @@ int iter_dp_cangodown(struct query_info* qinfo, struct delegpt* dp);
* Used for NXDOMAIN checks, above that it is an nxdomain from a
* different server and zone. You can pass NULL to not get it.
* @param retdpnamelen: returns the length of the dpname.
+ * @param dpname_storage: this is where the dpname buf is stored, if any.
+ * So that caller can manage the buffer.
+ * @param dpname_storage_len: size of dpname_storage buffer.
* @return true if no_cache is set in stub or fwd.
*/
int iter_stub_fwd_no_cache(struct module_qstate *qstate,
- struct query_info *qinf, uint8_t** retdpname, size_t* retdpnamelen);
+ struct query_info *qinf, uint8_t** retdpname, size_t* retdpnamelen,
+ uint8_t* dpname_storage, size_t dpname_storage_len);
/**
* Set support for IP4 and IP6 depending on outgoing interfaces
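
iter_stub_fwd_no_cache now copies the delegation point name into a buffer owned by the caller, so the name stays valid after the forwards/hints locks are released inside the call. A minimal caller sketch (no_cache_for_query is a hypothetical name; 255+1 mirrors the LDNS_MAX_DOMAINLEN+1 buffer used in iterator.c below):

#include <stddef.h>
#include <stdint.h>
#include "iterator/iter_utils.h"

static int no_cache_for_query(struct module_qstate* qstate,
	struct query_info* qinf)
{
	uint8_t dpname_storage[255+1]; /* LDNS_MAX_DOMAINLEN+1 */
	uint8_t* dpname = NULL;
	size_t dpnamelen = 0;
	/* on return dpname, if set, points into dpname_storage */
	return iter_stub_fwd_no_cache(qstate, qinf, &dpname, &dpnamelen,
		dpname_storage, sizeof(dpname_storage));
}
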
diff --git a/usr.sbin/unbound/iterator/iterator.c b/usr.sbin/unbound/iterator/iterator.c
index e9fea544aa6..5732a414857 100644
--- a/usr.sbin/unbound/iterator/iterator.c
+++ b/usr.sbin/unbound/iterator/iterator.c
@@ -52,6 +52,7 @@
#include "iterator/iter_priv.h"
#include "validator/val_neg.h"
#include "services/cache/dns.h"
+#include "services/cache/rrset.h"
#include "services/cache/infra.h"
#include "services/authzone.h"
#include "util/module.h"
@@ -678,30 +679,40 @@ errinf_reply(struct module_qstate* qstate, struct iter_qstate* iq)
/** see if last resort is possible - does config allow queries to parent */
static int
-can_have_last_resort(struct module_env* env, uint8_t* nm, size_t nmlen,
- uint16_t qclass, struct delegpt** retdp)
+can_have_last_resort(struct module_env* env, uint8_t* nm, size_t ATTR_UNUSED(nmlen),
+ uint16_t qclass, int* have_dp, struct delegpt** retdp,
+ struct regional* region)
{
- struct delegpt* fwddp;
- struct iter_hints_stub* stub;
- int labs = dname_count_labels(nm);
+ struct delegpt* dp = NULL;
+ int nolock = 0;
/* do not process a last resort (the parent side) if a stub
* or forward is configured, because we do not want to go 'above'
* the configured servers */
- if(!dname_is_root(nm) && (stub = (struct iter_hints_stub*)
- name_tree_find(&env->hints->tree, nm, nmlen, labs, qclass)) &&
+ if(!dname_is_root(nm) &&
+ (dp = hints_find(env->hints, nm, qclass, nolock)) &&
/* has_parent side is turned off for stub_first, where we
* are allowed to go to the parent */
- stub->dp->has_parent_side_NS) {
- if(retdp) *retdp = stub->dp;
+ dp->has_parent_side_NS) {
+ if(retdp) *retdp = delegpt_copy(dp, region);
+ lock_rw_unlock(&env->hints->lock);
+ if(have_dp) *have_dp = 1;
return 0;
}
- if((fwddp = forwards_find(env->fwds, nm, qclass)) &&
+ if(dp) {
+ lock_rw_unlock(&env->hints->lock);
+ dp = NULL;
+ }
+ if((dp = forwards_find(env->fwds, nm, qclass, nolock)) &&
/* has_parent_side is turned off for forward_first, where
* we are allowed to go to the parent */
- fwddp->has_parent_side_NS) {
- if(retdp) *retdp = fwddp;
+ dp->has_parent_side_NS) {
+ if(retdp) *retdp = delegpt_copy(dp, region);
+ lock_rw_unlock(&env->fwds->lock);
+ if(have_dp) *have_dp = 1;
return 0;
}
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(dp) { lock_rw_unlock(&env->fwds->lock); }
return 1;
}
@@ -877,10 +888,11 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
{
struct delegpt* dp;
struct module_qstate* subq;
+ int nolock = 0;
verbose(VERB_DETAIL, "priming . %s NS",
sldns_lookup_by_id(sldns_rr_classes, (int)qclass)?
sldns_lookup_by_id(sldns_rr_classes, (int)qclass)->name:"??");
- dp = hints_lookup_root(qstate->env->hints, qclass);
+ dp = hints_find_root(qstate->env->hints, qclass, nolock);
if(!dp) {
verbose(VERB_ALGO, "Cannot prime due to lack of hints");
return 0;
@@ -890,6 +902,7 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
if(!generate_sub_request((uint8_t*)"\000", 1, LDNS_RR_TYPE_NS,
qclass, qstate, id, iq, QUERYTARGETS_STATE, PRIME_RESP_STATE,
&subq, 0, 0)) {
+ lock_rw_unlock(&qstate->env->hints->lock);
verbose(VERB_ALGO, "could not prime root");
return 0;
}
@@ -900,6 +913,7 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
* copy dp, it is now part of the root prime query.
* dp was part of in the fixed hints structure. */
subiq->dp = delegpt_copy(dp, subq->region);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(!subiq->dp) {
log_err("out of memory priming root, copydp");
fptr_ok(fptr_whitelist_modenv_kill_sub(
@@ -911,6 +925,8 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
subiq->num_target_queries = 0;
subiq->dnssec_expected = iter_indicates_dnssec(
qstate->env, subiq->dp, NULL, subq->qinfo.qclass);
+ } else {
+ lock_rw_unlock(&qstate->env->hints->lock);
}
/* this module stops, our submodule starts, and does the query. */
@@ -941,18 +957,21 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
struct iter_hints_stub* stub;
struct delegpt* stub_dp;
struct module_qstate* subq;
+ int nolock = 0;
if(!qname) return 0;
- stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp);
+ stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp,
+ nolock);
/* The stub (if there is one) does not need priming. */
- if(!stub)
- return 0;
+ if(!stub) return 0;
stub_dp = stub->dp;
/* if we have an auth_zone dp, and stub is equal, don't prime stub
* yet, unless we want to fallback and avoid the auth_zone */
if(!iq->auth_zone_avoid && iq->dp && iq->dp->auth_dp &&
- query_dname_compare(iq->dp->name, stub_dp->name) == 0)
+ query_dname_compare(iq->dp->name, stub_dp->name) == 0) {
+ lock_rw_unlock(&qstate->env->hints->lock);
return 0;
+ }
/* is it a noprime stub (always use) */
if(stub->noprime) {
@@ -961,13 +980,14 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
/* copy the dp out of the fixed hints structure, so that
* it can be changed when servicing this query */
iq->dp = delegpt_copy(stub_dp, qstate->region);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(!iq->dp) {
log_err("out of memory priming stub");
errinf(qstate, "malloc failure, priming stub");
(void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
return 1; /* return 1 to make module stop, with error */
}
- log_nametypeclass(VERB_DETAIL, "use stub", stub_dp->name,
+ log_nametypeclass(VERB_DETAIL, "use stub", iq->dp->name,
LDNS_RR_TYPE_NS, qclass);
return r;
}
@@ -981,6 +1001,7 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
if(!generate_sub_request(stub_dp->name, stub_dp->namelen,
LDNS_RR_TYPE_NS, qclass, qstate, id, iq,
QUERYTARGETS_STATE, PRIME_RESP_STATE, &subq, 0, 0)) {
+ lock_rw_unlock(&qstate->env->hints->lock);
verbose(VERB_ALGO, "could not prime stub");
errinf(qstate, "could not generate lookup for stub prime");
(void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
@@ -993,6 +1014,7 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
/* Set the initial delegation point to the hint. */
/* make copy to avoid use of stub dp by different qs/threads */
subiq->dp = delegpt_copy(stub_dp, subq->region);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(!subiq->dp) {
log_err("out of memory priming stub, copydp");
fptr_ok(fptr_whitelist_modenv_kill_sub(
@@ -1009,6 +1031,8 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
subiq->wait_priming_stub = 1;
subiq->dnssec_expected = iter_indicates_dnssec(
qstate->env, subiq->dp, NULL, subq->qinfo.qclass);
+ } else {
+ lock_rw_unlock(&qstate->env->hints->lock);
}
/* this module stops, our submodule starts, and does the query. */
@@ -1181,7 +1205,7 @@ generate_ns_check(struct module_qstate* qstate, struct iter_qstate* iq, int id)
if(iq->depth == ie->max_dependency_depth)
return;
if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen,
- iq->qchase.qclass, NULL))
+ iq->qchase.qclass, NULL, NULL, NULL))
return;
/* is this query the same as the nscheck? */
if(qstate->qinfo.qtype == LDNS_RR_TYPE_NS &&
@@ -1294,6 +1318,7 @@ forward_request(struct module_qstate* qstate, struct iter_qstate* iq)
struct delegpt* dp;
uint8_t* delname = iq->qchase.qname;
size_t delnamelen = iq->qchase.qname_len;
+ int nolock = 0;
if(iq->refetch_glue && iq->dp) {
delname = iq->dp->name;
delnamelen = iq->dp->namelen;
@@ -1302,12 +1327,13 @@ forward_request(struct module_qstate* qstate, struct iter_qstate* iq)
if( (iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue)
&& !dname_is_root(iq->qchase.qname))
dname_remove_label(&delname, &delnamelen);
- dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass);
- if(!dp)
- return 0;
+ dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass,
+ nolock);
+ if(!dp) return 0;
/* send recursion desired to forward addr */
iq->chase_flags |= BIT_RD;
iq->dp = delegpt_copy(dp, qstate->region);
+ lock_rw_unlock(&qstate->env->fwds->lock);
/* iq->dp checked by caller */
verbose(VERB_ALGO, "forwarding request");
return 1;
@@ -1335,6 +1361,7 @@ static int
processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
struct iter_env* ie, int id)
{
+ uint8_t dpname_storage[LDNS_MAX_DOMAINLEN+1];
uint8_t* delname, *dpname=NULL;
size_t delnamelen, dpnamelen=0;
struct dns_msg* msg = NULL;
@@ -1381,7 +1408,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
if (iq->refetch_glue &&
iq->dp &&
!can_have_last_resort(qstate->env, iq->dp->name,
- iq->dp->namelen, iq->qchase.qclass, NULL)) {
+ iq->dp->namelen, iq->qchase.qclass, NULL, NULL, NULL)) {
iq->refetch_glue = 0;
}
@@ -1389,8 +1416,61 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
/* This either results in a query restart (CNAME cache response), a
* terminating response (ANSWER), or a cache miss (null). */
-
- if (iter_stub_fwd_no_cache(qstate, &iq->qchase, &dpname, &dpnamelen)) {
+
+ /* Check RPZ for override */
+ if(qstate->env->auth_zones) {
+ /* apply rpz qname triggers, like after cname */
+ struct dns_msg* forged_response =
+ rpz_callback_from_iterator_cname(qstate, iq);
+ if(forged_response) {
+ uint8_t* sname = 0;
+ size_t slen = 0;
+ int count = 0;
+ while(forged_response && reply_find_rrset_section_an(
+ forged_response->rep, iq->qchase.qname,
+ iq->qchase.qname_len, LDNS_RR_TYPE_CNAME,
+ iq->qchase.qclass) &&
+ iq->qchase.qtype != LDNS_RR_TYPE_CNAME &&
+ count++ < ie->max_query_restarts) {
+ /* another cname to follow */
+ if(!handle_cname_response(qstate, iq, forged_response,
+ &sname, &slen)) {
+ errinf(qstate, "malloc failure, CNAME info");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ iq->qchase.qname = sname;
+ iq->qchase.qname_len = slen;
+ forged_response =
+ rpz_callback_from_iterator_cname(qstate, iq);
+ }
+ if(forged_response != NULL) {
+ qstate->ext_state[id] = module_finished;
+ qstate->return_rcode = LDNS_RCODE_NOERROR;
+ qstate->return_msg = forged_response;
+ iq->response = forged_response;
+ next_state(iq, FINISHED_STATE);
+ if(!iter_prepend(iq, qstate->return_msg, qstate->region)) {
+ log_err("rpz: after cached cname, prepend rrsets: out of memory");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ qstate->return_msg->qinfo = qstate->qinfo;
+ return 0;
+ }
+ /* Follow the CNAME response */
+ iq->dp = NULL;
+ iq->refetch_glue = 0;
+ iq->query_restart_count++;
+ iq->sent_count = 0;
+ iq->dp_target_count = 0;
+ sock_list_insert(&qstate->reply_origin, NULL, 0, qstate->region);
+ if(qstate->env->cfg->qname_minimisation)
+ iq->minimisation_state = INIT_MINIMISE_STATE;
+ return next_state(iq, INIT_REQUEST_STATE);
+ }
+ }
+
+ if (iter_stub_fwd_no_cache(qstate, &iq->qchase, &dpname, &dpnamelen,
+ dpname_storage, sizeof(dpname_storage))) {
/* Asked to not query cache. */
verbose(VERB_ALGO, "no-cache set, going to the network");
qstate->no_cache_lookup = 1;
@@ -1449,39 +1529,6 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
iq->qchase.qname = sname;
iq->qchase.qname_len = slen;
- if(qstate->env->auth_zones) {
- /* apply rpz qname triggers after cname */
- struct dns_msg* forged_response =
- rpz_callback_from_iterator_cname(qstate, iq);
- while(forged_response && reply_find_rrset_section_an(
- forged_response->rep, iq->qchase.qname,
- iq->qchase.qname_len, LDNS_RR_TYPE_CNAME,
- iq->qchase.qclass)) {
- /* another cname to follow */
- if(!handle_cname_response(qstate, iq, forged_response,
- &sname, &slen)) {
- errinf(qstate, "malloc failure, CNAME info");
- return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
- }
- iq->qchase.qname = sname;
- iq->qchase.qname_len = slen;
- forged_response =
- rpz_callback_from_iterator_cname(qstate, iq);
- }
- if(forged_response != NULL) {
- qstate->ext_state[id] = module_finished;
- qstate->return_rcode = LDNS_RCODE_NOERROR;
- qstate->return_msg = forged_response;
- iq->response = forged_response;
- next_state(iq, FINISHED_STATE);
- if(!iter_prepend(iq, qstate->return_msg, qstate->region)) {
- log_err("rpz: after cached cname, prepend rrsets: out of memory");
- return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
- }
- qstate->return_msg->qinfo = qstate->qinfo;
- return 0;
- }
- }
/* This *is* a query restart, even if it is a cheap
* one. */
iq->dp = NULL;
@@ -1494,7 +1541,6 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
iq->minimisation_state = INIT_MINIMISE_STATE;
return next_state(iq, INIT_REQUEST_STATE);
}
-
/* if from cache, NULL, else insert 'cache IP' len=0 */
if(qstate->reply_origin)
sock_list_insert(&qstate->reply_origin, NULL, 0, qstate->region);
@@ -1555,7 +1601,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
if(iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue ||
(iq->qchase.qtype == LDNS_RR_TYPE_NS && qstate->prefetch_leeway
- && can_have_last_resort(qstate->env, delname, delnamelen, iq->qchase.qclass, NULL))) {
+ && can_have_last_resort(qstate->env, delname, delnamelen, iq->qchase.qclass, NULL, NULL, NULL))) {
/* remove first label from delname, root goes to hints,
* but only to fetch glue, not for qtype=DS. */
/* also when prefetching an NS record, fetch it again from
@@ -1584,6 +1630,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
* root priming situation. */
if(iq->dp == NULL) {
int r;
+ int nolock = 0;
/* if under auth zone, no prime needed */
if(!auth_zone_delegpt(qstate, iq, delname, delnamelen))
return error_response(qstate, id,
@@ -1597,12 +1644,13 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
break; /* got noprime-stub-zone, continue */
else if(r)
return 0; /* stub prime request made */
- if(forwards_lookup_root(qstate->env->fwds,
- iq->qchase.qclass)) {
+ if(forwards_lookup_root(qstate->env->fwds,
+ iq->qchase.qclass, nolock)) {
+ lock_rw_unlock(&qstate->env->fwds->lock);
/* forward zone root, no root prime needed */
/* fill in some dp - safety belt */
- iq->dp = hints_lookup_root(qstate->env->hints,
- iq->qchase.qclass);
+ iq->dp = hints_find_root(qstate->env->hints,
+ iq->qchase.qclass, nolock);
if(!iq->dp) {
log_err("internal error: no hints dp");
errinf(qstate, "no hints for this class");
@@ -1610,6 +1658,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
LDNS_RCODE_SERVFAIL);
}
iq->dp = delegpt_copy(iq->dp, qstate->region);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(!iq->dp) {
log_err("out of memory in safety belt");
errinf(qstate, "malloc failure, in safety belt");
@@ -1649,15 +1698,13 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
if(iter_dp_is_useless(&qstate->qinfo, qstate->query_flags,
iq->dp, ie->supports_ipv4, ie->supports_ipv6,
ie->use_nat64)) {
- struct delegpt* retdp = NULL;
- if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen, iq->qchase.qclass, &retdp)) {
- if(retdp) {
+ int have_dp = 0;
+ if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen, iq->qchase.qclass, &have_dp, &iq->dp, qstate->region)) {
+ if(have_dp) {
verbose(VERB_QUERY, "cache has stub "
"or fwd but no addresses, "
"fallback to config");
- iq->dp = delegpt_copy(retdp,
- qstate->region);
- if(!iq->dp) {
+ if(have_dp && !iq->dp) {
log_err("out of memory in "
"stub/fwd fallback");
errinf(qstate, "malloc failure, for fallback to config");
@@ -1677,10 +1724,11 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
if(dname_is_root(iq->dp->name)) {
/* use safety belt */
+ int nolock = 0;
verbose(VERB_QUERY, "Cache has root NS but "
"no addresses. Fallback to the safety belt.");
- iq->dp = hints_lookup_root(qstate->env->hints,
- iq->qchase.qclass);
+ iq->dp = hints_find_root(qstate->env->hints,
+ iq->qchase.qclass, nolock);
/* note deleg_msg is from previous lookup,
* but RD is on, so it is not used */
if(!iq->dp) {
@@ -1689,6 +1737,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
LDNS_RCODE_REFUSED);
}
iq->dp = delegpt_copy(iq->dp, qstate->region);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(!iq->dp) {
log_err("out of memory in safety belt");
errinf(qstate, "malloc failure, in safety belt, for root");
@@ -1744,6 +1793,7 @@ processInitRequest2(struct module_qstate* qstate, struct iter_qstate* iq,
delnamelen = iq->qchase.qname_len;
if(iq->refetch_glue) {
struct iter_hints_stub* stub;
+ int nolock = 0;
if(!iq->dp) {
log_err("internal or malloc fail: no dp for refetch");
errinf(qstate, "malloc failure, no delegation info");
@@ -1753,12 +1803,14 @@ processInitRequest2(struct module_qstate* qstate, struct iter_qstate* iq,
* this is above stub without stub-first. */
stub = hints_lookup_stub(
qstate->env->hints, iq->qchase.qname, iq->qchase.qclass,
- iq->dp);
+ iq->dp, nolock);
if(!stub || !stub->dp->has_parent_side_NS ||
dname_subdomain_c(iq->dp->name, stub->dp->name)) {
delname = iq->dp->name;
delnamelen = iq->dp->namelen;
}
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(stub) { lock_rw_unlock(&qstate->env->hints->lock); }
}
if(iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue) {
if(!dname_is_root(delname))
@@ -2062,7 +2114,7 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
log_assert(iq->dp);
if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen,
- iq->qchase.qclass, NULL)) {
+ iq->qchase.qclass, NULL, NULL, NULL)) {
/* fail -- no more targets, no more hope of targets, no hope
* of a response. */
errinf(qstate, "all the configured stub or forward servers failed,");
@@ -2072,21 +2124,24 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
return error_response_cache(qstate, id, LDNS_RCODE_SERVFAIL);
}
if(!iq->dp->has_parent_side_NS && dname_is_root(iq->dp->name)) {
- struct delegpt* p = hints_lookup_root(qstate->env->hints,
- iq->qchase.qclass);
- if(p) {
+ struct delegpt* dp;
+ int nolock = 0;
+ dp = hints_find_root(qstate->env->hints,
+ iq->qchase.qclass, nolock);
+ if(dp) {
struct delegpt_addr* a;
iq->chase_flags &= ~BIT_RD; /* go to authorities */
- for(ns = p->nslist; ns; ns=ns->next) {
+ for(ns = dp->nslist; ns; ns=ns->next) {
(void)delegpt_add_ns(iq->dp, qstate->region,
ns->name, ns->lame, ns->tls_auth_name,
ns->port);
}
- for(a = p->target_list; a; a=a->next_target) {
+ for(a = dp->target_list; a; a=a->next_target) {
(void)delegpt_add_addr(iq->dp, qstate->region,
&a->addr, a->addrlen, a->bogus,
a->lame, a->tls_auth_name, -1, NULL);
}
+ lock_rw_unlock(&qstate->env->hints->lock);
}
iq->dp->has_parent_side_NS = 1;
} else if(!iq->dp->has_parent_side_NS) {
@@ -2164,7 +2219,7 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
if( ((ie->supports_ipv6 && !ns->done_pside6) ||
((ie->supports_ipv4 || ie->use_nat64) && !ns->done_pside4)) &&
!can_have_last_resort(qstate->env, ns->name, ns->namelen,
- iq->qchase.qclass, NULL)) {
+ iq->qchase.qclass, NULL, NULL, NULL)) {
log_nametypeclass(VERB_ALGO, "cannot pside lookup ns "
"because it is also a stub/forward,",
ns->name, LDNS_RR_TYPE_NS, iq->qchase.qclass);
@@ -2746,8 +2801,51 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
delegpt_add_unused_targets(iq->dp);
if(qstate->env->auth_zones) {
- /* apply rpz triggers at query time */
+ uint8_t* sname = NULL;
+ size_t snamelen = 0;
+ /* apply rpz triggers at query time; nameserver IP and dname */
+ struct dns_msg* forged_response_after_cname;
struct dns_msg* forged_response = rpz_callback_from_iterator_module(qstate, iq);
+ int count = 0;
+ while(forged_response && reply_find_rrset_section_an(
+ forged_response->rep, iq->qchase.qname,
+ iq->qchase.qname_len, LDNS_RR_TYPE_CNAME,
+ iq->qchase.qclass) &&
+ iq->qchase.qtype != LDNS_RR_TYPE_CNAME &&
+ count++ < ie->max_query_restarts) {
+ /* another cname to follow */
+ if(!handle_cname_response(qstate, iq, forged_response,
+ &sname, &snamelen)) {
+ errinf(qstate, "malloc failure, CNAME info");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ iq->qchase.qname = sname;
+ iq->qchase.qname_len = snamelen;
+ forged_response_after_cname =
+ rpz_callback_from_iterator_cname(qstate, iq);
+ if(forged_response_after_cname) {
+ forged_response = forged_response_after_cname;
+ } else {
+ /* Follow the CNAME with a query restart */
+ iq->deleg_msg = NULL;
+ iq->dp = NULL;
+ iq->dsns_point = NULL;
+ iq->auth_zone_response = 0;
+ iq->refetch_glue = 0;
+ iq->query_restart_count++;
+ iq->sent_count = 0;
+ iq->dp_target_count = 0;
+ if(qstate->env->cfg->qname_minimisation)
+ iq->minimisation_state = INIT_MINIMISE_STATE;
+ outbound_list_clear(&iq->outlist);
+ iq->num_current_queries = 0;
+ fptr_ok(fptr_whitelist_modenv_detach_subs(
+ qstate->env->detach_subs));
+ (*qstate->env->detach_subs)(qstate);
+ iq->num_target_queries = 0;
+ return next_state(iq, INIT_REQUEST_STATE);
+ }
+ }
if(forged_response != NULL) {
qstate->ext_state[id] = module_finished;
qstate->return_rcode = LDNS_RCODE_NOERROR;
@@ -3082,7 +3180,8 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
/* DNAME to a subdomain loop; do not recurse */
type = RESPONSE_TYPE_ANSWER;
}
- } else if(type == RESPONSE_TYPE_CNAME &&
+ }
+ if(type == RESPONSE_TYPE_CNAME &&
iq->qchase.qtype == LDNS_RR_TYPE_CNAME &&
iq->minimisation_state == MINIMISE_STATE &&
query_dname_compare(iq->qchase.qname, iq->qinfo_out.qname) == 0) {
@@ -3193,6 +3292,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
}
return final_state(iq);
} else if(type == RESPONSE_TYPE_REFERRAL) {
+ struct delegpt* old_dp = NULL;
/* REFERRAL type responses get a reset of the
* delegation point, and back to the QUERYTARGETS_STATE. */
verbose(VERB_DETAIL, "query response was REFERRAL");
@@ -3244,6 +3344,8 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
/* Reset the event state, setting the current delegation
* point to the referral. */
iq->deleg_msg = iq->response;
+ /* Keep current delegation point for label comparison */
+ old_dp = iq->dp;
iq->dp = delegpt_from_message(iq->response, qstate->region);
if (qstate->env->cfg->qname_minimisation)
iq->minimisation_state = INIT_MINIMISE_STATE;
@@ -3251,6 +3353,20 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
errinf(qstate, "malloc failure, for delegation point");
return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
}
+ if(old_dp->namelabs + 1 < iq->dp->namelabs) {
+ /* We got a grandchild delegation (more than one label
+ * difference) rather than the expected child zone. Check for in-between
+ * delegations in the cache and remove them.
+ * They could prove problematic when they expire
+ * and rrset_expired_above() encounters them during
+ * delegation cache lookups. */
+ uint8_t* qname = iq->dp->name;
+ size_t qnamelen = iq->dp->namelen;
+ rrset_cache_remove_above(qstate->env->rrset_cache,
+ &qname, &qnamelen, LDNS_RR_TYPE_NS,
+ iq->qchase.qclass, *qstate->env->now,
+ old_dp->name, old_dp->namelen);
+ }
if(!cache_fill_missing(qstate->env, iq->qchase.qclass,
qstate->region, iq->dp)) {
errinf(qstate, "malloc failure, copy extra info into delegation point");
@@ -3341,10 +3457,13 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
/* apply rpz qname triggers after cname */
struct dns_msg* forged_response =
rpz_callback_from_iterator_cname(qstate, iq);
+ int count = 0;
while(forged_response && reply_find_rrset_section_an(
forged_response->rep, iq->qchase.qname,
iq->qchase.qname_len, LDNS_RR_TYPE_CNAME,
- iq->qchase.qclass)) {
+ iq->qchase.qclass) &&
+ iq->qchase.qtype != LDNS_RR_TYPE_CNAME &&
+ count++ < ie->max_query_restarts) {
/* another cname to follow */
if(!handle_cname_response(qstate, iq, forged_response,
&sname, &snamelen)) {
@@ -3926,17 +4045,9 @@ processFinished(struct module_qstate* qstate, struct iter_qstate* iq,
!qstate->env->cfg->val_log_squelch) {
char* err_str = errinf_to_str_misc(qstate);
if(err_str) {
- size_t err_str_len = strlen(err_str);
verbose(VERB_ALGO, "iterator EDE: %s", err_str);
- /* allocate space and store the error
- * string */
- iq->response->rep->reason_bogus_str = regional_alloc(
- qstate->region,
- sizeof(char) * (err_str_len+1));
- memcpy(iq->response->rep->reason_bogus_str,
- err_str, err_str_len+1);
+ iq->response->rep->reason_bogus_str = err_str;
}
- free(err_str);
}
/* we have finished processing this query */