Diffstat (limited to 'usr.sbin/unbound/libunbound')
-rw-r--r--  usr.sbin/unbound/libunbound/context.c      400
-rw-r--r--  usr.sbin/unbound/libunbound/context.h      345
-rw-r--r--  usr.sbin/unbound/libunbound/libunbound.c  1124
-rw-r--r--  usr.sbin/unbound/libunbound/libworker.c    901
-rw-r--r--  usr.sbin/unbound/libunbound/libworker.h    170
-rw-r--r--  usr.sbin/unbound/libunbound/ubsyms.def      29
-rw-r--r--  usr.sbin/unbound/libunbound/unbound.h      556
7 files changed, 3525 insertions(+), 0 deletions(-)
diff --git a/usr.sbin/unbound/libunbound/context.c b/usr.sbin/unbound/libunbound/context.c
new file mode 100644
index 00000000000..f28307971b4
--- /dev/null
+++ b/usr.sbin/unbound/libunbound/context.c
@@ -0,0 +1,400 @@
+/*
+ * libunbound/context.c - validating context for unbound internal use
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains the validator context structure.
+ */
+#include "config.h"
+#include "libunbound/context.h"
+#include "util/module.h"
+#include "util/config_file.h"
+#include "util/net_help.h"
+#include "services/modstack.h"
+#include "services/localzone.h"
+#include "services/cache/rrset.h"
+#include "services/cache/infra.h"
+#include "util/data/msgreply.h"
+#include "util/storage/slabhash.h"
+
+int
+context_finalize(struct ub_ctx* ctx)
+{
+ struct config_file* cfg = ctx->env->cfg;
+ verbosity = cfg->verbosity;
+ if(ctx->logfile_override)
+ log_file(ctx->log_out);
+ else log_init(cfg->logfile, cfg->use_syslog, NULL);
+ config_apply(cfg);
+ if(!modstack_setup(&ctx->mods, cfg->module_conf, ctx->env))
+ return UB_INITFAIL;
+ ctx->local_zones = local_zones_create();
+ if(!ctx->local_zones)
+ return UB_NOMEM;
+ if(!local_zones_apply_cfg(ctx->local_zones, cfg))
+ return UB_INITFAIL;
+ if(!ctx->env->msg_cache ||
+ cfg->msg_cache_size != slabhash_get_size(ctx->env->msg_cache) ||
+ cfg->msg_cache_slabs != ctx->env->msg_cache->size) {
+ slabhash_delete(ctx->env->msg_cache);
+ ctx->env->msg_cache = slabhash_create(cfg->msg_cache_slabs,
+ HASH_DEFAULT_STARTARRAY, cfg->msg_cache_size,
+ msgreply_sizefunc, query_info_compare,
+ query_entry_delete, reply_info_delete, NULL);
+ if(!ctx->env->msg_cache)
+ return UB_NOMEM;
+ }
+ ctx->env->rrset_cache = rrset_cache_adjust(ctx->env->rrset_cache,
+ ctx->env->cfg, ctx->env->alloc);
+ if(!ctx->env->rrset_cache)
+ return UB_NOMEM;
+ ctx->env->infra_cache = infra_adjust(ctx->env->infra_cache, cfg);
+ if(!ctx->env->infra_cache)
+ return UB_NOMEM;
+ ctx->finalized = 1;
+ return UB_NOERROR;
+}
+
+int context_query_cmp(const void* a, const void* b)
+{
+ if( *(int*)a < *(int*)b )
+ return -1;
+ if( *(int*)a > *(int*)b )
+ return 1;
+ return 0;
+}
+
+void
+context_query_delete(struct ctx_query* q)
+{
+ if(!q) return;
+ ub_resolve_free(q->res);
+ free(q->msg);
+ free(q);
+}
+
+/** How many times to try to find an unused query-id-number for async */
+#define NUM_ID_TRIES 100000
+/** find next useful id number, or 0 on error */
+static int
+find_id(struct ub_ctx* ctx, int* id)
+{
+ size_t tries = 0;
+ ctx->next_querynum++;
+ while(rbtree_search(&ctx->queries, &ctx->next_querynum)) {
+ ctx->next_querynum++; /* numerical wraparound is fine */
+ if(tries++ > NUM_ID_TRIES)
+ return 0;
+ }
+ *id = ctx->next_querynum;
+ return 1;
+}
+
+struct ctx_query*
+context_new(struct ub_ctx* ctx, char* name, int rrtype, int rrclass,
+ ub_callback_t cb, void* cbarg)
+{
+ struct ctx_query* q = (struct ctx_query*)calloc(1, sizeof(*q));
+ if(!q) return NULL;
+ lock_basic_lock(&ctx->cfglock);
+ if(!find_id(ctx, &q->querynum)) {
+ lock_basic_unlock(&ctx->cfglock);
+ free(q);
+ return NULL;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ q->node.key = &q->querynum;
+ q->async = (cb != NULL);
+ q->cb = cb;
+ q->cb_arg = cbarg;
+ q->res = (struct ub_result*)calloc(1, sizeof(*q->res));
+ if(!q->res) {
+ free(q);
+ return NULL;
+ }
+ q->res->qname = strdup(name);
+ if(!q->res->qname) {
+ free(q->res);
+ free(q);
+ return NULL;
+ }
+ q->res->qtype = rrtype;
+ q->res->qclass = rrclass;
+
+ /* add to query list */
+ lock_basic_lock(&ctx->cfglock);
+ if(q->async)
+ ctx->num_async ++;
+ (void)rbtree_insert(&ctx->queries, &q->node);
+ lock_basic_unlock(&ctx->cfglock);
+ return q;
+}
+
+struct alloc_cache*
+context_obtain_alloc(struct ub_ctx* ctx, int locking)
+{
+ struct alloc_cache* a;
+ int tnum = 0;
+ if(locking) {
+ lock_basic_lock(&ctx->cfglock);
+ }
+ a = ctx->alloc_list;
+ if(a)
+ ctx->alloc_list = a->super; /* snip off list */
+ else tnum = ctx->thr_next_num++;
+ if(locking) {
+ lock_basic_unlock(&ctx->cfglock);
+ }
+ if(a) {
+ a->super = &ctx->superalloc;
+ return a;
+ }
+ a = (struct alloc_cache*)calloc(1, sizeof(*a));
+ if(!a)
+ return NULL;
+ alloc_init(a, &ctx->superalloc, tnum);
+ return a;
+}
+
+void
+context_release_alloc(struct ub_ctx* ctx, struct alloc_cache* alloc,
+ int locking)
+{
+ if(!ctx || !alloc)
+ return;
+ if(locking) {
+ lock_basic_lock(&ctx->cfglock);
+ }
+ alloc->super = ctx->alloc_list;
+ ctx->alloc_list = alloc;
+ if(locking) {
+ lock_basic_unlock(&ctx->cfglock);
+ }
+}
+
+uint8_t*
+context_serialize_new_query(struct ctx_query* q, uint32_t* len)
+{
+ /* format for new query is
+ * o uint32 cmd
+ * o uint32 id
+ * o uint32 type
+ * o uint32 class
+ * o rest queryname (string)
+ */
+ uint8_t* p;
+ size_t slen = strlen(q->res->qname) + 1/*end of string*/;
+ *len = sizeof(uint32_t)*4 + slen;
+ p = (uint8_t*)malloc(*len);
+ if(!p) return NULL;
+ ldns_write_uint32(p, UB_LIBCMD_NEWQUERY);
+ ldns_write_uint32(p+sizeof(uint32_t), (uint32_t)q->querynum);
+ ldns_write_uint32(p+2*sizeof(uint32_t), (uint32_t)q->res->qtype);
+ ldns_write_uint32(p+3*sizeof(uint32_t), (uint32_t)q->res->qclass);
+ memmove(p+4*sizeof(uint32_t), q->res->qname, slen);
+ return p;
+}
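For illustration, a minimal standalone sketch of reading those four fields and the name back out of such a buffer. It assumes only the big-endian encoding that ldns_write_uint32 uses; the helper names are hypothetical and not part of the library:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl */

/* hypothetical helper: read a big-endian uint32, like ldns_read_uint32 */
static uint32_t rd_u32(const uint8_t* p)
{
	uint32_t v;
	memcpy(&v, p, sizeof(v));
	return ntohl(v);
}

/* print the fields of a buffer built by context_serialize_new_query */
static void dump_newquery(const uint8_t* p, uint32_t len)
{
	if(len < 4*sizeof(uint32_t)+1)
		return;	/* same minimum length the deserialize code checks */
	printf("cmd=%u id=%u type=%u class=%u name=%s\n",
		(unsigned)rd_u32(p),		/* UB_LIBCMD_NEWQUERY */
		(unsigned)rd_u32(p+4),		/* query id number */
		(unsigned)rd_u32(p+8),		/* RR type */
		(unsigned)rd_u32(p+12),		/* RR class */
		(const char*)(p+16));		/* zero-terminated query name */
}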
+
+struct ctx_query*
+context_deserialize_new_query(struct ub_ctx* ctx, uint8_t* p, uint32_t len)
+{
+ struct ctx_query* q = (struct ctx_query*)calloc(1, sizeof(*q));
+ if(!q) return NULL;
+ if(len < 4*sizeof(uint32_t)+1) {
+ free(q);
+ return NULL;
+ }
+ log_assert( ldns_read_uint32(p) == UB_LIBCMD_NEWQUERY);
+ q->querynum = (int)ldns_read_uint32(p+sizeof(uint32_t));
+ q->node.key = &q->querynum;
+ q->async = 1;
+ q->res = (struct ub_result*)calloc(1, sizeof(*q->res));
+ if(!q->res) {
+ free(q);
+ return NULL;
+ }
+ q->res->qtype = (int)ldns_read_uint32(p+2*sizeof(uint32_t));
+ q->res->qclass = (int)ldns_read_uint32(p+3*sizeof(uint32_t));
+ q->res->qname = strdup((char*)(p+4*sizeof(uint32_t)));
+ if(!q->res->qname) {
+ free(q->res);
+ free(q);
+ return NULL;
+ }
+
+ /* add to query list */
+ ctx->num_async++;
+ (void)rbtree_insert(&ctx->queries, &q->node);
+ return q;
+}
+
+struct ctx_query*
+context_lookup_new_query(struct ub_ctx* ctx, uint8_t* p, uint32_t len)
+{
+ struct ctx_query* q;
+ int querynum;
+ if(len < 4*sizeof(uint32_t)+1) {
+ return NULL;
+ }
+ log_assert( ldns_read_uint32(p) == UB_LIBCMD_NEWQUERY);
+ querynum = (int)ldns_read_uint32(p+sizeof(uint32_t));
+ q = (struct ctx_query*)rbtree_search(&ctx->queries, &querynum);
+ if(!q) {
+ return NULL;
+ }
+ log_assert(q->async);
+ return q;
+}
+
+uint8_t*
+context_serialize_answer(struct ctx_query* q, int err, ldns_buffer* pkt,
+ uint32_t* len)
+{
+ /* answer format
+ * o uint32 cmd
+ * o uint32 id
+ * o uint32 error_code
+ * o uint32 msg_security
+ * o uint32 length of why_bogus string (+1 for eos); 0 absent.
+ * o why_bogus_string
+ * o the remainder is the answer msg from resolver lookup.
+ * remainder can be length 0.
+ */
+ size_t pkt_len = pkt?ldns_buffer_remaining(pkt):0;
+ size_t wlen = (pkt&&q->res->why_bogus)?strlen(q->res->why_bogus)+1:0;
+ uint8_t* p;
+ *len = sizeof(uint32_t)*5 + pkt_len + wlen;
+ p = (uint8_t*)malloc(*len);
+ if(!p) return NULL;
+ ldns_write_uint32(p, UB_LIBCMD_ANSWER);
+ ldns_write_uint32(p+sizeof(uint32_t), (uint32_t)q->querynum);
+ ldns_write_uint32(p+2*sizeof(uint32_t), (uint32_t)err);
+ ldns_write_uint32(p+3*sizeof(uint32_t), (uint32_t)q->msg_security);
+ ldns_write_uint32(p+4*sizeof(uint32_t), (uint32_t)wlen);
+ if(wlen > 0)
+ memmove(p+5*sizeof(uint32_t), q->res->why_bogus, wlen);
+ if(pkt_len > 0)
+ memmove(p+5*sizeof(uint32_t)+wlen,
+ ldns_buffer_begin(pkt), pkt_len);
+ return p;
+}
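Written out, the buffer built above has the following octet layout (a sketch derived from the writes in this function; all uint32 fields are big-endian):

/*
 * offset 0        uint32  UB_LIBCMD_ANSWER
 * offset 4        uint32  query id number
 * offset 8        uint32  error code handed to the client
 * offset 12       uint32  msg_security (sec_status of the result)
 * offset 16       uint32  wlen: length of why_bogus incl. its 0 byte, or 0
 * offset 20       wlen octets: the why_bogus string (only when wlen > 0)
 * offset 20+wlen  remainder: the answer packet from the resolver (may be empty)
 */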
+
+struct ctx_query*
+context_deserialize_answer(struct ub_ctx* ctx,
+ uint8_t* p, uint32_t len, int* err)
+{
+ struct ctx_query* q = NULL ;
+ int id;
+ size_t wlen;
+ if(len < 5*sizeof(uint32_t)) return NULL;
+ log_assert( ldns_read_uint32(p) == UB_LIBCMD_ANSWER);
+ id = (int)ldns_read_uint32(p+sizeof(uint32_t));
+ q = (struct ctx_query*)rbtree_search(&ctx->queries, &id);
+ if(!q) return NULL;
+ *err = (int)ldns_read_uint32(p+2*sizeof(uint32_t));
+ q->msg_security = ldns_read_uint32(p+3*sizeof(uint32_t));
+ wlen = (size_t)ldns_read_uint32(p+4*sizeof(uint32_t));
+ if(len > 5*sizeof(uint32_t) && wlen > 0) {
+ if(len >= 5*sizeof(uint32_t)+wlen)
+ q->res->why_bogus = (char*)memdup(
+ p+5*sizeof(uint32_t), wlen);
+ if(!q->res->why_bogus) {
+ /* pass malloc failure to the user callback */
+ q->msg_len = 0;
+ *err = UB_NOMEM;
+ return q;
+ }
+ q->res->why_bogus[wlen-1] = 0; /* zero terminated for sure */
+ }
+ if(len > 5*sizeof(uint32_t)+wlen) {
+ q->msg_len = len - 5*sizeof(uint32_t) - wlen;
+ q->msg = (uint8_t*)memdup(p+5*sizeof(uint32_t)+wlen,
+ q->msg_len);
+ if(!q->msg) {
+ /* pass malloc failure to the user callback */
+ q->msg_len = 0;
+ *err = UB_NOMEM;
+ return q;
+ }
+ }
+ return q;
+}
+
+uint8_t*
+context_serialize_cancel(struct ctx_query* q, uint32_t* len)
+{
+ /* format of cancel:
+ * o uint32 cmd
+ * o uint32 async-id */
+ uint8_t* p = (uint8_t*)malloc(2*sizeof(uint32_t));
+ if(!p) return NULL;
+ *len = 2*sizeof(uint32_t);
+ ldns_write_uint32(p, UB_LIBCMD_CANCEL);
+ ldns_write_uint32(p+sizeof(uint32_t), (uint32_t)q->querynum);
+ return p;
+}
+
+struct ctx_query* context_deserialize_cancel(struct ub_ctx* ctx,
+ uint8_t* p, uint32_t len)
+{
+ struct ctx_query* q;
+ int id;
+ if(len != 2*sizeof(uint32_t)) return NULL;
+ log_assert( ldns_read_uint32(p) == UB_LIBCMD_CANCEL);
+ id = (int)ldns_read_uint32(p+sizeof(uint32_t));
+ q = (struct ctx_query*)rbtree_search(&ctx->queries, &id);
+ return q;
+}
+
+uint8_t*
+context_serialize_quit(uint32_t* len)
+{
+ uint8_t* p = (uint8_t*)malloc(sizeof(uint32_t));
+ if(!p)
+ return NULL;
+ *len = sizeof(uint32_t);
+ ldns_write_uint32(p, UB_LIBCMD_QUIT);
+ return p;
+}
+
+enum ub_ctx_cmd context_serial_getcmd(uint8_t* p, uint32_t len)
+{
+ uint32_t v;
+ if((size_t)len < sizeof(v))
+ return UB_LIBCMD_QUIT;
+ v = ldns_read_uint32(p);
+ return v;
+}
diff --git a/usr.sbin/unbound/libunbound/context.h b/usr.sbin/unbound/libunbound/context.h
new file mode 100644
index 00000000000..8898f3ebfdf
--- /dev/null
+++ b/usr.sbin/unbound/libunbound/context.h
@@ -0,0 +1,345 @@
+/*
+ * libunbound/context.h - validating context for unbound internal use
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains the validator context structure.
+ */
+#ifndef LIBUNBOUND_CONTEXT_H
+#define LIBUNBOUND_CONTEXT_H
+#include "util/locks.h"
+#include "util/alloc.h"
+#include "util/rbtree.h"
+#include "services/modstack.h"
+#include "libunbound/unbound.h"
+#include "util/data/packed_rrset.h"
+struct libworker;
+struct tube;
+
+/**
+ * The context structure
+ *
+ * Contains two pipes for async service
+ * qq : write queries to the async service pid/tid.
+ * rr : read results from the async service pid/tid.
+ */
+struct ub_ctx {
+ /* --- pipes --- */
+ /** mutex on query write pipe */
+ lock_basic_t qqpipe_lock;
+ /** the query write pipe */
+ struct tube* qq_pipe;
+ /** mutex on result read pipe */
+ lock_basic_t rrpipe_lock;
+ /** the result read pipe */
+ struct tube* rr_pipe;
+
+ /* --- shared data --- */
+ /** mutex for access to env.cfg, finalized and dothread */
+ lock_basic_t cfglock;
+ /**
+ * The context has been finalized.
+ * This happens after the config is read, when the first resolve is done.
+ * The modules are initialized (module-init()) and the shared caches are created.
+ */
+ int finalized;
+
+ /** is bg worker created yet ? */
+ int created_bg;
+ /** pid of bg worker process */
+ pid_t bg_pid;
+ /** tid of bg worker thread */
+ ub_thread_t bg_tid;
+
+ /** do threading (instead of forking) for async resolution */
+ int dothread;
+ /** next thread number for new threads */
+ int thr_next_num;
+ /** if the logfile is overridden */
+ int logfile_override;
+ /** what logfile to use instead */
+ FILE* log_out;
+ /**
+ * List of alloc-cache-id points per threadnum for not-in-use threads.
+ * Simply the entire struct alloc_cache, with the 'super' member used
+ * to link a singly linked list. Reset the super member to the superalloc
+ * before use.
+ */
+ struct alloc_cache* alloc_list;
+
+ /** shared caches, and so on */
+ struct alloc_cache superalloc;
+ /** module env master value */
+ struct module_env* env;
+ /** module stack */
+ struct module_stack mods;
+ /** local authority zones */
+ struct local_zones* local_zones;
+ /** random state used to seed new random state structures */
+ struct ub_randstate* seed_rnd;
+
+ /** next query number (to try) to use */
+ int next_querynum;
+ /** number of async queries outstanding */
+ size_t num_async;
+ /**
+ * Tree of outstanding queries. Indexed by querynum
+ * Used when results come in for async to lookup.
+ * Used when cancel is done for lookup (and delete).
+ * Used to see if querynum is free for use.
+ * Content of type ctx_query.
+ */
+ rbtree_t queries;
+};
+
+/**
+ * The queries outstanding for the libunbound resolver.
+ * These are outstanding for async resolution.
+ * But also, outstanding for sync resolution by one of the threads that
+ * has joined the threadpool.
+ */
+struct ctx_query {
+ /** node in rbtree, must be first entry, key is ptr to the querynum */
+ struct rbnode_t node;
+ /** query id number, key for node */
+ int querynum;
+ /** was this an async query? */
+ int async;
+ /** was this query cancelled (for bg worker) */
+ int cancelled;
+
+ /** for async query, the callback function */
+ ub_callback_t cb;
+ /** for async query, the callback user arg */
+ void* cb_arg;
+
+ /** answer message, result from resolver lookup. */
+ uint8_t* msg;
+ /** resulting message length. */
+ size_t msg_len;
+ /** validation status on security */
+ enum sec_status msg_security;
+ /** store libworker that is handling this query */
+ struct libworker* w;
+
+ /** result structure, also contains original query, type, class.
+ * malloced ptr ready to hand to the client. */
+ struct ub_result* res;
+};
+
+/**
+ * The error constants
+ */
+enum ub_ctx_err {
+ /** no error */
+ UB_NOERROR = 0,
+ /** socket operation. Set to -1, so that if an error from _fd() is
+ * passed (-1) it gives a socket error. */
+ UB_SOCKET = -1,
+ /** alloc failure */
+ UB_NOMEM = -2,
+ /** syntax error */
+ UB_SYNTAX = -3,
+ /** DNS service failed */
+ UB_SERVFAIL = -4,
+ /** fork() failed */
+ UB_FORKFAIL = -5,
+ /** cfg change after finalize() */
+ UB_AFTERFINAL = -6,
+ /** initialization failed (bad settings) */
+ UB_INITFAIL = -7,
+ /** error in pipe communication with async bg worker */
+ UB_PIPE = -8,
+ /** error reading from file (resolv.conf) */
+ UB_READFILE = -9,
+ /** error: async_id does not exist or the result has already been delivered */
+ UB_NOID = -10
+};
+
+/**
+ * Command codes for libunbound pipe.
+ *
+ * Serialization looks like this:
+ * o length (of remainder) uint32.
+ * o uint32 command code.
+ * o per command format.
+ */
+enum ub_ctx_cmd {
+ /** QUIT */
+ UB_LIBCMD_QUIT = 0,
+ /** New query, sent to bg worker */
+ UB_LIBCMD_NEWQUERY,
+ /** Cancel query, sent to bg worker */
+ UB_LIBCMD_CANCEL,
+ /** Query result, originates from bg worker */
+ UB_LIBCMD_ANSWER
+};
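Put together with the length word that the tube code prepends, a new-query command for 'www.example.com' type A class IN would cross the qq pipe roughly like this (a sketch; the example values are illustrative):

/*
 * uint32  length of the remainder      (written by tube_write_msg)
 * uint32  UB_LIBCMD_NEWQUERY           (= 1)
 * uint32  query id number              (e.g. 7)
 * uint32  RR type                      (1 = A)
 * uint32  RR class                     (1 = IN)
 * bytes   "www.example.com" '\0'
 */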
+
+/**
+ * finalize a context.
+ * @param ctx: context to finalize. creates shared data.
+ * @return 0 if OK, or errcode.
+ */
+int context_finalize(struct ub_ctx* ctx);
+
+/** compare two ctx_query elements */
+int context_query_cmp(const void* a, const void* b);
+
+/**
+ * delete context query
+ * @param q: query to delete, including message packet and prealloc result
+ */
+void context_query_delete(struct ctx_query* q);
+
+/**
+ * Create new query in context, add to querynum list.
+ * @param ctx: context
+ * @param name: query name
+ * @param rrtype: type
+ * @param rrclass: class
+ * @param cb: callback for async, or NULL for sync.
+ * @param cbarg: user arg for async queries.
+ * @return new ctx_query or NULL for malloc failure.
+ */
+struct ctx_query* context_new(struct ub_ctx* ctx, char* name, int rrtype,
+ int rrclass, ub_callback_t cb, void* cbarg);
+
+/**
+ * Get a new alloc. Creates a new one or uses a cached one.
+ * @param ctx: context
+ * @param locking: if true, cfglock is locked while getting alloc.
+ * @return an alloc, or NULL on mem error.
+ */
+struct alloc_cache* context_obtain_alloc(struct ub_ctx* ctx, int locking);
+
+/**
+ * Release an alloc. Puts it into the cache.
+ * @param ctx: context
+ * @param locking: if true, cfglock is locked while releasing alloc.
+ * @param alloc: alloc to relinquish.
+ */
+void context_release_alloc(struct ub_ctx* ctx, struct alloc_cache* alloc,
+ int locking);
+
+/**
+ * Serialize a context query that asks for data.
+ * This serializes the query name, type, class and id number,
+ * as well as the command code 'new_query'.
+ * @param q: context query
+ * @param len: the length of the allocation is returned.
+ * @return: an alloc, or NULL on mem error.
+ */
+uint8_t* context_serialize_new_query(struct ctx_query* q, uint32_t* len);
+
+/**
+ * Serialize a context_query result to hand back to user.
+ * This serializes the query name, type, ..., and result.
+ * As well as command code 'answer'.
+ * @param q: context query
+ * @param err: error code to pass to client.
+ * @param pkt: the packet to add, can be NULL.
+ * @param len: the length of the allocation is returned.
+ * @return: an alloc, or NULL on mem error.
+ */
+uint8_t* context_serialize_answer(struct ctx_query* q, int err,
+ ldns_buffer* pkt, uint32_t* len);
+
+/**
+ * Serialize a query cancellation. Serializes query async id
+ * as well as command code 'cancel'
+ * @param q: context query
+ * @param len: the length of the allocation is returned.
+ * @return: an alloc, or NULL on mem error.
+ */
+uint8_t* context_serialize_cancel(struct ctx_query* q, uint32_t* len);
+
+/**
+ * Serialize a 'quit' command.
+ * @param len: the length of the allocation is returned.
+ * @return: an alloc, or NULL on mem error.
+ */
+uint8_t* context_serialize_quit(uint32_t* len);
+
+/**
+ * Obtain command code from serialized buffer
+ * @param p: buffer serialized.
+ * @param len: length of buffer.
+ * @return command code or QUIT on error.
+ */
+enum ub_ctx_cmd context_serial_getcmd(uint8_t* p, uint32_t len);
+
+/**
+ * Lookup query from new_query buffer.
+ * @param ctx: context
+ * @param p: buffer serialized.
+ * @param len: length of buffer.
+ * @return looked up ctx_query or NULL for malloc failure.
+ */
+struct ctx_query* context_lookup_new_query(struct ub_ctx* ctx,
+ uint8_t* p, uint32_t len);
+
+/**
+ * Deserialize a new_query buffer.
+ * @param ctx: context
+ * @param p: buffer serialized.
+ * @param len: length of buffer.
+ * @return new ctx_query or NULL for malloc failure.
+ */
+struct ctx_query* context_deserialize_new_query(struct ub_ctx* ctx,
+ uint8_t* p, uint32_t len);
+
+/**
+ * Deserialize an answer buffer.
+ * @param ctx: context
+ * @param p: buffer serialized.
+ * @param len: length of buffer.
+ * @param err: error code to be returned to client is passed.
+ * @return ctx_query with answer added or NULL for malloc failure.
+ */
+struct ctx_query* context_deserialize_answer(struct ub_ctx* ctx,
+ uint8_t* p, uint32_t len, int* err);
+
+/**
+ * Deserialize a cancel buffer.
+ * @param ctx: context
+ * @param p: buffer serialized.
+ * @param len: length of buffer.
+ * @return ctx_query to cancel or NULL for failure.
+ */
+struct ctx_query* context_deserialize_cancel(struct ub_ctx* ctx,
+ uint8_t* p, uint32_t len);
+
+#endif /* LIBUNBOUND_CONTEXT_H */
diff --git a/usr.sbin/unbound/libunbound/libunbound.c b/usr.sbin/unbound/libunbound/libunbound.c
new file mode 100644
index 00000000000..10d00ddc521
--- /dev/null
+++ b/usr.sbin/unbound/libunbound/libunbound.c
@@ -0,0 +1,1124 @@
+/*
+ * unbound.c - unbound validating resolver public API implementation
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to resolve DNS queries and
+ * validate the answers, synchronously and asynchronously.
+ *
+ */
+
+/* include the public api first, it should be able to stand alone */
+#include "libunbound/unbound.h"
+#include "config.h"
+#include <ctype.h>
+#include "libunbound/context.h"
+#include "libunbound/libworker.h"
+#include "util/locks.h"
+#include "util/config_file.h"
+#include "util/alloc.h"
+#include "util/module.h"
+#include "util/regional.h"
+#include "util/log.h"
+#include "util/random.h"
+#include "util/net_help.h"
+#include "util/tube.h"
+#include "services/modstack.h"
+#include "services/localzone.h"
+#include "services/cache/infra.h"
+#include "services/cache/rrset.h"
+
+#if defined(UB_ON_WINDOWS) && defined (HAVE_WINDOWS_H)
+#include <windows.h>
+#include <iphlpapi.h>
+#endif /* UB_ON_WINDOWS */
+
+struct ub_ctx*
+ub_ctx_create(void)
+{
+ struct ub_ctx* ctx;
+ unsigned int seed;
+#ifdef USE_WINSOCK
+ int r;
+ WSADATA wsa_data;
+#endif
+
+ log_init(NULL, 0, NULL); /* logs to stderr */
+ log_ident_set("libunbound");
+#ifdef USE_WINSOCK
+ if((r = WSAStartup(MAKEWORD(2,2), &wsa_data)) != 0) {
+ log_err("could not init winsock. WSAStartup: %s",
+ wsa_strerror(r));
+ return NULL;
+ }
+#endif
+ verbosity = 0; /* errors only */
+ checklock_start();
+ ctx = (struct ub_ctx*)calloc(1, sizeof(*ctx));
+ if(!ctx) {
+ errno = ENOMEM;
+ return NULL;
+ }
+ alloc_init(&ctx->superalloc, NULL, 0);
+ seed = (unsigned int)time(NULL) ^ (unsigned int)getpid();
+ if(!(ctx->seed_rnd = ub_initstate(seed, NULL))) {
+ seed = 0;
+ ub_randfree(ctx->seed_rnd);
+ free(ctx);
+ errno = ENOMEM;
+ return NULL;
+ }
+ seed = 0;
+ if((ctx->qq_pipe = tube_create()) == NULL) {
+ int e = errno;
+ ub_randfree(ctx->seed_rnd);
+ free(ctx);
+ errno = e;
+ return NULL;
+ }
+ if((ctx->rr_pipe = tube_create()) == NULL) {
+ int e = errno;
+ tube_delete(ctx->qq_pipe);
+ ub_randfree(ctx->seed_rnd);
+ free(ctx);
+ errno = e;
+ return NULL;
+ }
+ lock_basic_init(&ctx->qqpipe_lock);
+ lock_basic_init(&ctx->rrpipe_lock);
+ lock_basic_init(&ctx->cfglock);
+ ctx->env = (struct module_env*)calloc(1, sizeof(*ctx->env));
+ if(!ctx->env) {
+ tube_delete(ctx->qq_pipe);
+ tube_delete(ctx->rr_pipe);
+ ub_randfree(ctx->seed_rnd);
+ free(ctx);
+ errno = ENOMEM;
+ return NULL;
+ }
+ ctx->env->cfg = config_create_forlib();
+ if(!ctx->env->cfg) {
+ tube_delete(ctx->qq_pipe);
+ tube_delete(ctx->rr_pipe);
+ free(ctx->env);
+ ub_randfree(ctx->seed_rnd);
+ free(ctx);
+ errno = ENOMEM;
+ return NULL;
+ }
+ ctx->env->alloc = &ctx->superalloc;
+ ctx->env->worker = NULL;
+ ctx->env->need_to_validate = 0;
+ modstack_init(&ctx->mods);
+ rbtree_init(&ctx->queries, &context_query_cmp);
+ return ctx;
+}
+
+/** delete q */
+static void
+delq(rbnode_t* n, void* ATTR_UNUSED(arg))
+{
+ struct ctx_query* q = (struct ctx_query*)n;
+ context_query_delete(q);
+}
+
+void
+ub_ctx_delete(struct ub_ctx* ctx)
+{
+ struct alloc_cache* a, *na;
+ if(!ctx) return;
+ /* stop the bg thread */
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->created_bg) {
+ uint8_t* msg;
+ uint32_t len;
+ uint32_t cmd = UB_LIBCMD_QUIT;
+ lock_basic_unlock(&ctx->cfglock);
+ lock_basic_lock(&ctx->qqpipe_lock);
+ (void)tube_write_msg(ctx->qq_pipe, (uint8_t*)&cmd,
+ (uint32_t)sizeof(cmd), 0);
+ lock_basic_unlock(&ctx->qqpipe_lock);
+ lock_basic_lock(&ctx->rrpipe_lock);
+ while(tube_read_msg(ctx->rr_pipe, &msg, &len, 0)) {
+ /* discard all results except a quit confirm */
+ if(context_serial_getcmd(msg, len) == UB_LIBCMD_QUIT) {
+ free(msg);
+ break;
+ }
+ free(msg);
+ }
+ lock_basic_unlock(&ctx->rrpipe_lock);
+
+ /* if bg worker is a thread, wait for it to exit, so that all
+ * resources are really gone. */
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->dothread) {
+ lock_basic_unlock(&ctx->cfglock);
+ ub_thread_join(ctx->bg_tid);
+ } else {
+ lock_basic_unlock(&ctx->cfglock);
+ }
+ }
+ else {
+ lock_basic_unlock(&ctx->cfglock);
+ }
+
+
+ modstack_desetup(&ctx->mods, ctx->env);
+ a = ctx->alloc_list;
+ while(a) {
+ na = a->super;
+ a->super = &ctx->superalloc;
+ alloc_clear(a);
+ free(a);
+ a = na;
+ }
+ local_zones_delete(ctx->local_zones);
+ lock_basic_destroy(&ctx->qqpipe_lock);
+ lock_basic_destroy(&ctx->rrpipe_lock);
+ lock_basic_destroy(&ctx->cfglock);
+ tube_delete(ctx->qq_pipe);
+ tube_delete(ctx->rr_pipe);
+ if(ctx->env) {
+ slabhash_delete(ctx->env->msg_cache);
+ rrset_cache_delete(ctx->env->rrset_cache);
+ infra_delete(ctx->env->infra_cache);
+ config_delete(ctx->env->cfg);
+ free(ctx->env);
+ }
+ ub_randfree(ctx->seed_rnd);
+ alloc_clear(&ctx->superalloc);
+ traverse_postorder(&ctx->queries, delq, NULL);
+ free(ctx);
+#ifdef USE_WINSOCK
+ WSACleanup();
+#endif
+}
+
+int
+ub_ctx_set_option(struct ub_ctx* ctx, char* opt, char* val)
+{
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->finalized) {
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_AFTERFINAL;
+ }
+ if(!config_set_option(ctx->env->cfg, opt, val)) {
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_SYNTAX;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+}
+
+int
+ub_ctx_get_option(struct ub_ctx* ctx, char* opt, char** str)
+{
+ int r;
+ lock_basic_lock(&ctx->cfglock);
+ r = config_get_option_collate(ctx->env->cfg, opt, str);
+ lock_basic_unlock(&ctx->cfglock);
+ if(r == 0) r = UB_NOERROR;
+ else if(r == 1) r = UB_SYNTAX;
+ else if(r == 2) r = UB_NOMEM;
+ return r;
+}
+
+int
+ub_ctx_config(struct ub_ctx* ctx, char* fname)
+{
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->finalized) {
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_AFTERFINAL;
+ }
+ if(!config_read(ctx->env->cfg, fname, NULL)) {
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_SYNTAX;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+}
+
+int
+ub_ctx_add_ta(struct ub_ctx* ctx, char* ta)
+{
+ char* dup = strdup(ta);
+ if(!dup) return UB_NOMEM;
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->finalized) {
+ lock_basic_unlock(&ctx->cfglock);
+ free(dup);
+ return UB_AFTERFINAL;
+ }
+ if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_list, dup)) {
+ lock_basic_unlock(&ctx->cfglock);
+ free(dup);
+ return UB_NOMEM;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+}
+
+int
+ub_ctx_add_ta_file(struct ub_ctx* ctx, char* fname)
+{
+ char* dup = strdup(fname);
+ if(!dup) return UB_NOMEM;
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->finalized) {
+ lock_basic_unlock(&ctx->cfglock);
+ free(dup);
+ return UB_AFTERFINAL;
+ }
+ if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_file_list, dup)) {
+ lock_basic_unlock(&ctx->cfglock);
+ free(dup);
+ return UB_NOMEM;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+}
+
+int
+ub_ctx_trustedkeys(struct ub_ctx* ctx, char* fname)
+{
+ char* dup = strdup(fname);
+ if(!dup) return UB_NOMEM;
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->finalized) {
+ lock_basic_unlock(&ctx->cfglock);
+ free(dup);
+ return UB_AFTERFINAL;
+ }
+ if(!cfg_strlist_insert(&ctx->env->cfg->trusted_keys_file_list, dup)) {
+ lock_basic_unlock(&ctx->cfglock);
+ free(dup);
+ return UB_NOMEM;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+}
+
+int
+ub_ctx_debuglevel(struct ub_ctx* ctx, int d)
+{
+ lock_basic_lock(&ctx->cfglock);
+ verbosity = d;
+ ctx->env->cfg->verbosity = d;
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+}
+
+int ub_ctx_debugout(struct ub_ctx* ctx, void* out)
+{
+ lock_basic_lock(&ctx->cfglock);
+ log_file((FILE*)out);
+ ctx->logfile_override = 1;
+ ctx->log_out = out;
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+}
+
+int
+ub_ctx_async(struct ub_ctx* ctx, int dothread)
+{
+#ifdef THREADS_DISABLED
+ if(dothread) /* cannot do threading */
+ return UB_NOERROR;
+#endif
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->finalized) {
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_AFTERFINAL;
+ }
+ ctx->dothread = dothread;
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+}
+
+int
+ub_poll(struct ub_ctx* ctx)
+{
+ /* no need to hold lock while testing for readability. */
+ return tube_poll(ctx->rr_pipe);
+}
+
+int
+ub_fd(struct ub_ctx* ctx)
+{
+ return tube_read_fd(ctx->rr_pipe);
+}
+
+/** process answer from bg worker */
+static int
+process_answer_detail(struct ub_ctx* ctx, uint8_t* msg, uint32_t len,
+ ub_callback_t* cb, void** cbarg, int* err,
+ struct ub_result** res)
+{
+ struct ctx_query* q;
+ if(context_serial_getcmd(msg, len) != UB_LIBCMD_ANSWER) {
+ log_err("error: bad data from bg worker %d",
+ (int)context_serial_getcmd(msg, len));
+ return 0;
+ }
+
+ lock_basic_lock(&ctx->cfglock);
+ q = context_deserialize_answer(ctx, msg, len, err);
+ if(!q) {
+ lock_basic_unlock(&ctx->cfglock);
+ /* probably simply the lookup that failed, i.e.
+ * response returned before cancel was sent out, so noerror */
+ return 1;
+ }
+ log_assert(q->async);
+
+ /* grab cb while locked */
+ if(q->cancelled) {
+ *cb = NULL;
+ *cbarg = NULL;
+ } else {
+ *cb = q->cb;
+ *cbarg = q->cb_arg;
+ }
+ if(*err) {
+ *res = NULL;
+ ub_resolve_free(q->res);
+ } else {
+ /* parse the message, extract rcode, fill result */
+ ldns_buffer* buf = ldns_buffer_new(q->msg_len);
+ struct regional* region = regional_create();
+ *res = q->res;
+ (*res)->rcode = LDNS_RCODE_SERVFAIL;
+ if(region && buf) {
+ ldns_buffer_clear(buf);
+ ldns_buffer_write(buf, q->msg, q->msg_len);
+ ldns_buffer_flip(buf);
+ libworker_enter_result(*res, buf, region,
+ q->msg_security);
+ }
+ (*res)->answer_packet = q->msg;
+ (*res)->answer_len = (int)q->msg_len;
+ q->msg = NULL;
+ ldns_buffer_free(buf);
+ regional_destroy(region);
+ }
+ q->res = NULL;
+ /* delete the q from list */
+ (void)rbtree_delete(&ctx->queries, q->node.key);
+ ctx->num_async--;
+ context_query_delete(q);
+ lock_basic_unlock(&ctx->cfglock);
+
+ if(*cb) return 2;
+ ub_resolve_free(*res);
+ return 1;
+}
+
+/** process answer from bg worker */
+static int
+process_answer(struct ub_ctx* ctx, uint8_t* msg, uint32_t len)
+{
+ int err;
+ ub_callback_t cb;
+ void* cbarg;
+ struct ub_result* res;
+ int r;
+
+ r = process_answer_detail(ctx, msg, len, &cb, &cbarg, &err, &res);
+
+ /* no locks held while calling callback, so that library is
+ * re-entrant. */
+ if(r == 2)
+ (*cb)(cbarg, err, res);
+
+ return r;
+}
+
+int
+ub_process(struct ub_ctx* ctx)
+{
+ int r;
+ uint8_t* msg;
+ uint32_t len;
+ while(1) {
+ msg = NULL;
+ lock_basic_lock(&ctx->rrpipe_lock);
+ r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
+ lock_basic_unlock(&ctx->rrpipe_lock);
+ if(r == 0)
+ return UB_PIPE;
+ else if(r == -1)
+ break;
+ if(!process_answer(ctx, msg, len)) {
+ free(msg);
+ return UB_PIPE;
+ }
+ free(msg);
+ }
+ return UB_NOERROR;
+}
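A sketch of how an application could drive this from its own event loop once asynchronous queries are outstanding, assuming the public header is installed as <unbound.h>; only ub_fd() and ub_process() from this file are used, and the select() handling is the application's own:

#include <unbound.h>
#include <sys/select.h>

/* block until the library's result pipe is readable, then deliver the
 * callbacks; returns what ub_process() returns, or -1 if select() fails */
static int wait_and_process(struct ub_ctx* ctx)
{
	fd_set rfds;
	int fd = ub_fd(ctx);
	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);
	if(select(fd+1, &rfds, NULL, NULL, NULL) < 0)
		return -1;
	return ub_process(ctx);
}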
+
+int
+ub_wait(struct ub_ctx* ctx)
+{
+ int err;
+ ub_callback_t cb;
+ void* cbarg;
+ struct ub_result* res;
+ int r;
+ uint8_t* msg;
+ uint32_t len;
+ /* this is basically the same loop as _process(), but with changes.
+ * holds the rrpipe lock and waits with tube_wait */
+ while(1) {
+ lock_basic_lock(&ctx->rrpipe_lock);
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->num_async == 0) {
+ lock_basic_unlock(&ctx->cfglock);
+ lock_basic_unlock(&ctx->rrpipe_lock);
+ break;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+
+ /* keep rrpipe locked, while
+ * o waiting for pipe readable
+ * o parsing message
+ * o possibly decrementing num_async
+ * do callback without lock
+ */
+ r = tube_wait(ctx->rr_pipe);
+ if(r) {
+ r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
+ if(r == 0) {
+ lock_basic_unlock(&ctx->rrpipe_lock);
+ return UB_PIPE;
+ }
+ if(r == -1) {
+ lock_basic_unlock(&ctx->rrpipe_lock);
+ continue;
+ }
+ r = process_answer_detail(ctx, msg, len,
+ &cb, &cbarg, &err, &res);
+ lock_basic_unlock(&ctx->rrpipe_lock);
+ free(msg);
+ if(r == 0)
+ return UB_PIPE;
+ if(r == 2)
+ (*cb)(cbarg, err, res);
+ } else {
+ lock_basic_unlock(&ctx->rrpipe_lock);
+ }
+ }
+ return UB_NOERROR;
+}
+
+int
+ub_resolve(struct ub_ctx* ctx, char* name, int rrtype,
+ int rrclass, struct ub_result** result)
+{
+ struct ctx_query* q;
+ int r;
+ *result = NULL;
+
+ lock_basic_lock(&ctx->cfglock);
+ if(!ctx->finalized) {
+ r = context_finalize(ctx);
+ if(r) {
+ lock_basic_unlock(&ctx->cfglock);
+ return r;
+ }
+ }
+ /* create new ctx_query and attempt to add to the list */
+ lock_basic_unlock(&ctx->cfglock);
+ q = context_new(ctx, name, rrtype, rrclass, NULL, NULL);
+ if(!q)
+ return UB_NOMEM;
+ /* become a resolver thread for a bit */
+
+ r = libworker_fg(ctx, q);
+ if(r) {
+ lock_basic_lock(&ctx->cfglock);
+ (void)rbtree_delete(&ctx->queries, q->node.key);
+ context_query_delete(q);
+ lock_basic_unlock(&ctx->cfglock);
+ return r;
+ }
+ q->res->answer_packet = q->msg;
+ q->res->answer_len = (int)q->msg_len;
+ q->msg = NULL;
+ *result = q->res;
+ q->res = NULL;
+
+ lock_basic_lock(&ctx->cfglock);
+ (void)rbtree_delete(&ctx->queries, q->node.key);
+ context_query_delete(q);
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+}
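A minimal synchronous caller might look like the sketch below; it assumes <unbound.h> is installed and uses the ub_result fields declared there (havedata, rcode, answer_len); the hostname is an example:

#include <stdio.h>
#include <unbound.h>

int main(void)
{
	struct ub_ctx* ctx = ub_ctx_create();
	struct ub_result* result = NULL;
	int rc;
	if(!ctx)
		return 1;
	/* rrtype 1 = A, rrclass 1 = IN */
	rc = ub_resolve(ctx, "www.example.com", 1, 1, &result);
	if(rc != 0)
		printf("resolve error: %s\n", ub_strerror(rc));
	else if(result->havedata)
		printf("answer: %d bytes, rcode %d\n",
			result->answer_len, result->rcode);
	ub_resolve_free(result);	/* safe on NULL */
	ub_ctx_delete(ctx);
	return 0;
}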
+
+int
+ub_resolve_async(struct ub_ctx* ctx, char* name, int rrtype,
+ int rrclass, void* mydata, ub_callback_t callback, int* async_id)
+{
+ struct ctx_query* q;
+ uint8_t* msg = NULL;
+ uint32_t len = 0;
+
+ if(async_id)
+ *async_id = 0;
+ lock_basic_lock(&ctx->cfglock);
+ if(!ctx->finalized) {
+ int r = context_finalize(ctx);
+ if(r) {
+ lock_basic_unlock(&ctx->cfglock);
+ return r;
+ }
+ }
+ if(!ctx->created_bg) {
+ int r;
+ ctx->created_bg = 1;
+ lock_basic_unlock(&ctx->cfglock);
+ r = libworker_bg(ctx);
+ if(r) {
+ lock_basic_lock(&ctx->cfglock);
+ ctx->created_bg = 0;
+ lock_basic_unlock(&ctx->cfglock);
+ return r;
+ }
+ } else {
+ lock_basic_unlock(&ctx->cfglock);
+ }
+
+ /* create new ctx_query and attempt to add to the list */
+ q = context_new(ctx, name, rrtype, rrclass, callback, mydata);
+ if(!q)
+ return UB_NOMEM;
+
+ /* write over pipe to background worker */
+ lock_basic_lock(&ctx->cfglock);
+ msg = context_serialize_new_query(q, &len);
+ if(!msg) {
+ (void)rbtree_delete(&ctx->queries, q->node.key);
+ ctx->num_async--;
+ context_query_delete(q);
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOMEM;
+ }
+ if(async_id)
+ *async_id = q->querynum;
+ lock_basic_unlock(&ctx->cfglock);
+
+ lock_basic_lock(&ctx->qqpipe_lock);
+ if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
+ lock_basic_unlock(&ctx->qqpipe_lock);
+ free(msg);
+ return UB_PIPE;
+ }
+ lock_basic_unlock(&ctx->qqpipe_lock);
+ free(msg);
+ return UB_NOERROR;
+}
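The asynchronous path, sketched with a callback and ub_wait(); the hostname and the use of mydata are only examples, and the callback frees the result it is handed:

#include <stdio.h>
#include <unbound.h>

/* invoked from ub_process()/ub_wait() when the answer arrives */
static void my_cb(void* mydata, int err, struct ub_result* result)
{
	*(int*)mydata = 1;		/* flag completion for the caller */
	if(err != 0) {
		printf("resolve error: %s\n", ub_strerror(err));
		return;			/* result is NULL on error */
	}
	printf("%s is %s\n", result->qname,
		result->secure ? "secure" : result->bogus ? "bogus" : "insecure");
	ub_resolve_free(result);	/* the callback owns the result */
}

int main(void)
{
	struct ub_ctx* ctx = ub_ctx_create();
	int done = 0, async_id = 0;
	if(!ctx)
		return 1;
	if(ub_resolve_async(ctx, "www.example.com", 1, 1,
		&done, my_cb, &async_id) == 0)
		ub_wait(ctx);	/* blocks until outstanding answers arrive */
	ub_ctx_delete(ctx);
	return 0;
}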
+
+int
+ub_cancel(struct ub_ctx* ctx, int async_id)
+{
+ struct ctx_query* q;
+ uint8_t* msg = NULL;
+ uint32_t len = 0;
+ lock_basic_lock(&ctx->cfglock);
+ q = (struct ctx_query*)rbtree_search(&ctx->queries, &async_id);
+ if(!q || !q->async) {
+ /* it is not there, so nothing to do */
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOID;
+ }
+ log_assert(q->async);
+ q->cancelled = 1;
+
+ /* delete it */
+ if(!ctx->dothread) { /* if forked */
+ (void)rbtree_delete(&ctx->queries, q->node.key);
+ ctx->num_async--;
+ msg = context_serialize_cancel(q, &len);
+ context_query_delete(q);
+ lock_basic_unlock(&ctx->cfglock);
+ if(!msg) {
+ return UB_NOMEM;
+ }
+ /* send cancel to background worker */
+ lock_basic_lock(&ctx->qqpipe_lock);
+ if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
+ lock_basic_unlock(&ctx->qqpipe_lock);
+ free(msg);
+ return UB_PIPE;
+ }
+ lock_basic_unlock(&ctx->qqpipe_lock);
+ free(msg);
+ } else {
+ lock_basic_unlock(&ctx->cfglock);
+ }
+ return UB_NOERROR;
+}
+
+void
+ub_resolve_free(struct ub_result* result)
+{
+ char** p;
+ if(!result) return;
+ free(result->qname);
+ if(result->canonname != result->qname)
+ free(result->canonname);
+ if(result->data)
+ for(p = result->data; *p; p++)
+ free(*p);
+ free(result->data);
+ free(result->len);
+ free(result->answer_packet);
+ free(result->why_bogus);
+ free(result);
+}
+
+const char*
+ub_strerror(int err)
+{
+ switch(err) {
+ case UB_NOERROR: return "no error";
+ case UB_SOCKET: return "socket io error";
+ case UB_NOMEM: return "out of memory";
+ case UB_SYNTAX: return "syntax error";
+ case UB_SERVFAIL: return "server failure";
+ case UB_FORKFAIL: return "could not fork";
+ case UB_INITFAIL: return "initialization failure";
+ case UB_AFTERFINAL: return "setting change after finalize";
+ case UB_PIPE: return "error in pipe communication with async";
+ case UB_READFILE: return "error reading file";
+ case UB_NOID: return "error async_id does not exist";
+ default: return "unknown error";
+ }
+}
+
+int
+ub_ctx_set_fwd(struct ub_ctx* ctx, char* addr)
+{
+ struct sockaddr_storage storage;
+ socklen_t stlen;
+ struct config_stub* s;
+ char* dupl;
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->finalized) {
+ lock_basic_unlock(&ctx->cfglock);
+ errno=EINVAL;
+ return UB_AFTERFINAL;
+ }
+ if(!addr) {
+ /* disable fwd mode - the root stub should be first. */
+ if(ctx->env->cfg->forwards &&
+ strcmp(ctx->env->cfg->forwards->name, ".") == 0) {
+ s = ctx->env->cfg->forwards;
+ ctx->env->cfg->forwards = s->next;
+ s->next = NULL;
+ config_delstubs(s);
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+
+ /* check syntax for addr */
+ if(!extstrtoaddr(addr, &storage, &stlen)) {
+ errno=EINVAL;
+ return UB_SYNTAX;
+ }
+
+ /* it parses, add root stub in front of list */
+ lock_basic_lock(&ctx->cfglock);
+ if(!ctx->env->cfg->forwards ||
+ strcmp(ctx->env->cfg->forwards->name, ".") != 0) {
+ s = calloc(1, sizeof(*s));
+ if(!s) {
+ lock_basic_unlock(&ctx->cfglock);
+ errno=ENOMEM;
+ return UB_NOMEM;
+ }
+ s->name = strdup(".");
+ if(!s->name) {
+ free(s);
+ lock_basic_unlock(&ctx->cfglock);
+ errno=ENOMEM;
+ return UB_NOMEM;
+ }
+ s->next = ctx->env->cfg->forwards;
+ ctx->env->cfg->forwards = s;
+ } else {
+ log_assert(ctx->env->cfg->forwards);
+ s = ctx->env->cfg->forwards;
+ }
+ dupl = strdup(addr);
+ if(!dupl) {
+ lock_basic_unlock(&ctx->cfglock);
+ errno=ENOMEM;
+ return UB_NOMEM;
+ }
+ if(!cfg_strlist_insert(&s->addrs, dupl)) {
+ free(dupl);
+ lock_basic_unlock(&ctx->cfglock);
+ errno=ENOMEM;
+ return UB_NOMEM;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ return UB_NOERROR;
+}
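A sketch of typical context setup before the first resolve, using the calls defined in this file; the forwarder address and file paths are examples only:

#include <unbound.h>

/* forward to a caching server, load a DNSSEC trust anchor file,
 * and pick up /etc/hosts entries; example values throughout */
static int setup_ctx(struct ub_ctx* ctx)
{
	int rc;
	if((rc = ub_ctx_set_fwd(ctx, "192.0.2.53")) != 0)
		return rc;
	/* or take the forwarders from resolv.conf instead:
	 *   rc = ub_ctx_resolvconf(ctx, NULL); */
	if((rc = ub_ctx_add_ta_file(ctx, "/var/unbound/db/root.key")) != 0)
		return rc;
	if((rc = ub_ctx_hosts(ctx, NULL)) != 0)
		return rc;
	return 0;
}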
+
+int
+ub_ctx_resolvconf(struct ub_ctx* ctx, char* fname)
+{
+ FILE* in;
+ int numserv = 0;
+ char buf[1024];
+ char* parse, *addr;
+ int r;
+
+ if(fname == NULL) {
+#if !defined(UB_ON_WINDOWS) || !defined(HAVE_WINDOWS_H)
+ fname = "/etc/resolv.conf";
+#else
+ FIXED_INFO *info;
+ ULONG buflen = sizeof(*info);
+ IP_ADDR_STRING *ptr;
+
+ info = (FIXED_INFO *) malloc(sizeof (FIXED_INFO));
+ if (info == NULL)
+ return UB_READFILE;
+
+ if (GetNetworkParams(info, &buflen) == ERROR_BUFFER_OVERFLOW) {
+ free(info);
+ info = (FIXED_INFO *) malloc(buflen);
+ if (info == NULL)
+ return UB_READFILE;
+ }
+
+ if (GetNetworkParams(info, &buflen) == NO_ERROR) {
+ int retval=0;
+ ptr = &(info->DnsServerList);
+ while (ptr) {
+ numserv++;
+ if((retval=ub_ctx_set_fwd(ctx,
+ ptr->IpAddress.String)!=0)) {
+ free(info);
+ return retval;
+ }
+ ptr = ptr->Next;
+ }
+ free(info);
+ if (numserv==0)
+ return UB_READFILE;
+ return UB_NOERROR;
+ }
+ free(info);
+ return UB_READFILE;
+#endif /* WINDOWS */
+ }
+ in = fopen(fname, "r");
+ if(!in) {
+ /* error in errno! perror(fname) */
+ return UB_READFILE;
+ }
+ while(fgets(buf, (int)sizeof(buf), in)) {
+ buf[sizeof(buf)-1] = 0;
+ parse=buf;
+ while(*parse == ' ' || *parse == '\t')
+ parse++;
+ if(strncmp(parse, "nameserver", 10) == 0) {
+ numserv++;
+ parse += 10; /* skip 'nameserver' */
+ /* skip whitespace */
+ while(*parse == ' ' || *parse == '\t')
+ parse++;
+ addr = parse;
+ /* skip [0-9a-fA-F.:]*, i.e. IP4 and IP6 address */
+ while(isxdigit(*parse) || *parse=='.' || *parse==':')
+ parse++;
+ /* terminate after the address, remove newline */
+ *parse = 0;
+
+ if((r = ub_ctx_set_fwd(ctx, addr)) != UB_NOERROR) {
+ fclose(in);
+ return r;
+ }
+ }
+ }
+ fclose(in);
+ if(numserv == 0) {
+ /* from resolv.conf(5) if none given, use localhost */
+ return ub_ctx_set_fwd(ctx, "127.0.0.1");
+ }
+ return UB_NOERROR;
+}
+
+int
+ub_ctx_hosts(struct ub_ctx* ctx, char* fname)
+{
+ FILE* in;
+ char buf[1024], ldata[1024];
+ char* parse, *addr, *name, *ins;
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->finalized) {
+ lock_basic_unlock(&ctx->cfglock);
+ errno=EINVAL;
+ return UB_AFTERFINAL;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ if(fname == NULL) {
+#if defined(UB_ON_WINDOWS) && defined(HAVE_WINDOWS_H)
+ /*
+ * If this is Windows NT/XP/2K it's in
+ * %WINDIR%\system32\drivers\etc\hosts.
+ * If this is Windows 95/98/Me it's in %WINDIR%\hosts.
+ */
+ name = getenv("WINDIR");
+ if (name != NULL) {
+ int retval=0;
+ snprintf(buf, sizeof(buf), "%s%s", name,
+ "\\system32\\drivers\\etc\\hosts");
+ if((retval=ub_ctx_hosts(ctx, buf)) !=0 ) {
+ snprintf(buf, sizeof(buf), "%s%s", name,
+ "\\hosts");
+ retval=ub_ctx_hosts(ctx, buf);
+ }
+ free(name);
+ return retval;
+ }
+ return UB_READFILE;
+#else
+ fname = "/etc/hosts";
+#endif /* WIN32 */
+ }
+ in = fopen(fname, "r");
+ if(!in) {
+ /* error in errno! perror(fname) */
+ return UB_READFILE;
+ }
+ while(fgets(buf, (int)sizeof(buf), in)) {
+ buf[sizeof(buf)-1] = 0;
+ parse=buf;
+ while(*parse == ' ' || *parse == '\t')
+ parse++;
+ if(*parse == '#')
+ continue; /* skip comment */
+ /* format: <addr> spaces <name> spaces <name> ... */
+ addr = parse;
+ /* skip addr */
+ while(isxdigit(*parse) || *parse == '.' || *parse == ':')
+ parse++;
+ if(*parse == '\n' || *parse == 0)
+ continue;
+ if(*parse == '%')
+ continue; /* ignore macOSX fe80::1%lo0 localhost */
+ if(*parse != ' ' && *parse != '\t') {
+ /* must have whitespace after address */
+ fclose(in);
+ errno=EINVAL;
+ return UB_SYNTAX;
+ }
+ *parse++ = 0; /* end delimiter for addr ... */
+ /* go to names and add them */
+ while(*parse) {
+ while(*parse == ' ' || *parse == '\t' || *parse=='\n')
+ parse++;
+ if(*parse == 0 || *parse == '#')
+ break;
+ /* skip name, allows (too) many printable characters */
+ name = parse;
+ while('!' <= *parse && *parse <= '~')
+ parse++;
+ if(*parse)
+ *parse++ = 0; /* end delimiter for name */
+ snprintf(ldata, sizeof(ldata), "%s %s %s",
+ name, str_is_ip6(addr)?"AAAA":"A", addr);
+ ins = strdup(ldata);
+ if(!ins) {
+ /* out of memory */
+ fclose(in);
+ errno=ENOMEM;
+ return UB_NOMEM;
+ }
+ lock_basic_lock(&ctx->cfglock);
+ if(!cfg_strlist_insert(&ctx->env->cfg->local_data,
+ ins)) {
+ lock_basic_unlock(&ctx->cfglock);
+ fclose(in);
+ free(ins);
+ errno=ENOMEM;
+ return UB_NOMEM;
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ }
+ }
+ fclose(in);
+ return UB_NOERROR;
+}
+
+/** finalize the context, if not already finalized */
+static int ub_ctx_finalize(struct ub_ctx* ctx)
+{
+ int res = 0;
+ lock_basic_lock(&ctx->cfglock);
+ if (!ctx->finalized) {
+ res = context_finalize(ctx);
+ }
+ lock_basic_unlock(&ctx->cfglock);
+ return res;
+}
+
+/* Print local zones and RR data */
+int ub_ctx_print_local_zones(struct ub_ctx* ctx)
+{
+ int res = ub_ctx_finalize(ctx);
+ if (res) return res;
+
+ local_zones_print(ctx->local_zones);
+
+ return UB_NOERROR;
+}
+
+/* Add a new zone */
+int ub_ctx_zone_add(struct ub_ctx* ctx, char *zone_name, char *zone_type)
+{
+ enum localzone_type t;
+ struct local_zone* z;
+ uint8_t* nm;
+ int nmlabs;
+ size_t nmlen;
+
+ int res = ub_ctx_finalize(ctx);
+ if (res) return res;
+
+ if(!local_zone_str2type(zone_type, &t)) {
+ return UB_SYNTAX;
+ }
+
+ if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
+ return UB_SYNTAX;
+ }
+
+ lock_quick_lock(&ctx->local_zones->lock);
+ if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
+ LDNS_RR_CLASS_IN))) {
+ /* already present in tree */
+ lock_rw_wrlock(&z->lock);
+ z->type = t; /* update type anyway */
+ lock_rw_unlock(&z->lock);
+ lock_quick_unlock(&ctx->local_zones->lock);
+ free(nm);
+ return UB_NOERROR;
+ }
+ if(!local_zones_add_zone(ctx->local_zones, nm, nmlen, nmlabs,
+ LDNS_RR_CLASS_IN, t)) {
+ lock_quick_unlock(&ctx->local_zones->lock);
+ return UB_NOMEM;
+ }
+ lock_quick_unlock(&ctx->local_zones->lock);
+ return UB_NOERROR;
+}
+
+/* Remove zone */
+int ub_ctx_zone_remove(struct ub_ctx* ctx, char *zone_name)
+{
+ struct local_zone* z;
+ uint8_t* nm;
+ int nmlabs;
+ size_t nmlen;
+
+ int res = ub_ctx_finalize(ctx);
+ if (res) return res;
+
+ if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
+ return UB_SYNTAX;
+ }
+
+ lock_quick_lock(&ctx->local_zones->lock);
+ if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
+ LDNS_RR_CLASS_IN))) {
+ /* present in tree */
+ local_zones_del_zone(ctx->local_zones, z);
+ }
+ lock_quick_unlock(&ctx->local_zones->lock);
+ free(nm);
+ return UB_NOERROR;
+}
+
+/* Add new RR data */
+int ub_ctx_data_add(struct ub_ctx* ctx, char *data)
+{
+ ldns_buffer* buf;
+ int res = ub_ctx_finalize(ctx);
+ if (res) return res;
+
+ lock_basic_lock(&ctx->cfglock);
+ buf = ldns_buffer_new(ctx->env->cfg->msg_buffer_size);
+ lock_basic_unlock(&ctx->cfglock);
+ if(!buf) return UB_NOMEM;
+
+ res = local_zones_add_RR(ctx->local_zones, data, buf);
+
+ ldns_buffer_free(buf);
+ return (!res) ? UB_NOMEM : UB_NOERROR;
+}
+
+/* Remove RR data */
+int ub_ctx_data_remove(struct ub_ctx* ctx, char *data)
+{
+ uint8_t* nm;
+ int nmlabs;
+ size_t nmlen;
+ int res = ub_ctx_finalize(ctx);
+ if (res) return res;
+
+ if(!parse_dname(data, &nm, &nmlen, &nmlabs))
+ return UB_SYNTAX;
+
+ local_zones_del_data(ctx->local_zones, nm, nmlen, nmlabs,
+ LDNS_RR_CLASS_IN);
+
+ free(nm);
+ return UB_NOERROR;
+}
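Taken together, the local-zone and local-data calls above let a caller answer private names without querying upstream; a sketch follows (zone name, type and record string are examples, in the same syntax as unbound.conf local-data):

#include <unbound.h>

/* serve one private name locally; "static" is a local-zone type */
static int add_local_name(struct ub_ctx* ctx)
{
	int rc;
	if((rc = ub_ctx_zone_add(ctx, "example.lan.", "static")) != 0)
		return rc;
	if((rc = ub_ctx_data_add(ctx,
		"host1.example.lan. IN A 192.0.2.10")) != 0)
		return rc;
	/* to undo later:
	 *   ub_ctx_data_remove(ctx, "host1.example.lan.");
	 *   ub_ctx_zone_remove(ctx, "example.lan."); */
	return 0;
}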
+
+const char* ub_version(void)
+{
+ return PACKAGE_VERSION;
+}
diff --git a/usr.sbin/unbound/libunbound/libworker.c b/usr.sbin/unbound/libunbound/libworker.c
new file mode 100644
index 00000000000..0a734a294c6
--- /dev/null
+++ b/usr.sbin/unbound/libunbound/libworker.c
@@ -0,0 +1,901 @@
+/*
+ * libunbound/worker.c - worker thread or process that resolves
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains the worker process or thread that performs
+ * the DNS resolving and validation. The worker is called from a procedure;
+ * if it runs in the background it continues until exit, and if it runs in
+ * the foreground it returns from the procedure when done.
+ */
+#include "config.h"
+#include <ldns/dname.h>
+#include <ldns/wire2host.h>
+#include <openssl/ssl.h>
+#include "libunbound/libworker.h"
+#include "libunbound/context.h"
+#include "libunbound/unbound.h"
+#include "services/outside_network.h"
+#include "services/mesh.h"
+#include "services/localzone.h"
+#include "services/cache/rrset.h"
+#include "services/outbound_list.h"
+#include "util/module.h"
+#include "util/regional.h"
+#include "util/random.h"
+#include "util/config_file.h"
+#include "util/netevent.h"
+#include "util/storage/lookup3.h"
+#include "util/storage/slabhash.h"
+#include "util/net_help.h"
+#include "util/data/dname.h"
+#include "util/data/msgreply.h"
+#include "util/data/msgencode.h"
+#include "util/tube.h"
+#include "iterator/iter_fwd.h"
+
+/** handle new query command for bg worker */
+static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);
+
+/** delete libworker struct */
+static void
+libworker_delete(struct libworker* w)
+{
+ if(!w) return;
+ if(w->env) {
+ outside_network_quit_prepare(w->back);
+ mesh_delete(w->env->mesh);
+ context_release_alloc(w->ctx, w->env->alloc,
+ !w->is_bg || w->is_bg_thread);
+ ldns_buffer_free(w->env->scratch_buffer);
+ regional_destroy(w->env->scratch);
+ forwards_delete(w->env->fwds);
+ ub_randfree(w->env->rnd);
+ free(w->env);
+ }
+ SSL_CTX_free(w->sslctx);
+ outside_network_delete(w->back);
+ comm_base_delete(w->base);
+ free(w);
+}
+
+/** setup fresh libworker struct */
+static struct libworker*
+libworker_setup(struct ub_ctx* ctx, int is_bg)
+{
+ unsigned int seed;
+ struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
+ struct config_file* cfg = ctx->env->cfg;
+ int* ports;
+ int numports;
+ if(!w) return NULL;
+ w->is_bg = is_bg;
+ w->ctx = ctx;
+ w->env = (struct module_env*)malloc(sizeof(*w->env));
+ if(!w->env) {
+ free(w);
+ return NULL;
+ }
+ *w->env = *ctx->env;
+ w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
+ if(!w->env->alloc) {
+ libworker_delete(w);
+ return NULL;
+ }
+ w->thread_num = w->env->alloc->thread_num;
+ alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
+ if(!w->is_bg || w->is_bg_thread) {
+ lock_basic_lock(&ctx->cfglock);
+ }
+ w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
+ w->env->scratch_buffer = ldns_buffer_new(cfg->msg_buffer_size);
+ w->env->fwds = forwards_create();
+ if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
+ forwards_delete(w->env->fwds);
+ w->env->fwds = NULL;
+ }
+ if(cfg->ssl_upstream) {
+ w->sslctx = connect_sslctx_create(NULL, NULL, NULL);
+ if(!w->sslctx) {
+ libworker_delete(w);
+ return NULL;
+ }
+ }
+ if(!w->is_bg || w->is_bg_thread) {
+ lock_basic_unlock(&ctx->cfglock);
+ }
+ if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds) {
+ libworker_delete(w);
+ return NULL;
+ }
+ w->env->worker = (struct worker*)w;
+ w->env->probe_timer = NULL;
+ seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
+ (((unsigned int)w->thread_num)<<17);
+ seed ^= (unsigned int)w->env->alloc->next_id;
+ if(!w->is_bg || w->is_bg_thread) {
+ lock_basic_lock(&ctx->cfglock);
+ }
+ if(!(w->env->rnd = ub_initstate(seed, ctx->seed_rnd))) {
+ if(!w->is_bg || w->is_bg_thread) {
+ lock_basic_unlock(&ctx->cfglock);
+ }
+ seed = 0;
+ libworker_delete(w);
+ return NULL;
+ }
+ if(!w->is_bg || w->is_bg_thread) {
+ lock_basic_unlock(&ctx->cfglock);
+ }
+ if(1) {
+ /* primitive lockout for threading: if it overwrites another
+ * thread it is like wiping the cache (which is likely empty
+ * at the start) */
+ /* note we are holding the ctx lock in normal threaded
+ * cases so that is solved properly, it is only for many ctx
+ * in different threads that this may clash */
+ static int done_raninit = 0;
+ if(!done_raninit) {
+ done_raninit = 1;
+ hash_set_raninit((uint32_t)ub_random(w->env->rnd));
+ }
+ }
+ seed = 0;
+
+ w->base = comm_base_create(0);
+ if(!w->base) {
+ libworker_delete(w);
+ return NULL;
+ }
+ if(!w->is_bg || w->is_bg_thread) {
+ lock_basic_lock(&ctx->cfglock);
+ }
+ numports = cfg_condense_ports(cfg, &ports);
+ if(numports == 0) {
+ libworker_delete(w);
+ return NULL;
+ }
+ w->back = outside_network_create(w->base, cfg->msg_buffer_size,
+ (size_t)cfg->outgoing_num_ports, cfg->out_ifs,
+ cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
+ cfg->do_tcp?cfg->outgoing_num_tcp:0,
+ w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
+ ports, numports, cfg->unwanted_threshold,
+ &libworker_alloc_cleanup, w, cfg->do_udp, w->sslctx);
+ if(!w->is_bg || w->is_bg_thread) {
+ lock_basic_unlock(&ctx->cfglock);
+ }
+ free(ports);
+ if(!w->back) {
+ libworker_delete(w);
+ return NULL;
+ }
+ w->env->mesh = mesh_create(&ctx->mods, w->env);
+ if(!w->env->mesh) {
+ libworker_delete(w);
+ return NULL;
+ }
+ w->env->send_query = &libworker_send_query;
+ w->env->detach_subs = &mesh_detach_subs;
+ w->env->attach_sub = &mesh_attach_sub;
+ w->env->kill_sub = &mesh_state_delete;
+ w->env->detect_cycle = &mesh_detect_cycle;
+ comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
+ return w;
+}
+
+/** handle cancel command for bg worker */
+static void
+handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
+{
+ struct ctx_query* q;
+ if(w->is_bg_thread) {
+ lock_basic_lock(&w->ctx->cfglock);
+ q = context_deserialize_cancel(w->ctx, buf, len);
+ lock_basic_unlock(&w->ctx->cfglock);
+ } else {
+ q = context_deserialize_cancel(w->ctx, buf, len);
+ }
+ if(!q) {
+ /* most likely the lookup simply failed, i.e. the message
+ * had already been processed and answered before the cancel
+ * arrived */
+ return;
+ }
+ q->cancelled = 1;
+ free(buf);
+}
+
+/** do control command coming into bg server */
+static void
+libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
+{
+ switch(context_serial_getcmd(msg, len)) {
+ default:
+ case UB_LIBCMD_ANSWER:
+ log_err("unknown command for bg worker %d",
+ (int)context_serial_getcmd(msg, len));
+ /* and fall through to quit */
+ case UB_LIBCMD_QUIT:
+ free(msg);
+ comm_base_exit(w->base);
+ break;
+ case UB_LIBCMD_NEWQUERY:
+ handle_newq(w, msg, len);
+ break;
+ case UB_LIBCMD_CANCEL:
+ handle_cancel(w, msg, len);
+ break;
+ }
+}
+
+/** handle control command coming into server */
+void
+libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
+ uint8_t* msg, size_t len, int err, void* arg)
+{
+ struct libworker* w = (struct libworker*)arg;
+
+ if(err != 0) {
+ free(msg);
+ /* it is of no use to go on, exit */
+ comm_base_exit(w->base);
+ return;
+ }
+ libworker_do_cmd(w, msg, len); /* also frees the buf */
+}
+
+/** the background thread func */
+static void*
+libworker_dobg(void* arg)
+{
+ /* setup */
+ uint32_t m;
+ struct libworker* w = (struct libworker*)arg;
+ struct ub_ctx* ctx;
+ if(!w) {
+ log_err("libunbound bg worker init failed, nomem");
+ return NULL;
+ }
+ ctx = w->ctx;
+ log_thread_set(&w->thread_num);
+#ifdef THREADS_DISABLED
+ /* we are forked */
+ w->is_bg_thread = 0;
+ /* close non-used parts of the pipes */
+ tube_close_write(ctx->qq_pipe);
+ tube_close_read(ctx->rr_pipe);
+#endif
+ if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
+ libworker_handle_control_cmd, w)) {
+ log_err("libunbound bg worker init failed, no bglisten");
+ return NULL;
+ }
+ if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
+ log_err("libunbound bg worker init failed, no bgwrite");
+ return NULL;
+ }
+
+ /* do the work */
+ comm_base_dispatch(w->base);
+
+ /* cleanup */
+ m = UB_LIBCMD_QUIT;
+ tube_remove_bg_listen(w->ctx->qq_pipe);
+ tube_remove_bg_write(w->ctx->rr_pipe);
+ libworker_delete(w);
+ (void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
+ (uint32_t)sizeof(m), 0);
+#ifdef THREADS_DISABLED
+ /* close pipes from forked process before exit */
+ tube_close_read(ctx->qq_pipe);
+ tube_close_write(ctx->rr_pipe);
+#endif
+ return NULL;
+}
+
+int libworker_bg(struct ub_ctx* ctx)
+{
+ struct libworker* w;
+ /* fork or threadcreate */
+ lock_basic_lock(&ctx->cfglock);
+ if(ctx->dothread) {
+ lock_basic_unlock(&ctx->cfglock);
+ w = libworker_setup(ctx, 1);
+ if(!w) return UB_NOMEM;
+ w->is_bg_thread = 1;
+#ifdef ENABLE_LOCK_CHECKS
+ w->thread_num = 1; /* for nicer DEBUG checklocks */
+#endif
+ ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
+ } else {
+ lock_basic_unlock(&ctx->cfglock);
+#ifndef HAVE_FORK
+ /* no fork on windows */
+ return UB_FORKFAIL;
+#else /* HAVE_FORK */
+ switch((ctx->bg_pid=fork())) {
+ case 0:
+ w = libworker_setup(ctx, 1);
+ if(!w) fatal_exit("out of memory");
+ /* close non-used parts of the pipes */
+ tube_close_write(ctx->qq_pipe);
+ tube_close_read(ctx->rr_pipe);
+ (void)libworker_dobg(w);
+ exit(0);
+ break;
+ case -1:
+ return UB_FORKFAIL;
+ default:
+ break;
+ }
+#endif /* HAVE_FORK */
+ }
+ return UB_NOERROR;
+}
+
+/** get msg reply struct (in temp region) */
+static struct reply_info*
+parse_reply(ldns_buffer* pkt, struct regional* region, struct query_info* qi)
+{
+ struct reply_info* rep;
+ struct msg_parse* msg;
+ if(!(msg = regional_alloc(region, sizeof(*msg)))) {
+ return NULL;
+ }
+ memset(msg, 0, sizeof(*msg));
+ ldns_buffer_set_position(pkt, 0);
+ if(parse_packet(pkt, msg, region) != 0)
+ return 0;
+ if(!parse_create_msg(pkt, msg, NULL, qi, &rep, region)) {
+ return 0;
+ }
+ return rep;
+}
+
+/** insert canonname */
+static int
+fill_canon(struct ub_result* res, uint8_t* s)
+{
+ char buf[255+2];
+ dname_str(s, buf);
+ res->canonname = strdup(buf);
+ return res->canonname != 0;
+}
+
+/** fill data into result */
+static int
+fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
+ uint8_t* finalcname, struct query_info* rq)
+{
+ size_t i;
+ struct packed_rrset_data* data;
+ if(!answer) {
+ if(finalcname) {
+ if(!fill_canon(res, finalcname))
+ return 0; /* out of memory */
+ }
+ res->data = (char**)calloc(1, sizeof(char*));
+ res->len = (int*)calloc(1, sizeof(int));
+ return (res->data && res->len);
+ }
+ data = (struct packed_rrset_data*)answer->entry.data;
+ if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
+ if(!fill_canon(res, answer->rk.dname))
+ return 0; /* out of memory */
+ } else res->canonname = NULL;
+ res->data = (char**)calloc(data->count+1, sizeof(char*));
+ res->len = (int*)calloc(data->count+1, sizeof(int));
+ if(!res->data || !res->len)
+ return 0; /* out of memory */
+ for(i=0; i<data->count; i++) {
+ /* remove rdlength from rdata */
+ res->len[i] = (int)(data->rr_len[i] - 2);
+ res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
+ if(!res->data[i])
+ return 0; /* out of memory */
+ }
+ res->data[data->count] = NULL;
+ res->len[data->count] = 0;
+ return 1;
+}
+
+/** fill result from parsed message, on error fills servfail */
+void
+libworker_enter_result(struct ub_result* res, ldns_buffer* buf,
+ struct regional* temp, enum sec_status msg_security)
+{
+ struct query_info rq;
+ struct reply_info* rep;
+ res->rcode = LDNS_RCODE_SERVFAIL;
+ rep = parse_reply(buf, temp, &rq);
+ if(!rep) {
+ log_err("cannot parse buf");
+ return; /* error parsing buf, or out of memory */
+ }
+ if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
+ reply_find_final_cname_target(&rq, rep), &rq))
+ return; /* out of memory */
+ /* rcode, havedata, nxdomain, secure, bogus */
+ res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
+ if(res->data && res->data[0])
+ res->havedata = 1;
+ if(res->rcode == LDNS_RCODE_NXDOMAIN)
+ res->nxdomain = 1;
+ if(msg_security == sec_status_secure)
+ res->secure = 1;
+ if(msg_security == sec_status_bogus)
+ res->bogus = 1;
+}
+
+/** fillup fg results */
+static void
+libworker_fillup_fg(struct ctx_query* q, int rcode, ldns_buffer* buf,
+ enum sec_status s, char* why_bogus)
+{
+ if(why_bogus)
+ q->res->why_bogus = strdup(why_bogus);
+ if(rcode != 0) {
+ q->res->rcode = rcode;
+ q->msg_security = s;
+ return;
+ }
+
+ q->res->rcode = LDNS_RCODE_SERVFAIL;
+ q->msg_security = 0;
+ q->msg = memdup(ldns_buffer_begin(buf), ldns_buffer_limit(buf));
+ q->msg_len = ldns_buffer_limit(buf);
+ if(!q->msg) {
+ return; /* the error is in the rcode */
+ }
+
+ /* canonname and results */
+ q->msg_security = s;
+ libworker_enter_result(q->res, buf, q->w->env->scratch, s);
+}
+
+void
+libworker_fg_done_cb(void* arg, int rcode, ldns_buffer* buf, enum sec_status s,
+ char* why_bogus)
+{
+ struct ctx_query* q = (struct ctx_query*)arg;
+ /* fg query is done; exit comm base */
+ comm_base_exit(q->w->base);
+
+ libworker_fillup_fg(q, rcode, buf, s, why_bogus);
+}
+
+/** setup qinfo and edns */
+static int
+setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
+ struct query_info* qinfo, struct edns_data* edns)
+{
+ ldns_rdf* rdf;
+ qinfo->qtype = (uint16_t)q->res->qtype;
+ qinfo->qclass = (uint16_t)q->res->qclass;
+ rdf = ldns_dname_new_frm_str(q->res->qname);
+ if(!rdf) {
+ return 0;
+ }
+#ifdef UNBOUND_ALLOC_LITE
+ qinfo->qname = memdup(ldns_rdf_data(rdf), ldns_rdf_size(rdf));
+ qinfo->qname_len = ldns_rdf_size(rdf);
+ ldns_rdf_deep_free(rdf);
+ rdf = 0;
+#else
+ qinfo->qname = ldns_rdf_data(rdf);
+ qinfo->qname_len = ldns_rdf_size(rdf);
+#endif
+ edns->edns_present = 1;
+ edns->ext_rcode = 0;
+ edns->edns_version = 0;
+ edns->bits = EDNS_DO;
+ if(ldns_buffer_capacity(w->back->udp_buff) < 65535)
+ edns->udp_size = (uint16_t)ldns_buffer_capacity(
+ w->back->udp_buff);
+ else edns->udp_size = 65535;
+ ldns_rdf_free(rdf);
+ return 1;
+}
+
+int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
+{
+ struct libworker* w = libworker_setup(ctx, 0);
+ uint16_t qflags, qid;
+ struct query_info qinfo;
+ struct edns_data edns;
+ if(!w)
+ return UB_INITFAIL;
+ if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
+ libworker_delete(w);
+ return UB_SYNTAX;
+ }
+ qid = 0;
+ qflags = BIT_RD;
+ q->w = w;
+ /* see if there is a fixed answer */
+ ldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
+ ldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
+ if(local_zones_answer(ctx->local_zones, &qinfo, &edns,
+ w->back->udp_buff, w->env->scratch)) {
+ regional_free_all(w->env->scratch);
+ libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
+ w->back->udp_buff, sec_status_insecure, NULL);
+ libworker_delete(w);
+ free(qinfo.qname);
+ return UB_NOERROR;
+ }
+ /* process new query */
+ if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
+ w->back->udp_buff, qid, libworker_fg_done_cb, q)) {
+ free(qinfo.qname);
+ return UB_NOMEM;
+ }
+ free(qinfo.qname);
+
+ /* wait for reply */
+ comm_base_dispatch(w->base);
+
+ libworker_delete(w);
+ return UB_NOERROR;
+}
+
+/** add result to the bg worker result queue */
+static void
+add_bg_result(struct libworker* w, struct ctx_query* q, ldns_buffer* pkt,
+ int err, char* reason)
+{
+ uint8_t* msg = NULL;
+ uint32_t len = 0;
+
+ /* serialize and delete unneeded q */
+ if(w->is_bg_thread) {
+ lock_basic_lock(&w->ctx->cfglock);
+ if(reason)
+ q->res->why_bogus = strdup(reason);
+ if(pkt) {
+ q->msg_len = ldns_buffer_remaining(pkt);
+ q->msg = memdup(ldns_buffer_begin(pkt), q->msg_len);
+ if(!q->msg)
+ msg = context_serialize_answer(q, UB_NOMEM,
+ NULL, &len);
+ else msg = context_serialize_answer(q, err,
+ NULL, &len);
+ } else msg = context_serialize_answer(q, err, NULL, &len);
+ lock_basic_unlock(&w->ctx->cfglock);
+ } else {
+ if(reason)
+ q->res->why_bogus = strdup(reason);
+ msg = context_serialize_answer(q, err, pkt, &len);
+ (void)rbtree_delete(&w->ctx->queries, q->node.key);
+ w->ctx->num_async--;
+ context_query_delete(q);
+ }
+
+ if(!msg) {
+ log_err("out of memory for async answer");
+ return;
+ }
+ if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
+ log_err("out of memory for async answer");
+ return;
+ }
+}
+
+void
+libworker_bg_done_cb(void* arg, int rcode, ldns_buffer* buf, enum sec_status s,
+ char* why_bogus)
+{
+ struct ctx_query* q = (struct ctx_query*)arg;
+
+ if(q->cancelled) {
+ if(q->w->is_bg_thread) {
+ /* delete it now */
+ struct ub_ctx* ctx = q->w->ctx;
+ lock_basic_lock(&ctx->cfglock);
+ (void)rbtree_delete(&ctx->queries, q->node.key);
+ ctx->num_async--;
+ context_query_delete(q);
+ lock_basic_unlock(&ctx->cfglock);
+ }
+ /* cancelled, do not give answer */
+ return;
+ }
+ q->msg_security = s;
+ if(rcode != 0) {
+ error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
+ }
+ add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus);
+}
+
+
+/** handle new query command for bg worker */
+static void
+handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
+{
+ uint16_t qflags, qid;
+ struct query_info qinfo;
+ struct edns_data edns;
+ struct ctx_query* q;
+ if(w->is_bg_thread) {
+ lock_basic_lock(&w->ctx->cfglock);
+ q = context_lookup_new_query(w->ctx, buf, len);
+ lock_basic_unlock(&w->ctx->cfglock);
+ } else {
+ q = context_deserialize_new_query(w->ctx, buf, len);
+ }
+ free(buf);
+ if(!q) {
+ log_err("failed to deserialize newq");
+ return;
+ }
+ if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
+ add_bg_result(w, q, NULL, UB_SYNTAX, NULL);
+ return;
+ }
+ qid = 0;
+ qflags = BIT_RD;
+ /* see if there is a fixed answer */
+ ldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
+ ldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
+ if(local_zones_answer(w->ctx->local_zones, &qinfo, &edns,
+ w->back->udp_buff, w->env->scratch)) {
+ regional_free_all(w->env->scratch);
+ q->msg_security = sec_status_insecure;
+ add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL);
+ free(qinfo.qname);
+ return;
+ }
+ q->w = w;
+ /* process new query */
+ if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
+ w->back->udp_buff, qid, libworker_bg_done_cb, q)) {
+ add_bg_result(w, q, NULL, UB_NOMEM, NULL);
+ }
+ free(qinfo.qname);
+}
+
+void libworker_alloc_cleanup(void* arg)
+{
+ struct libworker* w = (struct libworker*)arg;
+ slabhash_clear(&w->env->rrset_cache->table);
+ slabhash_clear(w->env->msg_cache);
+}
+
+/** compare outbound entry qstates */
+static int
+outbound_entry_compare(void* a, void* b)
+{
+ struct outbound_entry* e1 = (struct outbound_entry*)a;
+ struct outbound_entry* e2 = (struct outbound_entry*)b;
+ if(e1->qstate == e2->qstate)
+ return 1;
+ return 0;
+}
+
+struct outbound_entry* libworker_send_query(uint8_t* qname, size_t qnamelen,
+ uint16_t qtype, uint16_t qclass, uint16_t flags, int dnssec,
+ int want_dnssec, struct sockaddr_storage* addr, socklen_t addrlen,
+ uint8_t* zone, size_t zonelen, struct module_qstate* q)
+{
+ struct libworker* w = (struct libworker*)q->env->worker;
+ struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
+ q->region, sizeof(*e));
+ if(!e)
+ return NULL;
+ e->qstate = q;
+ e->qsent = outnet_serviced_query(w->back, qname,
+ qnamelen, qtype, qclass, flags, dnssec, want_dnssec,
+ q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, addr,
+ addrlen, zone, zonelen, libworker_handle_service_reply, e,
+ w->back->udp_buff, &outbound_entry_compare);
+ if(!e->qsent) {
+ return NULL;
+ }
+ return e;
+}
+
+int
+libworker_handle_reply(struct comm_point* c, void* arg, int error,
+ struct comm_reply* reply_info)
+{
+ struct module_qstate* q = (struct module_qstate*)arg;
+ struct libworker* lw = (struct libworker*)q->env->worker;
+ struct outbound_entry e;
+ e.qstate = q;
+ e.qsent = NULL;
+
+ if(error != 0) {
+ mesh_report_reply(lw->env->mesh, &e, reply_info, error);
+ return 0;
+ }
+ /* sanity check. */
+ if(!LDNS_QR_WIRE(ldns_buffer_begin(c->buffer))
+ || LDNS_OPCODE_WIRE(ldns_buffer_begin(c->buffer)) !=
+ LDNS_PACKET_QUERY
+ || LDNS_QDCOUNT(ldns_buffer_begin(c->buffer)) > 1) {
+ /* error becomes timeout for the module as if this reply
+ * never arrived. */
+ mesh_report_reply(lw->env->mesh, &e, reply_info,
+ NETEVENT_TIMEOUT);
+ return 0;
+ }
+ mesh_report_reply(lw->env->mesh, &e, reply_info, NETEVENT_NOERROR);
+ return 0;
+}
+
+int
+libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
+ struct comm_reply* reply_info)
+{
+ struct outbound_entry* e = (struct outbound_entry*)arg;
+ struct libworker* lw = (struct libworker*)e->qstate->env->worker;
+
+ if(error != 0) {
+ mesh_report_reply(lw->env->mesh, e, reply_info, error);
+ return 0;
+ }
+ /* sanity check. */
+ if(!LDNS_QR_WIRE(ldns_buffer_begin(c->buffer))
+ || LDNS_OPCODE_WIRE(ldns_buffer_begin(c->buffer)) !=
+ LDNS_PACKET_QUERY
+ || LDNS_QDCOUNT(ldns_buffer_begin(c->buffer)) > 1) {
+ /* error becomes timeout for the module as if this reply
+ * never arrived. */
+ mesh_report_reply(lw->env->mesh, e, reply_info,
+ NETEVENT_TIMEOUT);
+ return 0;
+ }
+ mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
+ return 0;
+}
+
+/* --- fake callbacks for fptr_wlist to work --- */
+void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
+ uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
+ int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
+{
+ log_assert(0);
+}
+
+int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
+ void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
+ struct comm_reply* ATTR_UNUSED(repinfo))
+{
+ log_assert(0);
+ return 0;
+}
+
+int worker_handle_reply(struct comm_point* ATTR_UNUSED(c),
+ void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
+ struct comm_reply* ATTR_UNUSED(reply_info))
+{
+ log_assert(0);
+ return 0;
+}
+
+int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
+ void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
+ struct comm_reply* ATTR_UNUSED(reply_info))
+{
+ log_assert(0);
+ return 0;
+}
+
+int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
+ void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
+ struct comm_reply* ATTR_UNUSED(repinfo))
+{
+ log_assert(0);
+ return 0;
+}
+
+int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
+ void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
+ struct comm_reply* ATTR_UNUSED(repinfo))
+{
+ log_assert(0);
+ return 0;
+}
+
+void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
+{
+ log_assert(0);
+}
+
+struct outbound_entry* worker_send_query(uint8_t* ATTR_UNUSED(qname),
+ size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
+ uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
+ int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
+ struct sockaddr_storage* ATTR_UNUSED(addr),
+ socklen_t ATTR_UNUSED(addrlen), struct module_qstate* ATTR_UNUSED(q))
+{
+ log_assert(0);
+ return 0;
+}
+
+void
+worker_alloc_cleanup(void* ATTR_UNUSED(arg))
+{
+ log_assert(0);
+}
+
+void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
+{
+ log_assert(0);
+}
+
+void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
+{
+ log_assert(0);
+}
+
+int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
+{
+ log_assert(0);
+ return 0;
+}
+
+int
+codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
+{
+ log_assert(0);
+ return 0;
+}
+
+int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
+{
+ log_assert(0);
+ return 0;
+}
+
+void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
+{
+ log_assert(0);
+}
+
+#ifdef UB_ON_WINDOWS
+void
+worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
+ ATTR_UNUSED(arg)) {
+ log_assert(0);
+}
+
+void
+wsvc_cron_cb(void* ATTR_UNUSED(arg))
+{
+ log_assert(0);
+}
+#endif /* UB_ON_WINDOWS */
diff --git a/usr.sbin/unbound/libunbound/libworker.h b/usr.sbin/unbound/libunbound/libworker.h
new file mode 100644
index 00000000000..c3896fc5a82
--- /dev/null
+++ b/usr.sbin/unbound/libunbound/libworker.h
@@ -0,0 +1,170 @@
+/*
+ * libunbound/libworker.h - worker thread or process that resolves
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains the worker process or thread that performs
+ * the DNS resolving and validation. The worker is invoked by a
+ * procedure call; a background worker keeps running until exit, while a
+ * foreground worker returns from the procedure when the work is done.
+ */
+#ifndef LIBUNBOUND_WORKER_H
+#define LIBUNBOUND_WORKER_H
+#include "util/data/packed_rrset.h"
+struct ub_ctx;
+struct ub_result;
+struct module_env;
+struct comm_base;
+struct outside_network;
+struct ub_randstate;
+struct ctx_query;
+struct outbound_entry;
+struct module_qstate;
+struct comm_point;
+struct comm_reply;
+struct regional;
+struct tube;
+
+/**
+ * The library-worker status structure
+ * Internal to the worker.
+ */
+struct libworker {
+ /** every worker has a unique thread_num. (first in struct) */
+ int thread_num;
+ /** context we are operating under */
+ struct ub_ctx* ctx;
+
+ /** is this the bg worker? */
+ int is_bg;
+ /** is this a bg worker that is threaded (not forked)? */
+ int is_bg_thread;
+
+ /** copy of the module environment with worker local entries. */
+ struct module_env* env;
+ /** the event base this worker works with */
+ struct comm_base* base;
+ /** the backside outside network interface to the auth servers */
+ struct outside_network* back;
+ /** random() table for this worker. */
+ struct ub_randstate* rndstate;
+ /** sslcontext for SSL wrapped DNS over TCP queries */
+ void* sslctx;
+};
+
+/**
+ * Create a background worker
+ * @param ctx: is updated with pid/tid of the background worker.
+ * a new allocation cache is obtained from ctx. It contains the
+ * threadnumber and unique id for further (shared) cache insertions.
+ * @return 0 if OK, else error.
+ * Further communication is done via the pipes in ctx.
+ */
+int libworker_bg(struct ub_ctx* ctx);
+
+/**
+ * Create a foreground worker.
+ * This worker will join the threadpool of resolver threads.
+ * It exits when the query answer has been obtained (or error).
+ * This routine blocks until the worker is finished.
+ * @param ctx: new allocation cache obtained and returned to it.
+ * @param q: query (result is stored in here).
+ * @return 0 if finished OK, else error.
+ */
+int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q);
+
+/** cleanup the cache to remove all rrset IDs from it, arg is libworker */
+void libworker_alloc_cleanup(void* arg);
+
+/**
+ * Worker service routine to send serviced queries to authoritative servers.
+ * @param qname: query name. (host order)
+ * @param qnamelen: length in bytes of qname, including trailing 0.
+ * @param qtype: query type. (host order)
+ * @param qclass: query class. (host order)
+ * @param flags: host order flags word, with opcode and CD bit.
+ * @param dnssec: if set, EDNS record will have DO bit set.
+ * @param want_dnssec: signatures needed.
+ * @param addr: where to.
+ * @param addrlen: length of addr.
+ * @param zone: delegation point name.
+ * @param zonelen: length of zone name wireformat dname.
+ * @param q: which query state to reactivate upon return.
+ * @return: false on failure (memory or socket related). no query was
+ * sent.
+ */
+struct outbound_entry* libworker_send_query(uint8_t* qname, size_t qnamelen,
+ uint16_t qtype, uint16_t qclass, uint16_t flags, int dnssec,
+ int want_dnssec, struct sockaddr_storage* addr, socklen_t addrlen,
+ uint8_t* zone, size_t zonelen, struct module_qstate* q);
+
+/** process incoming replies from the network */
+int libworker_handle_reply(struct comm_point* c, void* arg, int error,
+ struct comm_reply* reply_info);
+
+/** process incoming serviced query replies from the network */
+int libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
+ struct comm_reply* reply_info);
+
+/** handle control command coming into server */
+void libworker_handle_control_cmd(struct tube* tube, uint8_t* msg, size_t len,
+ int err, void* arg);
+
+/** handle opportunity to write result back */
+void libworker_handle_result_write(struct tube* tube, uint8_t* msg, size_t len,
+ int err, void* arg);
+
+/** mesh callback with fg results */
+void libworker_fg_done_cb(void* arg, int rcode, ldns_buffer* buf,
+ enum sec_status s, char* why_bogus);
+
+/** mesh callback with bg results */
+void libworker_bg_done_cb(void* arg, int rcode, ldns_buffer* buf,
+ enum sec_status s, char* why_bogus);
+
+/**
+ * fill result from parsed message, on error fills servfail
+ * @param res: is clear at start, filled in at end.
+ * @param buf: contains DNS message.
+ * @param temp: temporary buffer for parse.
+ * @param msg_security: security status of the DNS message.
+ * On error, the res may contain a different status
+ * (out of memory is not secure, not bogus).
+ */
+void libworker_enter_result(struct ub_result* res, ldns_buffer* buf,
+ struct regional* temp, enum sec_status msg_security);
+
+#endif /* LIBUNBOUND_WORKER_H */
diff --git a/usr.sbin/unbound/libunbound/ubsyms.def b/usr.sbin/unbound/libunbound/ubsyms.def
new file mode 100644
index 00000000000..7e3fdd1e25e
--- /dev/null
+++ b/usr.sbin/unbound/libunbound/ubsyms.def
@@ -0,0 +1,29 @@
+ub_ctx_create
+ub_ctx_delete
+ub_ctx_get_option
+ub_ctx_set_option
+ub_ctx_config
+ub_ctx_set_fwd
+ub_ctx_resolvconf
+ub_ctx_hosts
+ub_ctx_add_ta
+ub_ctx_add_ta_file
+ub_ctx_trustedkeys
+ub_ctx_debugout
+ub_ctx_debuglevel
+ub_ctx_async
+ub_poll
+ub_wait
+ub_fd
+ub_process
+ub_resolve
+ub_resolve_async
+ub_cancel
+ub_resolve_free
+ub_strerror
+ub_ctx_print_local_zones
+ub_ctx_zone_add
+ub_ctx_zone_remove
+ub_ctx_data_add
+ub_ctx_data_remove
+ub_version
diff --git a/usr.sbin/unbound/libunbound/unbound.h b/usr.sbin/unbound/libunbound/unbound.h
new file mode 100644
index 00000000000..085f9f53415
--- /dev/null
+++ b/usr.sbin/unbound/libunbound/unbound.h
@@ -0,0 +1,556 @@
+/*
+ * unbound.h - unbound validating resolver public API
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains functions to resolve DNS queries and
+ * validate the answers, both synchronously and asynchronously.
+ *
+ * There are several ways to use this interface from an application wishing
+ * to perform (validated) DNS lookups.
+ *
+ * All start with
+ * ctx = ub_ctx_create();
+ * err = ub_ctx_add_ta(ctx, "...");
+ * err = ub_ctx_add_ta(ctx, "...");
+ * ... some lookups
+ * ... call ub_ctx_delete(ctx); when you want to stop.
+ *
+ * Application not threaded. Blocking.
+ * int err = ub_resolve(ctx, "www.example.com", ...
+ * if(err) fprintf(stderr, "lookup error: %s\n", ub_strerror(err));
+ * ... use the answer
+ *
+ * Application not threaded. Non-blocking ('asynchronous').
+ * err = ub_resolve_async(ctx, "www.example.com", ... my_callback);
+ * ... application resumes processing ...
+ * ... and when either ub_poll(ctx) is true
+ * ... or when the file descriptor ub_fd(ctx) is readable,
+ * ... or whenever, the app calls ...
+ * ub_process(ctx);
+ * ... if no result is ready, the app resumes processing above,
+ * ... or process() calls my_callback() with results.
+ *
+ * ... if the application has nothing more to do, wait for answer
+ * ub_wait(ctx);
+ *
+ * Application threaded. Blocking.
+ * Blocking, same as above. The current thread does the work.
+ * Multiple threads can use the *same context*, each does work and uses
+ * shared cache data from the context.
+ *
+ * Application threaded. Non-blocking ('asynchronous').
+ * ... setup threaded-asynchronous config option
+ * err = ub_ctx_async(ctx, 1);
+ * ... same as async for non-threaded
+ * ... the callbacks are called in the thread that calls process(ctx)
+ *
+ * If no threading is compiled in, the above async example uses fork(2) to
+ * create a process to perform the work. The forked process exits when the
+ * calling process exits, or ub_ctx_delete() is called.
+ * Otherwise, for asynchronous with threading, a worker thread is created.
+ *
+ * The blocking calls use shared ctx-cache when threaded. Thus
+ * ub_resolve() and ub_resolve_async() followed by ub_wait() are
+ * not the same. The first makes the current thread do the work, setting
+ * up buffers, etc, to perform the work (but using shared cache data).
+ * The second calls another worker thread (or process) to perform the work.
+ * And no buffers need to be set up, but a context-switch happens.
+ */
+#ifndef _UB_UNBOUND_H
+#define _UB_UNBOUND_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * The validation context is created to hold the resolver status,
+ * validation keys and a small cache (containing messages, rrsets,
+ * roundtrip times, trusted keys, lameness information).
+ *
+ * Its contents are internally defined.
+ */
+struct ub_ctx;
+
+/**
+ * The validation and resolution results.
+ * Allocated by the resolver, and need to be freed by the application
+ * with ub_resolve_free().
+ */
+struct ub_result {
+ /** The original question, name text string. */
+ char* qname;
+ /** the type asked for */
+ int qtype;
+ /** the class asked for */
+ int qclass;
+
+ /**
+ * a list of network order DNS rdata items, terminated with a
+ * NULL pointer, so that data[0] is the first result entry,
+ * data[1] the second, and the last entry is NULL.
+ * If there was no data, data[0] is NULL.
+ */
+ char** data;
+
+ /** the length in bytes of the data items, len[i] for data[i] */
+ int* len;
+
+ /**
+ * canonical name for the result (the final cname).
+ * zero terminated string.
+ * May be NULL if no canonical name exists.
+ */
+ char* canonname;
+
+ /**
+ * DNS RCODE for the result. May contain additional error code if
+ * there was no data due to an error. 0 (NOERROR) if okay.
+ */
+ int rcode;
+
+ /**
+ * The DNS answer packet. Network formatted. Can contain DNSSEC types.
+ */
+ void* answer_packet;
+ /** length of the answer packet in octets. */
+ int answer_len;
+
+ /**
+ * If there is any data, this is true.
+ * If false, there was no data (nxdomain may be true, rcode can be set).
+ */
+ int havedata;
+
+ /**
+ * If there was no data, and the domain did not exist, this is true.
+ * If it is false, and there was no data, then the domain name
+ * is purported to exist, but the requested data type is not available.
+ */
+ int nxdomain;
+
+ /**
+ * True, if the result is validated securely.
+ * False, if validation failed or domain queried has no security info.
+ *
+ * It is possible to get a result with no data (havedata is false),
+ * and secure is true. This means that the non-existence of the data
+ * was cryptographically proven (with signatures).
+ */
+ int secure;
+
+ /**
+ * If the result was not secure (secure==0), and this result is due
+ * to a security failure, bogus is true.
+ * This means the data has been actively tampered with, signatures
+ * failed, expected signatures were not present, timestamps on
+ * signatures were out of date and so on.
+ *
+ * If !secure and !bogus, this can happen if the data is not secure
+ * because security is disabled for that domain name.
+ * This means the data is from a domain where data is not signed.
+ */
+ int bogus;
+
+ /**
+ * If the result is bogus this contains a string (zero terminated)
+ * that describes the failure. There may be other errors as well
+ * as the one described, the description may not be perfectly accurate.
+ * Is NULL if the result is not bogus.
+ */
+ char* why_bogus;
+};
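+
+/*
+ * Illustrative sketch, not part of the library API: one way an application
+ * could walk a filled-in ub_result. It assumes an A-record query (rrtype 1),
+ * so every data[i] holds a 4-octet address in network byte order, and it
+ * assumes <stdio.h> and <arpa/inet.h> are included.
+ *
+ *   void print_result(struct ub_result* r)
+ *   {
+ *       int i;
+ *       if(r->bogus) {
+ *           printf("bogus answer: %s\n", r->why_bogus);
+ *           return;
+ *       }
+ *       if(!r->havedata) {
+ *           printf("no data, rcode %d\n", r->rcode);
+ *           return;
+ *       }
+ *       for(i = 0; r->data[i] != NULL; i++) {
+ *           char ip[INET_ADDRSTRLEN];
+ *           if(r->len[i] == 4 &&
+ *               inet_ntop(AF_INET, r->data[i], ip, sizeof(ip)))
+ *               printf("%s %s\n", r->secure?"secure":"insecure", ip);
+ *       }
+ *   }
+ */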
+
+/**
+ * Callback for results of async queries.
+ * The readable function definition looks like:
+ * void my_callback(void* my_arg, int err, struct ub_result* result);
+ * It is called with
+ * void* my_arg: your pointer to a (struct of) data of your choice,
+ * or NULL.
+ * int err: if 0 all is OK, otherwise an error occurred and no results
+ * are forthcoming.
+ * struct ub_result* result: pointer to a more detailed result structure.
+ * This structure is allocated on the heap and needs to be
+ * freed with ub_resolve_free(result);
+ */
+typedef void (*ub_callback_t)(void*, int, struct ub_result*);
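+
+/*
+ * Illustrative sketch, not part of the library API: a minimal callback that
+ * matches ub_callback_t. The name my_callback and the use of mydata as a
+ * pointer to an int completion flag are assumptions of this example;
+ * <stdio.h> is assumed to be included.
+ *
+ *   void my_callback(void* mydata, int err, struct ub_result* result)
+ *   {
+ *       int* done = (int*)mydata;
+ *       *done = 1;
+ *       if(err != 0) {
+ *           printf("resolve error: %s\n", ub_strerror(err));
+ *           return;
+ *       }
+ *       printf("%s: havedata=%d secure=%d\n",
+ *           result->qname, result->havedata, result->secure);
+ *       ub_resolve_free(result);
+ *   }
+ */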
+
+/**
+ * Create a resolving and validation context.
+ * The information from /etc/resolv.conf and /etc/hosts is not utilised by
+ * default. Use ub_ctx_resolvconf and ub_ctx_hosts to read them.
+ * @return a new context. default initialisation.
+ * returns NULL on error.
+ */
+struct ub_ctx* ub_ctx_create(void);
+
+/**
+ * Destroy a validation context and free all its resources.
+ * Outstanding async queries are killed and callbacks are not called for them.
+ * @param ctx: context to delete.
+ */
+void ub_ctx_delete(struct ub_ctx* ctx);
+
+/**
+ * Set an option for the context.
+ * @param ctx: context.
+ * @param opt: option name from the unbound.conf config file format.
+ * (not all settings applicable). The name includes the trailing ':'
+ * for example ub_ctx_set_option(ctx, "logfile:", "mylog.txt");
+ * This is a power-users interface that lets you specify all sorts
+ * of options.
+ * For some specific options, such as adding trust anchors, special
+ * routines exist.
+ * @param val: value of the option.
+ * @return: 0 if OK, else error.
+ */
+int ub_ctx_set_option(struct ub_ctx* ctx, char* opt, char* val);
+
+/**
+ * Get an option from the context.
+ * @param ctx: context.
+ * @param opt: option name from the unbound.conf config file format.
+ * (not all settings applicable). The name excludes the trailing ':'
+ * for example ub_ctx_get_option(ctx, "logfile", &result);
+ * This is a power-users interface that lets you specify all sorts
+ * of options.
+ * @param str: the string is malloced and returned here. NULL on error.
+ * The caller must free() the string. In cases with multiple
+ * entries (auto-trust-anchor-file), a newline delimited list is
+ * returned in the string.
+ * @return 0 if OK else an error code (malloc failure, syntax error).
+ */
+int ub_ctx_get_option(struct ub_ctx* ctx, char* opt, char** str);
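+
+/*
+ * Illustrative sketch, not part of the library API: note the asymmetry in
+ * the option name, the set call takes the trailing ':' and the get call
+ * does not. The option "do-ip6" is only an example name; <stdio.h> and
+ * <stdlib.h> are assumed for printf and free.
+ *
+ *   char* val = NULL;
+ *   if(ub_ctx_set_option(ctx, "do-ip6:", "no") != 0)
+ *       printf("could not set option\n");
+ *   if(ub_ctx_get_option(ctx, "do-ip6", &val) == 0) {
+ *       printf("do-ip6 is %s\n", val);
+ *       free(val);
+ *   }
+ */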
+
+/**
+ * setup configuration for the given context.
+ * @param ctx: context.
+ * @param fname: unbound config file (not all settings applicable).
+ * This is a power-users interface that lets you specify all sorts
+ * of options.
+ * For some specific options, such as adding trust anchors, special
+ * routines exist.
+ * @return: 0 if OK, else error.
+ */
+int ub_ctx_config(struct ub_ctx* ctx, char* fname);
+
+/**
+ * Set machine to forward DNS queries to, the caching resolver to use.
+ * IP4 or IP6 address. Forwards all DNS requests to that machine, which
+ * is expected to run a recursive resolver. If the proxy is not
+ * DNSSEC-capable, validation may fail. Can be called several times, in
+ * that case the addresses are used as backup servers.
+ *
+ * To read the list of nameservers from /etc/resolv.conf (from DHCP or so),
+ * use the call ub_ctx_resolvconf.
+ *
+ * @param ctx: context.
+ * At this time it is only possible to set configuration before the
+ * first resolve is done.
+ * @param addr: address, IP4 or IP6 in string format.
+ * If the addr is NULL, forwarding is disabled.
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_set_fwd(struct ub_ctx* ctx, char* addr);
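+
+/*
+ * Illustrative sketch, not part of the library API: forward all queries to
+ * two upstream resolvers. The addresses are documentation addresses
+ * (192.0.2.0/24 and 2001:db8::/32), not real servers; <stdio.h> is assumed.
+ *
+ *   if(ub_ctx_set_fwd(ctx, "192.0.2.1") != 0 ||
+ *       ub_ctx_set_fwd(ctx, "2001:db8::1") != 0)
+ *       printf("could not set forwarder\n");
+ */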
+
+/**
+ * Read list of nameservers to use from the filename given.
+ * Usually "/etc/resolv.conf". Uses those nameservers as caching proxies.
+ * If they do not support DNSSEC, validation may fail.
+ *
+ * Only nameservers are picked up, the searchdomain, ndots and other
+ * settings from resolv.conf(5) are ignored.
+ *
+ * @param ctx: context.
+ * At this time it is only possible to set configuration before the
+ * first resolve is done.
+ * @param fname: file name string. If NULL "/etc/resolv.conf" is used.
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_resolvconf(struct ub_ctx* ctx, char* fname);
+
+/**
+ * Read list of hosts from the filename given.
+ * Usually "/etc/hosts".
+ * These addresses are not flagged as DNSSEC secure when queried for.
+ *
+ * @param ctx: context.
+ * At this time it is only possible to set configuration before the
+ * first resolve is done.
+ * @param fname: file name string. If NULL "/etc/hosts" is used.
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_hosts(struct ub_ctx* ctx, char* fname);
+
+/**
+ * Add a trust anchor to the given context.
+ * The trust anchor is a string, on one line, that holds a valid DNSKEY or
+ * DS RR.
+ * @param ctx: context.
+ * At this time it is only possible to add trusted keys before the
+ * first resolve is done.
+ * @param ta: string, with zone-format RR on one line.
+ * [domainname] [TTL optional] [type] [class optional] [rdata contents]
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_add_ta(struct ub_ctx* ctx, char* ta);
+
+/**
+ * Add trust anchors to the given context.
+ * Pass name of a file with DS and DNSKEY records (like from dig or drill).
+ * @param ctx: context.
+ * At this time it is only possible to add trusted keys before the
+ * first resolve is done.
+ * @param fname: filename of file with keyfile with trust anchors.
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_add_ta_file(struct ub_ctx* ctx, char* fname);
+
+/**
+ * Add trust anchors to the given context.
+ * Pass the name of a bind-style config file with trusted-keys{}.
+ * @param ctx: context.
+ * At this time it is only possible to add trusted keys before the
+ * first resolve is done.
+ * @param fname: filename of file with bind-style config entries with trust
+ * anchors.
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_trustedkeys(struct ub_ctx* ctx, char* fname);
+
+/**
+ * Set debug output (and error output) to the specified stream.
+ * Pass NULL to disable. Default is stderr.
+ * @param ctx: context.
+ * @param out: FILE* out file stream to log to.
+ * Type void* to avoid stdio dependency of this header file.
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_debugout(struct ub_ctx* ctx, void* out);
+
+/**
+ * Set debug verbosity for the context
+ * Output is directed to stderr.
+ * @param ctx: context.
+ * @param d: debug level, 0 is off, 1 is very minimal, 2 is detailed,
+ * and 3 is lots.
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_debuglevel(struct ub_ctx* ctx, int d);
+
+/**
+ * Set a context behaviour for asynchronous action.
+ * @param ctx: context.
+ * @param dothread: if true, enables threading and a call to resolve_async()
+ * creates a thread to handle work in the background.
+ * If false, a process is forked to handle work in the background.
+ * Changes to this setting after async() calls have been made have
+ * no effect (delete and re-create the context to change).
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_async(struct ub_ctx* ctx, int dothread);
+
+/**
+ * Poll a context to see if it has any new results
+ * Do not poll in a loop, instead extract the fd below to poll for readiness,
+ * and then check, or wait using the wait routine.
+ * @param ctx: context.
+ * @return: 0 if nothing to read, or nonzero if a result is available.
+ * If nonzero, call ub_process() to do callbacks.
+ */
+int ub_poll(struct ub_ctx* ctx);
+
+/**
+ * Wait for a context to finish with results. Calls ub_process() after
+ * the wait for you. After the wait, there are no more outstanding
+ * asynchronous queries.
+ * @param ctx: context.
+ * @return: 0 if OK, else error.
+ */
+int ub_wait(struct ub_ctx* ctx);
+
+/**
+ * Get file descriptor. Wait for it to become readable, at this point
+ * answers are returned from the asynchronous validating resolver.
+ * Then call the ub_process to continue processing.
+ * This routine works immediately after context creation, the fd
+ * does not change.
+ * @param ctx: context.
+ * @return: -1 on error, or file descriptor to use select(2) with.
+ */
+int ub_fd(struct ub_ctx* ctx);
+
+/**
+ * Call this routine to continue processing results from the validating
+ * resolver (when the fd becomes readable).
+ * Will perform necessary callbacks.
+ * @param ctx: context
+ * @return: 0 if OK, else error.
+ */
+int ub_process(struct ub_ctx* ctx);
+
+/**
+ * Perform resolution and validation of the target name.
+ * @param ctx: context.
+ * The context is finalized, and can no longer accept config changes.
+ * @param name: domain name in text format (a zero terminated text string).
+ * @param rrtype: type of RR in host order, 1 is A (address).
+ * @param rrclass: class of RR in host order, 1 is IN (for internet).
+ * @param result: the result data is returned in a newly allocated result
+ * structure. May be NULL on return, return value is set to an error
+ * in that case (out of memory).
+ * @return 0 if OK, else error.
+ */
+int ub_resolve(struct ub_ctx* ctx, char* name, int rrtype,
+ int rrclass, struct ub_result** result);
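+
+/*
+ * Illustrative sketch, not part of the library API: a complete blocking
+ * lookup of the A record (rrtype 1, rrclass 1) for a name, using only calls
+ * declared in this header; <stdio.h> is assumed.
+ *
+ *   struct ub_ctx* ctx = ub_ctx_create();
+ *   struct ub_result* result = NULL;
+ *   int err;
+ *   if(!ctx)
+ *       return 1;
+ *   err = ub_resolve(ctx, "www.example.com", 1, 1, &result);
+ *   if(err != 0)
+ *       printf("resolve error: %s\n", ub_strerror(err));
+ *   else {
+ *       if(result->havedata)
+ *           printf("first rdata item is %d byte(s)\n", result->len[0]);
+ *       ub_resolve_free(result);
+ *   }
+ *   ub_ctx_delete(ctx);
+ */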
+
+/**
+ * Perform resolution and validation of the target name.
+ * Asynchronous, after a while, the callback will be called with your
+ * data and the result.
+ * @param ctx: context.
+ * If no thread or process has been created yet to perform the
+ * work in the background, it is created now.
+ * The context is finalized, and can no longer accept config changes.
+ * @param name: domain name in text format (a string).
+ * @param rrtype: type of RR in host order, 1 is A.
+ * @param rrclass: class of RR in host order, 1 is IN (for internet).
+ * @param mydata: this data is your own data (you can pass NULL),
+ * and is passed on to the callback function.
+ * @param callback: this is called on completion of the resolution.
+ * It is called as:
+ * void callback(void* mydata, int err, struct ub_result* result)
+ * with mydata: the same as passed here, you may pass NULL,
+ * with err: is 0 when a result has been found.
+ * with result: a newly allocated result structure.
+ * The result may be NULL, in that case err is set.
+ *
+ * If an error happens during processing, your callback will be called
+ * with error set to a nonzero value (and result==NULL).
+ * @param async_id: if you pass a non-NULL value, an identifier number is
+ * returned for the query as it is in progress. It can be used to
+ * cancel the query.
+ * @return 0 if OK, else error.
+ */
+int ub_resolve_async(struct ub_ctx* ctx, char* name, int rrtype,
+ int rrclass, void* mydata, ub_callback_t callback, int* async_id);
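+
+/*
+ * Illustrative sketch, not part of the library API: an asynchronous lookup
+ * driven by select(2) on ub_fd(), using the my_callback sketched near the
+ * ub_callback_t typedef above to set the done flag; <sys/select.h> is
+ * assumed for select and the fd set macros.
+ *
+ *   int done = 0, async_id = 0;
+ *   if(ub_resolve_async(ctx, "www.example.com", 1, 1, &done,
+ *       my_callback, &async_id) != 0)
+ *       return 1;
+ *   while(!done) {
+ *       fd_set rfds;
+ *       FD_ZERO(&rfds);
+ *       FD_SET(ub_fd(ctx), &rfds);
+ *       if(select(ub_fd(ctx)+1, &rfds, NULL, NULL, NULL) > 0)
+ *           ub_process(ctx);
+ *   }
+ *
+ * When the application has nothing else to do, ub_wait(ctx) replaces the
+ * select loop, and ub_cancel(ctx, async_id) withdraws the query before its
+ * callback runs.
+ */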
+
+/**
+ * Cancel an async query in progress.
+ * Its callback will not be called.
+ *
+ * @param ctx: context.
+ * @param async_id: which query to cancel.
+ * @return 0 if OK, else error.
+ * This routine can return an error if the async_id passed does not exist
+ * or has already been delivered. If another thread is processing results
+ * at the same time, the result may be delivered at the same time and the
+ * cancel fails with an error. Also the cancel can fail due to a system
+ * error, no memory or socket failures.
+ */
+int ub_cancel(struct ub_ctx* ctx, int async_id);
+
+/**
+ * Free storage associated with a result structure.
+ * @param result: to free
+ */
+void ub_resolve_free(struct ub_result* result);
+
+/**
+ * Convert error value to a human readable string.
+ * @param err: error code from one of the ub_val* functions.
+ * @return pointer to constant text string, zero terminated.
+ */
+const char* ub_strerror(int err);
+
+/**
+ * Debug routine. Print the local zone information to debug output.
+ * @param ctx: context. Is finalized by the routine.
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_print_local_zones(struct ub_ctx* ctx);
+
+/**
+ * Add a new zone with the zonetype to the local authority info of the
+ * library.
+ * @param ctx: context. Is finalized by the routine.
+ * @param zone_name: name of the zone in text, "example.com"
+ * If it already exists, the type is updated.
+ * @param zone_type: type of the zone (like for unbound.conf) in text.
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_zone_add(struct ub_ctx* ctx, char *zone_name, char *zone_type);
+
+/**
+ * Remove zone from local authority info of the library.
+ * @param ctx: context. Is finalized by the routine.
+ * @param zone_name: name of the zone in text, "example.com"
+ * If it does not exist, nothing happens.
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_zone_remove(struct ub_ctx* ctx, char *zone_name);
+
+/**
+ * Add localdata to the library local authority info.
+ * Similar to local-data config statement.
+ * @param ctx: context. Is finalized by the routine.
+ * @param data: the resource record in text format, for example
+ * "www.example.com IN A 127.0.0.1"
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_data_add(struct ub_ctx* ctx, char *data);
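+
+/*
+ * Illustrative sketch, not part of the library API: serve a fixed answer
+ * for a name from local authority data, analogous to local-zone and
+ * local-data in unbound.conf. The zone type "static" is an assumption
+ * borrowed from the unbound.conf zone types.
+ *
+ *   ub_ctx_zone_add(ctx, "example.com", "static");
+ *   ub_ctx_data_add(ctx, "www.example.com IN A 127.0.0.1");
+ */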
+
+/**
+ * Remove localdata from the library local authority info.
+ * @param ctx: context. Is finalized by the routine.
+ * @param data: the name to delete all data from, like "www.example.com".
+ * @return 0 if OK, else error.
+ */
+int ub_ctx_data_remove(struct ub_ctx* ctx, char *data);
+
+/**
+ * Get a version string from the libunbound implementation.
+ * @return a static constant string with the version number.
+ */
+const char* ub_version(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _UB_UNBOUND_H */