Diffstat (limited to 'contrib/unbound/iterator/iterator.c')
-rw-r--r--  contrib/unbound/iterator/iterator.c  295
1 file changed, 203 insertions(+), 92 deletions(-)
diff --git a/contrib/unbound/iterator/iterator.c b/contrib/unbound/iterator/iterator.c
index e9fea544aa69..5732a414857e 100644
--- a/contrib/unbound/iterator/iterator.c
+++ b/contrib/unbound/iterator/iterator.c
@@ -52,6 +52,7 @@
#include "iterator/iter_priv.h"
#include "validator/val_neg.h"
#include "services/cache/dns.h"
+#include "services/cache/rrset.h"
#include "services/cache/infra.h"
#include "services/authzone.h"
#include "util/module.h"
@@ -678,30 +679,40 @@ errinf_reply(struct module_qstate* qstate, struct iter_qstate* iq)
/** see if last resort is possible - does config allow queries to parent */
static int
-can_have_last_resort(struct module_env* env, uint8_t* nm, size_t nmlen,
- uint16_t qclass, struct delegpt** retdp)
+can_have_last_resort(struct module_env* env, uint8_t* nm, size_t ATTR_UNUSED(nmlen),
+ uint16_t qclass, int* have_dp, struct delegpt** retdp,
+ struct regional* region)
{
- struct delegpt* fwddp;
- struct iter_hints_stub* stub;
- int labs = dname_count_labels(nm);
+ struct delegpt* dp = NULL;
+ int nolock = 0;
/* do not process a last resort (the parent side) if a stub
* or forward is configured, because we do not want to go 'above'
* the configured servers */
- if(!dname_is_root(nm) && (stub = (struct iter_hints_stub*)
- name_tree_find(&env->hints->tree, nm, nmlen, labs, qclass)) &&
+ if(!dname_is_root(nm) &&
+ (dp = hints_find(env->hints, nm, qclass, nolock)) &&
/* has_parent side is turned off for stub_first, where we
* are allowed to go to the parent */
- stub->dp->has_parent_side_NS) {
- if(retdp) *retdp = stub->dp;
+ dp->has_parent_side_NS) {
+ if(retdp) *retdp = delegpt_copy(dp, region);
+ lock_rw_unlock(&env->hints->lock);
+ if(have_dp) *have_dp = 1;
return 0;
}
- if((fwddp = forwards_find(env->fwds, nm, qclass)) &&
+ if(dp) {
+ lock_rw_unlock(&env->hints->lock);
+ dp = NULL;
+ }
+ if((dp = forwards_find(env->fwds, nm, qclass, nolock)) &&
/* has_parent_side is turned off for forward_first, where
* we are allowed to go to the parent */
- fwddp->has_parent_side_NS) {
- if(retdp) *retdp = fwddp;
+ dp->has_parent_side_NS) {
+ if(retdp) *retdp = delegpt_copy(dp, region);
+ lock_rw_unlock(&env->fwds->lock);
+ if(have_dp) *have_dp = 1;
return 0;
}
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(dp) { lock_rw_unlock(&env->fwds->lock); }
return 1;
}
@@ -877,10 +888,11 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
{
struct delegpt* dp;
struct module_qstate* subq;
+ int nolock = 0;
verbose(VERB_DETAIL, "priming . %s NS",
sldns_lookup_by_id(sldns_rr_classes, (int)qclass)?
sldns_lookup_by_id(sldns_rr_classes, (int)qclass)->name:"??");
- dp = hints_lookup_root(qstate->env->hints, qclass);
+ dp = hints_find_root(qstate->env->hints, qclass, nolock);
if(!dp) {
verbose(VERB_ALGO, "Cannot prime due to lack of hints");
return 0;
@@ -890,6 +902,7 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
if(!generate_sub_request((uint8_t*)"\000", 1, LDNS_RR_TYPE_NS,
qclass, qstate, id, iq, QUERYTARGETS_STATE, PRIME_RESP_STATE,
&subq, 0, 0)) {
+ lock_rw_unlock(&qstate->env->hints->lock);
verbose(VERB_ALGO, "could not prime root");
return 0;
}
@@ -900,6 +913,7 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
* copy dp, it is now part of the root prime query.
* dp was part of in the fixed hints structure. */
subiq->dp = delegpt_copy(dp, subq->region);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(!subiq->dp) {
log_err("out of memory priming root, copydp");
fptr_ok(fptr_whitelist_modenv_kill_sub(
@@ -911,6 +925,8 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
subiq->num_target_queries = 0;
subiq->dnssec_expected = iter_indicates_dnssec(
qstate->env, subiq->dp, NULL, subq->qinfo.qclass);
+ } else {
+ lock_rw_unlock(&qstate->env->hints->lock);
}
/* this module stops, our submodule starts, and does the query. */
@@ -941,18 +957,21 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
struct iter_hints_stub* stub;
struct delegpt* stub_dp;
struct module_qstate* subq;
+ int nolock = 0;
if(!qname) return 0;
- stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp);
+ stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp,
+ nolock);
/* The stub (if there is one) does not need priming. */
- if(!stub)
- return 0;
+ if(!stub) return 0;
stub_dp = stub->dp;
/* if we have an auth_zone dp, and stub is equal, don't prime stub
* yet, unless we want to fallback and avoid the auth_zone */
if(!iq->auth_zone_avoid && iq->dp && iq->dp->auth_dp &&
- query_dname_compare(iq->dp->name, stub_dp->name) == 0)
+ query_dname_compare(iq->dp->name, stub_dp->name) == 0) {
+ lock_rw_unlock(&qstate->env->hints->lock);
return 0;
+ }
/* is it a noprime stub (always use) */
if(stub->noprime) {
@@ -961,13 +980,14 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
/* copy the dp out of the fixed hints structure, so that
* it can be changed when servicing this query */
iq->dp = delegpt_copy(stub_dp, qstate->region);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(!iq->dp) {
log_err("out of memory priming stub");
errinf(qstate, "malloc failure, priming stub");
(void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
return 1; /* return 1 to make module stop, with error */
}
- log_nametypeclass(VERB_DETAIL, "use stub", stub_dp->name,
+ log_nametypeclass(VERB_DETAIL, "use stub", iq->dp->name,
LDNS_RR_TYPE_NS, qclass);
return r;
}
@@ -981,6 +1001,7 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
if(!generate_sub_request(stub_dp->name, stub_dp->namelen,
LDNS_RR_TYPE_NS, qclass, qstate, id, iq,
QUERYTARGETS_STATE, PRIME_RESP_STATE, &subq, 0, 0)) {
+ lock_rw_unlock(&qstate->env->hints->lock);
verbose(VERB_ALGO, "could not prime stub");
errinf(qstate, "could not generate lookup for stub prime");
(void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
@@ -993,6 +1014,7 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
/* Set the initial delegation point to the hint. */
/* make copy to avoid use of stub dp by different qs/threads */
subiq->dp = delegpt_copy(stub_dp, subq->region);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(!subiq->dp) {
log_err("out of memory priming stub, copydp");
fptr_ok(fptr_whitelist_modenv_kill_sub(
@@ -1009,6 +1031,8 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
subiq->wait_priming_stub = 1;
subiq->dnssec_expected = iter_indicates_dnssec(
qstate->env, subiq->dp, NULL, subq->qinfo.qclass);
+ } else {
+ lock_rw_unlock(&qstate->env->hints->lock);
}
/* this module stops, our submodule starts, and does the query. */
@@ -1181,7 +1205,7 @@ generate_ns_check(struct module_qstate* qstate, struct iter_qstate* iq, int id)
if(iq->depth == ie->max_dependency_depth)
return;
if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen,
- iq->qchase.qclass, NULL))
+ iq->qchase.qclass, NULL, NULL, NULL))
return;
/* is this query the same as the nscheck? */
if(qstate->qinfo.qtype == LDNS_RR_TYPE_NS &&
@@ -1294,6 +1318,7 @@ forward_request(struct module_qstate* qstate, struct iter_qstate* iq)
struct delegpt* dp;
uint8_t* delname = iq->qchase.qname;
size_t delnamelen = iq->qchase.qname_len;
+ int nolock = 0;
if(iq->refetch_glue && iq->dp) {
delname = iq->dp->name;
delnamelen = iq->dp->namelen;
@@ -1302,12 +1327,13 @@ forward_request(struct module_qstate* qstate, struct iter_qstate* iq)
if( (iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue)
&& !dname_is_root(iq->qchase.qname))
dname_remove_label(&delname, &delnamelen);
- dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass);
- if(!dp)
- return 0;
+ dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass,
+ nolock);
+ if(!dp) return 0;
/* send recursion desired to forward addr */
iq->chase_flags |= BIT_RD;
iq->dp = delegpt_copy(dp, qstate->region);
+ lock_rw_unlock(&qstate->env->fwds->lock);
/* iq->dp checked by caller */
verbose(VERB_ALGO, "forwarding request");
return 1;
@@ -1335,6 +1361,7 @@ static int
processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
struct iter_env* ie, int id)
{
+ uint8_t dpname_storage[LDNS_MAX_DOMAINLEN+1];
uint8_t* delname, *dpname=NULL;
size_t delnamelen, dpnamelen=0;
struct dns_msg* msg = NULL;
@@ -1381,7 +1408,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
if (iq->refetch_glue &&
iq->dp &&
!can_have_last_resort(qstate->env, iq->dp->name,
- iq->dp->namelen, iq->qchase.qclass, NULL)) {
+ iq->dp->namelen, iq->qchase.qclass, NULL, NULL, NULL)) {
iq->refetch_glue = 0;
}
@@ -1389,8 +1416,61 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
/* This either results in a query restart (CNAME cache response), a
* terminating response (ANSWER), or a cache miss (null). */
-
- if (iter_stub_fwd_no_cache(qstate, &iq->qchase, &dpname, &dpnamelen)) {
+
+ /* Check RPZ for override */
+ if(qstate->env->auth_zones) {
+ /* apply rpz qname triggers, like after cname */
+ struct dns_msg* forged_response =
+ rpz_callback_from_iterator_cname(qstate, iq);
+ if(forged_response) {
+ uint8_t* sname = 0;
+ size_t slen = 0;
+ int count = 0;
+ while(forged_response && reply_find_rrset_section_an(
+ forged_response->rep, iq->qchase.qname,
+ iq->qchase.qname_len, LDNS_RR_TYPE_CNAME,
+ iq->qchase.qclass) &&
+ iq->qchase.qtype != LDNS_RR_TYPE_CNAME &&
+ count++ < ie->max_query_restarts) {
+ /* another cname to follow */
+ if(!handle_cname_response(qstate, iq, forged_response,
+ &sname, &slen)) {
+ errinf(qstate, "malloc failure, CNAME info");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ iq->qchase.qname = sname;
+ iq->qchase.qname_len = slen;
+ forged_response =
+ rpz_callback_from_iterator_cname(qstate, iq);
+ }
+ if(forged_response != NULL) {
+ qstate->ext_state[id] = module_finished;
+ qstate->return_rcode = LDNS_RCODE_NOERROR;
+ qstate->return_msg = forged_response;
+ iq->response = forged_response;
+ next_state(iq, FINISHED_STATE);
+ if(!iter_prepend(iq, qstate->return_msg, qstate->region)) {
+ log_err("rpz: after cached cname, prepend rrsets: out of memory");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ qstate->return_msg->qinfo = qstate->qinfo;
+ return 0;
+ }
+ /* Follow the CNAME response */
+ iq->dp = NULL;
+ iq->refetch_glue = 0;
+ iq->query_restart_count++;
+ iq->sent_count = 0;
+ iq->dp_target_count = 0;
+ sock_list_insert(&qstate->reply_origin, NULL, 0, qstate->region);
+ if(qstate->env->cfg->qname_minimisation)
+ iq->minimisation_state = INIT_MINIMISE_STATE;
+ return next_state(iq, INIT_REQUEST_STATE);
+ }
+ }
+
+ if (iter_stub_fwd_no_cache(qstate, &iq->qchase, &dpname, &dpnamelen,
+ dpname_storage, sizeof(dpname_storage))) {
/* Asked to not query cache. */
verbose(VERB_ALGO, "no-cache set, going to the network");
qstate->no_cache_lookup = 1;
@@ -1449,39 +1529,6 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
iq->qchase.qname = sname;
iq->qchase.qname_len = slen;
- if(qstate->env->auth_zones) {
- /* apply rpz qname triggers after cname */
- struct dns_msg* forged_response =
- rpz_callback_from_iterator_cname(qstate, iq);
- while(forged_response && reply_find_rrset_section_an(
- forged_response->rep, iq->qchase.qname,
- iq->qchase.qname_len, LDNS_RR_TYPE_CNAME,
- iq->qchase.qclass)) {
- /* another cname to follow */
- if(!handle_cname_response(qstate, iq, forged_response,
- &sname, &slen)) {
- errinf(qstate, "malloc failure, CNAME info");
- return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
- }
- iq->qchase.qname = sname;
- iq->qchase.qname_len = slen;
- forged_response =
- rpz_callback_from_iterator_cname(qstate, iq);
- }
- if(forged_response != NULL) {
- qstate->ext_state[id] = module_finished;
- qstate->return_rcode = LDNS_RCODE_NOERROR;
- qstate->return_msg = forged_response;
- iq->response = forged_response;
- next_state(iq, FINISHED_STATE);
- if(!iter_prepend(iq, qstate->return_msg, qstate->region)) {
- log_err("rpz: after cached cname, prepend rrsets: out of memory");
- return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
- }
- qstate->return_msg->qinfo = qstate->qinfo;
- return 0;
- }
- }
/* This *is* a query restart, even if it is a cheap
* one. */
iq->dp = NULL;
@@ -1494,7 +1541,6 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
iq->minimisation_state = INIT_MINIMISE_STATE;
return next_state(iq, INIT_REQUEST_STATE);
}
-
/* if from cache, NULL, else insert 'cache IP' len=0 */
if(qstate->reply_origin)
sock_list_insert(&qstate->reply_origin, NULL, 0, qstate->region);
@@ -1555,7 +1601,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
if(iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue ||
(iq->qchase.qtype == LDNS_RR_TYPE_NS && qstate->prefetch_leeway
- && can_have_last_resort(qstate->env, delname, delnamelen, iq->qchase.qclass, NULL))) {
+ && can_have_last_resort(qstate->env, delname, delnamelen, iq->qchase.qclass, NULL, NULL, NULL))) {
/* remove first label from delname, root goes to hints,
* but only to fetch glue, not for qtype=DS. */
/* also when prefetching an NS record, fetch it again from
@@ -1584,6 +1630,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
* root priming situation. */
if(iq->dp == NULL) {
int r;
+ int nolock = 0;
/* if under auth zone, no prime needed */
if(!auth_zone_delegpt(qstate, iq, delname, delnamelen))
return error_response(qstate, id,
@@ -1597,12 +1644,13 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
break; /* got noprime-stub-zone, continue */
else if(r)
return 0; /* stub prime request made */
- if(forwards_lookup_root(qstate->env->fwds,
- iq->qchase.qclass)) {
+ if(forwards_lookup_root(qstate->env->fwds,
+ iq->qchase.qclass, nolock)) {
+ lock_rw_unlock(&qstate->env->fwds->lock);
/* forward zone root, no root prime needed */
/* fill in some dp - safety belt */
- iq->dp = hints_lookup_root(qstate->env->hints,
- iq->qchase.qclass);
+ iq->dp = hints_find_root(qstate->env->hints,
+ iq->qchase.qclass, nolock);
if(!iq->dp) {
log_err("internal error: no hints dp");
errinf(qstate, "no hints for this class");
@@ -1610,6 +1658,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
LDNS_RCODE_SERVFAIL);
}
iq->dp = delegpt_copy(iq->dp, qstate->region);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(!iq->dp) {
log_err("out of memory in safety belt");
errinf(qstate, "malloc failure, in safety belt");
@@ -1649,15 +1698,13 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
if(iter_dp_is_useless(&qstate->qinfo, qstate->query_flags,
iq->dp, ie->supports_ipv4, ie->supports_ipv6,
ie->use_nat64)) {
- struct delegpt* retdp = NULL;
- if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen, iq->qchase.qclass, &retdp)) {
- if(retdp) {
+ int have_dp = 0;
+ if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen, iq->qchase.qclass, &have_dp, &iq->dp, qstate->region)) {
+ if(have_dp) {
verbose(VERB_QUERY, "cache has stub "
"or fwd but no addresses, "
"fallback to config");
- iq->dp = delegpt_copy(retdp,
- qstate->region);
- if(!iq->dp) {
+ if(have_dp && !iq->dp) {
log_err("out of memory in "
"stub/fwd fallback");
errinf(qstate, "malloc failure, for fallback to config");
@@ -1677,10 +1724,11 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
if(dname_is_root(iq->dp->name)) {
/* use safety belt */
+ int nolock = 0;
verbose(VERB_QUERY, "Cache has root NS but "
"no addresses. Fallback to the safety belt.");
- iq->dp = hints_lookup_root(qstate->env->hints,
- iq->qchase.qclass);
+ iq->dp = hints_find_root(qstate->env->hints,
+ iq->qchase.qclass, nolock);
/* note deleg_msg is from previous lookup,
* but RD is on, so it is not used */
if(!iq->dp) {
@@ -1689,6 +1737,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
LDNS_RCODE_REFUSED);
}
iq->dp = delegpt_copy(iq->dp, qstate->region);
+ lock_rw_unlock(&qstate->env->hints->lock);
if(!iq->dp) {
log_err("out of memory in safety belt");
errinf(qstate, "malloc failure, in safety belt, for root");
@@ -1744,6 +1793,7 @@ processInitRequest2(struct module_qstate* qstate, struct iter_qstate* iq,
delnamelen = iq->qchase.qname_len;
if(iq->refetch_glue) {
struct iter_hints_stub* stub;
+ int nolock = 0;
if(!iq->dp) {
log_err("internal or malloc fail: no dp for refetch");
errinf(qstate, "malloc failure, no delegation info");
@@ -1753,12 +1803,14 @@ processInitRequest2(struct module_qstate* qstate, struct iter_qstate* iq,
* this is above stub without stub-first. */
stub = hints_lookup_stub(
qstate->env->hints, iq->qchase.qname, iq->qchase.qclass,
- iq->dp);
+ iq->dp, nolock);
if(!stub || !stub->dp->has_parent_side_NS ||
dname_subdomain_c(iq->dp->name, stub->dp->name)) {
delname = iq->dp->name;
delnamelen = iq->dp->namelen;
}
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(stub) { lock_rw_unlock(&qstate->env->hints->lock); }
}
if(iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue) {
if(!dname_is_root(delname))
@@ -2062,7 +2114,7 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
log_assert(iq->dp);
if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen,
- iq->qchase.qclass, NULL)) {
+ iq->qchase.qclass, NULL, NULL, NULL)) {
/* fail -- no more targets, no more hope of targets, no hope
* of a response. */
errinf(qstate, "all the configured stub or forward servers failed,");
@@ -2072,21 +2124,24 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
return error_response_cache(qstate, id, LDNS_RCODE_SERVFAIL);
}
if(!iq->dp->has_parent_side_NS && dname_is_root(iq->dp->name)) {
- struct delegpt* p = hints_lookup_root(qstate->env->hints,
- iq->qchase.qclass);
- if(p) {
+ struct delegpt* dp;
+ int nolock = 0;
+ dp = hints_find_root(qstate->env->hints,
+ iq->qchase.qclass, nolock);
+ if(dp) {
struct delegpt_addr* a;
iq->chase_flags &= ~BIT_RD; /* go to authorities */
- for(ns = p->nslist; ns; ns=ns->next) {
+ for(ns = dp->nslist; ns; ns=ns->next) {
(void)delegpt_add_ns(iq->dp, qstate->region,
ns->name, ns->lame, ns->tls_auth_name,
ns->port);
}
- for(a = p->target_list; a; a=a->next_target) {
+ for(a = dp->target_list; a; a=a->next_target) {
(void)delegpt_add_addr(iq->dp, qstate->region,
&a->addr, a->addrlen, a->bogus,
a->lame, a->tls_auth_name, -1, NULL);
}
+ lock_rw_unlock(&qstate->env->hints->lock);
}
iq->dp->has_parent_side_NS = 1;
} else if(!iq->dp->has_parent_side_NS) {
@@ -2164,7 +2219,7 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
if( ((ie->supports_ipv6 && !ns->done_pside6) ||
((ie->supports_ipv4 || ie->use_nat64) && !ns->done_pside4)) &&
!can_have_last_resort(qstate->env, ns->name, ns->namelen,
- iq->qchase.qclass, NULL)) {
+ iq->qchase.qclass, NULL, NULL, NULL)) {
log_nametypeclass(VERB_ALGO, "cannot pside lookup ns "
"because it is also a stub/forward,",
ns->name, LDNS_RR_TYPE_NS, iq->qchase.qclass);
@@ -2746,8 +2801,51 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
delegpt_add_unused_targets(iq->dp);
if(qstate->env->auth_zones) {
- /* apply rpz triggers at query time */
+ uint8_t* sname = NULL;
+ size_t snamelen = 0;
+ /* apply rpz triggers at query time; nameserver IP and dname */
+ struct dns_msg* forged_response_after_cname;
struct dns_msg* forged_response = rpz_callback_from_iterator_module(qstate, iq);
+ int count = 0;
+ while(forged_response && reply_find_rrset_section_an(
+ forged_response->rep, iq->qchase.qname,
+ iq->qchase.qname_len, LDNS_RR_TYPE_CNAME,
+ iq->qchase.qclass) &&
+ iq->qchase.qtype != LDNS_RR_TYPE_CNAME &&
+ count++ < ie->max_query_restarts) {
+ /* another cname to follow */
+ if(!handle_cname_response(qstate, iq, forged_response,
+ &sname, &snamelen)) {
+ errinf(qstate, "malloc failure, CNAME info");
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ iq->qchase.qname = sname;
+ iq->qchase.qname_len = snamelen;
+ forged_response_after_cname =
+ rpz_callback_from_iterator_cname(qstate, iq);
+ if(forged_response_after_cname) {
+ forged_response = forged_response_after_cname;
+ } else {
+ /* Follow the CNAME with a query restart */
+ iq->deleg_msg = NULL;
+ iq->dp = NULL;
+ iq->dsns_point = NULL;
+ iq->auth_zone_response = 0;
+ iq->refetch_glue = 0;
+ iq->query_restart_count++;
+ iq->sent_count = 0;
+ iq->dp_target_count = 0;
+ if(qstate->env->cfg->qname_minimisation)
+ iq->minimisation_state = INIT_MINIMISE_STATE;
+ outbound_list_clear(&iq->outlist);
+ iq->num_current_queries = 0;
+ fptr_ok(fptr_whitelist_modenv_detach_subs(
+ qstate->env->detach_subs));
+ (*qstate->env->detach_subs)(qstate);
+ iq->num_target_queries = 0;
+ return next_state(iq, INIT_REQUEST_STATE);
+ }
+ }
if(forged_response != NULL) {
qstate->ext_state[id] = module_finished;
qstate->return_rcode = LDNS_RCODE_NOERROR;
@@ -3082,7 +3180,8 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
/* DNAME to a subdomain loop; do not recurse */
type = RESPONSE_TYPE_ANSWER;
}
- } else if(type == RESPONSE_TYPE_CNAME &&
+ }
+ if(type == RESPONSE_TYPE_CNAME &&
iq->qchase.qtype == LDNS_RR_TYPE_CNAME &&
iq->minimisation_state == MINIMISE_STATE &&
query_dname_compare(iq->qchase.qname, iq->qinfo_out.qname) == 0) {
@@ -3193,6 +3292,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
}
return final_state(iq);
} else if(type == RESPONSE_TYPE_REFERRAL) {
+ struct delegpt* old_dp = NULL;
/* REFERRAL type responses get a reset of the
* delegation point, and back to the QUERYTARGETS_STATE. */
verbose(VERB_DETAIL, "query response was REFERRAL");
@@ -3244,6 +3344,8 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
/* Reset the event state, setting the current delegation
* point to the referral. */
iq->deleg_msg = iq->response;
+ /* Keep current delegation point for label comparison */
+ old_dp = iq->dp;
iq->dp = delegpt_from_message(iq->response, qstate->region);
if (qstate->env->cfg->qname_minimisation)
iq->minimisation_state = INIT_MINIMISE_STATE;
@@ -3251,6 +3353,20 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
errinf(qstate, "malloc failure, for delegation point");
return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
}
+ if(old_dp->namelabs + 1 < iq->dp->namelabs) {
+ /* We got a grandchild delegation (more than one label
+ * difference) than expected. Check for in-between
+ * delegations in the cache and remove them.
+ * They could prove problematic when they expire
+ * and rrset_expired_above() encounters them during
+ * delegation cache lookups. */
+ uint8_t* qname = iq->dp->name;
+ size_t qnamelen = iq->dp->namelen;
+ rrset_cache_remove_above(qstate->env->rrset_cache,
+ &qname, &qnamelen, LDNS_RR_TYPE_NS,
+ iq->qchase.qclass, *qstate->env->now,
+ old_dp->name, old_dp->namelen);
+ }
if(!cache_fill_missing(qstate->env, iq->qchase.qclass,
qstate->region, iq->dp)) {
errinf(qstate, "malloc failure, copy extra info into delegation point");
@@ -3341,10 +3457,13 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
/* apply rpz qname triggers after cname */
struct dns_msg* forged_response =
rpz_callback_from_iterator_cname(qstate, iq);
+ int count = 0;
while(forged_response && reply_find_rrset_section_an(
forged_response->rep, iq->qchase.qname,
iq->qchase.qname_len, LDNS_RR_TYPE_CNAME,
- iq->qchase.qclass)) {
+ iq->qchase.qclass) &&
+ iq->qchase.qtype != LDNS_RR_TYPE_CNAME &&
+ count++ < ie->max_query_restarts) {
/* another cname to follow */
if(!handle_cname_response(qstate, iq, forged_response,
&sname, &snamelen)) {
@@ -3926,17 +4045,9 @@ processFinished(struct module_qstate* qstate, struct iter_qstate* iq,
!qstate->env->cfg->val_log_squelch) {
char* err_str = errinf_to_str_misc(qstate);
if(err_str) {
- size_t err_str_len = strlen(err_str);
verbose(VERB_ALGO, "iterator EDE: %s", err_str);
- /* allocate space and store the error
- * string */
- iq->response->rep->reason_bogus_str = regional_alloc(
- qstate->region,
- sizeof(char) * (err_str_len+1));
- memcpy(iq->response->rep->reason_bogus_str,
- err_str, err_str_len+1);
+ iq->response->rep->reason_bogus_str = err_str;
}
- free(err_str);
}
/* we have finished processing this query */