diff -Nru unbound-1.13.1/debian/changelog unbound-1.13.1/debian/changelog
--- unbound-1.13.1/debian/changelog 2021-02-09 23:53:57.000000000 +0100
+++ unbound-1.13.1/debian/changelog 2023-04-05 23:06:47.000000000 +0200
@@ -1,3 +1,41 @@
+unbound (1.13.1-1+deb11u1) bullseye; urgency=high
+
+ * Non-maintainer upload by the LTS team.
+ * Fix the following security vulnerabilities.
+ CVE-2022-3204:
+ A vulnerability named 'Non-Responsive Delegation Attack' (NRDelegation
+ Attack) has been discovered in various DNS resolving software. The
+ NRDelegation Attack works by having a malicious delegation with a
+ considerable number of non-responsive nameservers. The attack starts by
+ querying a resolver for a record that relies on those unresponsive
+ nameservers. The attack can cause a resolver to spend a lot of
+ time/resources resolving records under a malicious delegation point where a
+ considerable number of unresponsive NS records reside. It can trigger high
+ CPU usage in some resolver implementations that continually look in the
+ cache for resolved NS records in that delegation. This can lead to degraded
+ performance and eventually denial of service in orchestrated attacks.
+ Unbound does not suffer from high CPU usage, but resources are still needed
+ for resolving the malicious delegation. Unbound will keep trying to resolve
+ the record until hard limits are reached. Based on the nature of the attack
+ and the replies, different limits could be reached. This update introduces
+ fixes for better performance when under load, by cutting opportunistic
+ queries for nameserver discovery and DNSKEY prefetching, and by limiting
+ the number of times a delegation point can issue a cache lookup for
+ missing records.
+ * CVE-2022-30698 and CVE-2022-30699: (Closes: #1016493)
+ Unbound is vulnerable to a novel type of the "ghost domain names" attack.
+ The vulnerability works by targeting an Unbound instance. Unbound is
+ queried for a rogue domain name when the cached delegation information is
+ about to expire. The rogue nameserver delays the response so that the
+ cached delegation information is expired. Upon receiving the delayed answer
+ containing the delegation information, Unbound overwrites the now expired
+ entries. This action can be repeated when the delegation information is
+ about to expire making the rogue delegation information ever-updating. From
+ now on Unbound stores the start time for a query and uses that to decide if
+ the cached delegation information can be overwritten.
+
+ -- Markus Koschany <apo@debian.org> Wed, 05 Apr 2023 23:06:47 +0200
+
unbound (1.13.1-1) unstable; urgency=medium
* New upstream version 1.13.1
diff -Nru unbound-1.13.1/debian/patches/CVE-2022-30698-and-CVE-2022-30699.patch unbound-1.13.1/debian/patches/CVE-2022-30698-and-CVE-2022-30699.patch
--- unbound-1.13.1/debian/patches/CVE-2022-30698-and-CVE-2022-30699.patch 1970-01-01 01:00:00.000000000 +0100
+++ unbound-1.13.1/debian/patches/CVE-2022-30698-and-CVE-2022-30699.patch 2023-04-05 23:06:47.000000000 +0200
@@ -0,0 +1,612 @@
+From: Markus Koschany <apo@debian.org>
+Date: Wed, 5 Apr 2023 13:03:57 +0200
+Subject: CVE-2022-30698 and CVE-2022-30699
+
+Origin: https://github.com/NLnetLabs/unbound/commit/f6753a0f1018133df552347a199e0362fc1dac68
+---
+ cachedb/cachedb.c | 2 +-
+ daemon/cachedump.c | 5 +-
+ daemon/worker.c | 2 +-
+ dns64/dns64.c | 4 +-
+ ipsecmod/ipsecmod.c | 2 +-
+ iterator/iter_utils.c | 4 +-
+ iterator/iter_utils.h | 2 +-
+ iterator/iterator.c | 19 ++++---
+ pythonmod/interface.i | 5 +-
+ pythonmod/pythonmod_utils.c | 3 +-
+ services/cache/dns.c | 111 ++++++++++++++++++++++++++++++++------
+ services/cache/dns.h | 18 +++++--
+ services/mesh.c | 1 +
+ testdata/iter_prefetch_change.rpl | 16 +++---
+ util/module.h | 6 +++
+ validator/validator.c | 4 +-
+ 16 files changed, 156 insertions(+), 48 deletions(-)
+
+diff --git a/cachedb/cachedb.c b/cachedb/cachedb.c
+index e948a6b..b6b2b92 100644
+--- a/cachedb/cachedb.c
++++ b/cachedb/cachedb.c
+@@ -656,7 +656,7 @@ cachedb_intcache_store(struct module_qstate* qstate)
+ return;
+ (void)dns_cache_store(qstate->env, &qstate->qinfo,
+ qstate->return_msg->rep, 0, qstate->prefetch_leeway, 0,
+- qstate->region, store_flags);
++ qstate->region, store_flags, qstate->qstarttime);
+ }
+
+ /**
+diff --git a/daemon/cachedump.c b/daemon/cachedump.c
+index b1ce53b..908d2f9 100644
+--- a/daemon/cachedump.c
++++ b/daemon/cachedump.c
+@@ -677,7 +677,8 @@ load_msg(RES* ssl, sldns_buffer* buf, struct worker* worker)
+ if(!go_on)
+ return 1; /* skip this one, not all references satisfied */
+
+- if(!dns_cache_store(&worker->env, &qinf, &rep, 0, 0, 0, NULL, flags)) {
++ if(!dns_cache_store(&worker->env, &qinf, &rep, 0, 0, 0, NULL, flags,
++ *worker->env.now)) {
+ log_warn("error out of memory");
+ return 0;
+ }
+@@ -848,7 +849,7 @@ int print_deleg_lookup(RES* ssl, struct worker* worker, uint8_t* nm,
+ while(1) {
+ dp = dns_cache_find_delegation(&worker->env, nm, nmlen,
+ qinfo.qtype, qinfo.qclass, region, &msg,
+- *worker->env.now);
++ *worker->env.now, 0, NULL, 0);
+ if(!dp) {
+ return ssl_printf(ssl, "no delegation from "
+ "cache; goes to configured roots\n");
+diff --git a/daemon/worker.c b/daemon/worker.c
+index 57d58a9..6d352af 100644
+--- a/daemon/worker.c
++++ b/daemon/worker.c
+@@ -491,7 +491,7 @@ answer_norec_from_cache(struct worker* worker, struct query_info* qinfo,
+
+ dp = dns_cache_find_delegation(&worker->env, qinfo->qname,
+ qinfo->qname_len, qinfo->qtype, qinfo->qclass,
+- worker->scratchpad, &msg, timenow);
++ worker->scratchpad, &msg, timenow, 0, NULL, 0);
+ if(!dp) { /* no delegation, need to reprime */
+ return 0;
+ }
+diff --git a/dns64/dns64.c b/dns64/dns64.c
+index c79bc9c..0546364 100644
+--- a/dns64/dns64.c
++++ b/dns64/dns64.c
+@@ -652,7 +652,7 @@ handle_event_moddone(struct module_qstate* qstate, int id)
+ if ( (!iq || !iq->started_no_cache_store) &&
+ qstate->return_msg && qstate->return_msg->rep &&
+ !dns_cache_store(qstate->env, &qstate->qinfo, qstate->return_msg->rep,
+- 0, 0, 0, NULL, qstate->query_flags))
++ 0, 0, 0, NULL, qstate->query_flags, qstate->qstarttime))
+ log_err("out of memory");
+
+ /* do nothing */
+@@ -986,7 +986,7 @@ dns64_inform_super(struct module_qstate* qstate, int id,
+ /* Store the generated response in cache. */
+ if ( (!super_dq || !super_dq->started_no_cache_store) &&
+ !dns_cache_store(super->env, &super->qinfo, super->return_msg->rep,
+- 0, 0, 0, NULL, super->query_flags))
++ 0, 0, 0, NULL, super->query_flags, qstate->qstarttime))
+ log_err("out of memory");
+ }
+
+diff --git a/ipsecmod/ipsecmod.c b/ipsecmod/ipsecmod.c
+index a1f40a5..e7ae736 100644
+--- a/ipsecmod/ipsecmod.c
++++ b/ipsecmod/ipsecmod.c
+@@ -443,7 +443,7 @@ ipsecmod_handle_query(struct module_qstate* qstate,
+ /* Store A/AAAA in cache. */
+ if(!dns_cache_store(qstate->env, &qstate->qinfo,
+ qstate->return_msg->rep, 0, qstate->prefetch_leeway,
+- 0, qstate->region, qstate->query_flags)) {
++ 0, qstate->region, qstate->query_flags, qstate->qstarttime)) {
+ log_err("ipsecmod: out of memory caching record");
+ }
+ qstate->ext_state[id] = module_finished;
+diff --git a/iterator/iter_utils.c b/iterator/iter_utils.c
+index 011d394..512f6cf 100644
+--- a/iterator/iter_utils.c
++++ b/iterator/iter_utils.c
+@@ -653,10 +653,10 @@ dns_copy_msg(struct dns_msg* from, struct regional* region)
+ void
+ iter_dns_store(struct module_env* env, struct query_info* msgqinf,
+ struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
+- struct regional* region, uint16_t flags)
++ struct regional* region, uint16_t flags, time_t qstarttime)
+ {
+ if(!dns_cache_store(env, msgqinf, msgrep, is_referral, leeway,
+- pside, region, flags))
++ pside, region, flags, qstarttime))
+ log_err("out of memory: cannot store data in cache");
+ }
+
+diff --git a/iterator/iter_utils.h b/iterator/iter_utils.h
+index 5c2af20..983a94d 100644
+--- a/iterator/iter_utils.h
++++ b/iterator/iter_utils.h
+@@ -147,7 +147,7 @@ struct dns_msg* dns_copy_msg(struct dns_msg* from, struct regional* regional);
+ */
+ void iter_dns_store(struct module_env* env, struct query_info* qinf,
+ struct reply_info* rep, int is_referral, time_t leeway, int pside,
+- struct regional* region, uint16_t flags);
++ struct regional* region, uint16_t flags, time_t qstarttime);
+
+ /**
+ * Select randomly with n/m probability.
+diff --git a/iterator/iterator.c b/iterator/iterator.c
+index 3cae90b..b0bc33f 100644
+--- a/iterator/iterator.c
++++ b/iterator/iterator.c
+@@ -370,7 +370,7 @@ error_response_cache(struct module_qstate* qstate, int id, int rcode)
+ err.security = sec_status_indeterminate;
+ verbose(VERB_ALGO, "store error response in message cache");
+ iter_dns_store(qstate->env, &qstate->qinfo, &err, 0, 0, 0, NULL,
+- qstate->query_flags);
++ qstate->query_flags, qstate->qstarttime);
+ }
+ return error_response(qstate, id, rcode);
+ }
+@@ -1431,7 +1431,8 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
+ iq->dp = dns_cache_find_delegation(qstate->env, delname,
+ delnamelen, iq->qchase.qtype, iq->qchase.qclass,
+ qstate->region, &iq->deleg_msg,
+- *qstate->env->now+qstate->prefetch_leeway);
++ *qstate->env->now+qstate->prefetch_leeway, 1,
++ NULL, 0);
+ else iq->dp = NULL;
+
+ /* If the cache has returned nothing, then we have a
+@@ -1753,7 +1754,8 @@ generate_parentside_target_query(struct module_qstate* qstate,
+ subiq->dp = dns_cache_find_delegation(qstate->env,
+ name, namelen, qtype, qclass, subq->region,
+ &subiq->deleg_msg,
+- *qstate->env->now+subq->prefetch_leeway);
++ *qstate->env->now+subq->prefetch_leeway,
++ 1, NULL, 0);
+ /* if no dp, then it's from root, refetch unneeded */
+ if(subiq->dp) {
+ subiq->dnssec_expected = iter_indicates_dnssec(
+@@ -2823,7 +2825,8 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
+ iter_dns_store(qstate->env, &iq->response->qinfo,
+ iq->response->rep, 0, qstate->prefetch_leeway,
+ iq->dp&&iq->dp->has_parent_side_NS,
+- qstate->region, qstate->query_flags);
++ qstate->region, qstate->query_flags,
++ qstate->qstarttime);
+ /* close down outstanding requests to be discarded */
+ outbound_list_clear(&iq->outlist);
+ iq->num_current_queries = 0;
+@@ -2920,7 +2923,8 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
+ /* Store the referral under the current query */
+ /* no prefetch-leeway, since its not the answer */
+ iter_dns_store(qstate->env, &iq->response->qinfo,
+- iq->response->rep, 1, 0, 0, NULL, 0);
++ iq->response->rep, 1, 0, 0, NULL, 0,
++ qstate->qstarttime);
+ if(iq->store_parent_NS)
+ iter_store_parentside_NS(qstate->env,
+ iq->response->rep);
+@@ -3031,7 +3035,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
+ iter_dns_store(qstate->env, &iq->response->qinfo,
+ iq->response->rep, 1, qstate->prefetch_leeway,
+ iq->dp&&iq->dp->has_parent_side_NS, NULL,
+- qstate->query_flags);
++ qstate->query_flags, qstate->qstarttime);
+ /* set the current request's qname to the new value. */
+ iq->qchase.qname = sname;
+ iq->qchase.qname_len = snamelen;
+@@ -3600,7 +3604,8 @@ processFinished(struct module_qstate* qstate, struct iter_qstate* iq,
+ iter_dns_store(qstate->env, &qstate->qinfo,
+ iq->response->rep, 0, qstate->prefetch_leeway,
+ iq->dp&&iq->dp->has_parent_side_NS,
+- qstate->region, qstate->query_flags);
++ qstate->region, qstate->query_flags,
++ qstate->qstarttime);
+ }
+ }
+ qstate->return_rcode = LDNS_RCODE_NOERROR;
+diff --git a/pythonmod/interface.i b/pythonmod/interface.i
+index 5dae04a..0b6ecbe 100644
+--- a/pythonmod/interface.i
++++ b/pythonmod/interface.i
+@@ -1371,7 +1371,8 @@ int set_return_msg(struct module_qstate* qstate,
+ /* Functions which we will need to lookup delegations */
+ struct delegpt* dns_cache_find_delegation(struct module_env* env,
+ uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
+- struct regional* region, struct dns_msg** msg, uint32_t timenow);
++ struct regional* region, struct dns_msg** msg, uint32_t timenow,
++ int noexpiredabove, uint8_t* expiretop, size_t expiretoplen);
+ int iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
+ struct delegpt* dp);
+ struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
+@@ -1400,7 +1401,7 @@ struct delegpt* find_delegation(struct module_qstate* qstate, char *nm, size_t n
+ qinfo.qclass = LDNS_RR_CLASS_IN;
+
+ while(1) {
+- dp = dns_cache_find_delegation(qstate->env, (uint8_t*)nm, nmlen, qinfo.qtype, qinfo.qclass, region, &msg, timenow);
++ dp = dns_cache_find_delegation(qstate->env, (uint8_t*)nm, nmlen, qinfo.qtype, qinfo.qclass, region, &msg, timenow, 0, NULL, 0);
+ if(!dp)
+ return NULL;
+ if(iter_dp_is_useless(&qinfo, BIT_RD, dp)) {
+diff --git a/pythonmod/pythonmod_utils.c b/pythonmod/pythonmod_utils.c
+index 9f72825..8735acb 100644
+--- a/pythonmod/pythonmod_utils.c
++++ b/pythonmod/pythonmod_utils.c
+@@ -69,7 +69,8 @@ int storeQueryInCache(struct module_qstate* qstate, struct query_info* qinfo, st
+ }
+
+ return dns_cache_store(qstate->env, qinfo, msgrep, is_referral,
+- qstate->prefetch_leeway, 0, NULL, qstate->query_flags);
++ qstate->prefetch_leeway, 0, NULL, qstate->query_flags,
++ qstate->qstarttime);
+ }
+
+ /* Invalidate the message associated with query_info stored in message cache */
+diff --git a/services/cache/dns.c b/services/cache/dns.c
+index 747995e..6edcae7 100644
+--- a/services/cache/dns.c
++++ b/services/cache/dns.c
+@@ -68,11 +68,16 @@
+ * in a prefetch situation to be updated (without becoming sticky).
+ * @param qrep: update rrsets here if cache is better
+ * @param region: for qrep allocs.
++ * @param qstarttime: time when delegations were looked up, this is perhaps
++ * earlier than the time in now. The time is used to determine if RRsets
++ * of type NS have expired, so that they can only be updated using
++ * lookups of delegation points that did not use them, since they had
++ * expired then.
+ */
+ static void
+ store_rrsets(struct module_env* env, struct reply_info* rep, time_t now,
+ time_t leeway, int pside, struct reply_info* qrep,
+- struct regional* region)
++ struct regional* region, time_t qstarttime)
+ {
+ size_t i;
+ /* see if rrset already exists in cache, if not insert it. */
+@@ -81,8 +86,8 @@ store_rrsets(struct module_env* env, struct reply_info* rep, time_t now,
+ rep->ref[i].id = rep->rrsets[i]->id;
+ /* update ref if it was in the cache */
+ switch(rrset_cache_update(env->rrset_cache, &rep->ref[i],
+- env->alloc, now + ((ntohs(rep->ref[i].key->rk.type)==
+- LDNS_RR_TYPE_NS && !pside)?0:leeway))) {
++ env->alloc, ((ntohs(rep->ref[i].key->rk.type)==
++ LDNS_RR_TYPE_NS && !pside)?qstarttime:now + leeway))) {
+ case 0: /* ref unchanged, item inserted */
+ break;
+ case 2: /* ref updated, cache is superior */
+@@ -155,7 +160,8 @@ msg_del_servfail(struct module_env* env, struct query_info* qinfo,
+ void
+ dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
+ hashvalue_type hash, struct reply_info* rep, time_t leeway, int pside,
+- struct reply_info* qrep, uint32_t flags, struct regional* region)
++ struct reply_info* qrep, uint32_t flags, struct regional* region,
++ time_t qstarttime)
+ {
+ struct msgreply_entry* e;
+ time_t ttl = rep->ttl;
+@@ -170,7 +176,8 @@ dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
+ /* there was a reply_info_sortref(rep) here but it seems to be
+ * unnecessary, because the cache gets locked per rrset. */
+ reply_info_set_ttls(rep, *env->now);
+- store_rrsets(env, rep, *env->now, leeway, pside, qrep, region);
++ store_rrsets(env, rep, *env->now, leeway, pside, qrep, region,
++ qstarttime);
+ if(ttl == 0 && !(flags & DNSCACHE_STORE_ZEROTTL)) {
+ /* we do not store the message, but we did store the RRs,
+ * which could be useful for delegation information */
+@@ -194,10 +201,51 @@ dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
+ slabhash_insert(env->msg_cache, hash, &e->entry, rep, env->alloc);
+ }
+
++/** see if an rrset is expired above the qname, return upper qname. */
++static int
++rrset_expired_above(struct module_env* env, uint8_t** qname, size_t* qnamelen,
++ uint16_t searchtype, uint16_t qclass, time_t now, uint8_t* expiretop,
++ size_t expiretoplen)
++{
++ struct ub_packed_rrset_key *rrset;
++ uint8_t lablen;
++
++ while(*qnamelen > 0) {
++ /* look one label higher */
++ lablen = **qname;
++ *qname += lablen + 1;
++ *qnamelen -= lablen + 1;
++ if(*qnamelen <= 0)
++ break;
++
++ /* looks up with a time of 0, to see expired entries */
++ if((rrset = rrset_cache_lookup(env->rrset_cache, *qname,
++ *qnamelen, searchtype, qclass, 0, 0, 0))) {
++ struct packed_rrset_data* data =
++ (struct packed_rrset_data*)rrset->entry.data;
++ if(now > data->ttl) {
++ /* it is expired, this is not wanted */
++ lock_rw_unlock(&rrset->entry.lock);
++ log_nametypeclass(VERB_ALGO, "this rrset is expired", *qname, searchtype, qclass);
++ return 1;
++ }
++ /* it is not expired, continue looking */
++ lock_rw_unlock(&rrset->entry.lock);
++ }
++
++ /* do not look above the expiretop. */
++ if(expiretop && *qnamelen == expiretoplen &&
++ query_dname_compare(*qname, expiretop)==0)
++ break;
++ }
++ return 0;
++}
++
+ /** find closest NS or DNAME and returns the rrset (locked) */
+ static struct ub_packed_rrset_key*
+ find_closest_of_type(struct module_env* env, uint8_t* qname, size_t qnamelen,
+- uint16_t qclass, time_t now, uint16_t searchtype, int stripfront)
++ uint16_t qclass, time_t now, uint16_t searchtype, int stripfront,
++ int noexpiredabove, uint8_t* expiretop, size_t expiretoplen)
+ {
+ struct ub_packed_rrset_key *rrset;
+ uint8_t lablen;
+@@ -212,8 +260,40 @@ find_closest_of_type(struct module_env* env, uint8_t* qname, size_t qnamelen,
+ /* snip off front part of qname until the type is found */
+ while(qnamelen > 0) {
+ if((rrset = rrset_cache_lookup(env->rrset_cache, qname,
+- qnamelen, searchtype, qclass, 0, now, 0)))
+- return rrset;
++ qnamelen, searchtype, qclass, 0, now, 0))) {
++ uint8_t* origqname = qname;
++ size_t origqnamelen = qnamelen;
++ if(!noexpiredabove)
++ return rrset;
++ /* if expiretop set, do not look above it, but
++ * qname is equal, so the just found result is also
++ * the nonexpired above part. */
++ if(expiretop && qnamelen == expiretoplen &&
++ query_dname_compare(qname, expiretop)==0)
++ return rrset;
++ /* check for expiry, but we have to let go of the rrset
++ * for the lock ordering */
++ lock_rw_unlock(&rrset->entry.lock);
++ /* the expired_above function always takes off one
++ * label (if qnamelen>0) and returns the final qname
++ * where it searched, so we can continue from there
++ * turning the O N*N search into O N. */
++ if(!rrset_expired_above(env, &qname, &qnamelen,
++ searchtype, qclass, now, expiretop,
++ expiretoplen)) {
++ /* we want to return rrset, but it may be
++ * gone from cache, if so, just loop like
++ * it was not in the cache in the first place.
++ */
++ if((rrset = rrset_cache_lookup(env->
++ rrset_cache, origqname, origqnamelen,
++ searchtype, qclass, 0, now, 0))) {
++ return rrset;
++ }
++ }
++ log_nametypeclass(VERB_ALGO, "ignoring rrset because expired rrsets exist above it", origqname, searchtype, qclass);
++ continue;
++ }
+
+ /* snip off front label */
+ lablen = *qname;
+@@ -464,7 +544,8 @@ dns_msg_ansadd(struct dns_msg* msg, struct regional* region,
+ struct delegpt*
+ dns_cache_find_delegation(struct module_env* env, uint8_t* qname,
+ size_t qnamelen, uint16_t qtype, uint16_t qclass,
+- struct regional* region, struct dns_msg** msg, time_t now)
++ struct regional* region, struct dns_msg** msg, time_t now,
++ int noexpiredabove, uint8_t* expiretop, size_t expiretoplen)
+ {
+ /* try to find closest NS rrset */
+ struct ub_packed_rrset_key* nskey;
+@@ -472,7 +553,7 @@ dns_cache_find_delegation(struct module_env* env, uint8_t* qname,
+ struct delegpt* dp;
+
+ nskey = find_closest_of_type(env, qname, qnamelen, qclass, now,
+- LDNS_RR_TYPE_NS, 0);
++ LDNS_RR_TYPE_NS, 0, noexpiredabove, expiretop, expiretoplen);
+ if(!nskey) /* hope the caller has hints to prime or something */
+ return NULL;
+ nsdata = (struct packed_rrset_data*)nskey->entry.data;
+@@ -838,7 +919,7 @@ dns_cache_lookup(struct module_env* env,
+ * consistent with the DNAME */
+ if(!no_partial &&
+ (rrset=find_closest_of_type(env, qname, qnamelen, qclass, now,
+- LDNS_RR_TYPE_DNAME, 1))) {
++ LDNS_RR_TYPE_DNAME, 1, 0, NULL, 0))) {
+ /* synthesize a DNAME+CNAME message based on this */
+ enum sec_status sec_status = sec_status_unchecked;
+ struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k,
+@@ -968,7 +1049,7 @@ dns_cache_lookup(struct module_env* env,
+ int
+ dns_cache_store(struct module_env* env, struct query_info* msgqinf,
+ struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
+- struct regional* region, uint32_t flags)
++ struct regional* region, uint32_t flags, time_t qstarttime)
+ {
+ struct reply_info* rep = NULL;
+ /* alloc, malloc properly (not in region, like msg is) */
+@@ -991,9 +1072,9 @@ dns_cache_store(struct module_env* env, struct query_info* msgqinf,
+ /*ignore ret: it was in the cache, ref updated */
+ /* no leeway for typeNS */
+ (void)rrset_cache_update(env->rrset_cache, &ref,
+- env->alloc, *env->now +
++ env->alloc,
+ ((ntohs(ref.key->rk.type)==LDNS_RR_TYPE_NS
+- && !pside) ? 0:leeway));
++ && !pside) ? qstarttime:*env->now + leeway));
+ }
+ free(rep);
+ return 1;
+@@ -1015,7 +1096,7 @@ dns_cache_store(struct module_env* env, struct query_info* msgqinf,
+ rep->flags &= ~(BIT_AA | BIT_CD);
+ h = query_info_hash(&qinf, (uint16_t)flags);
+ dns_cache_store_msg(env, &qinf, h, rep, leeway, pside, msgrep,
+- flags, region);
++ flags, region, qstarttime);
+ /* qname is used inside query_info_entrysetup, and set to
+ * NULL. If it has not been used, free it. free(0) is safe. */
+ free(qinf.qname);
+diff --git a/services/cache/dns.h b/services/cache/dns.h
+index f1b77fb..4422504 100644
+--- a/services/cache/dns.h
++++ b/services/cache/dns.h
+@@ -88,11 +88,13 @@ struct dns_msg {
+ * @param flags: flags with BIT_CD for AAAA queries in dns64 translation.
+ * The higher 16 bits are used internally to customize the cache policy.
+ * (See DNSCACHE_STORE_xxx flags).
++ * @param qstarttime: time when the query was started, and thus when the
++ * delegations were looked up.
+ * @return 0 on alloc error (out of memory).
+ */
+ int dns_cache_store(struct module_env* env, struct query_info* qinf,
+ struct reply_info* rep, int is_referral, time_t leeway, int pside,
+- struct regional* region, uint32_t flags);
++ struct regional* region, uint32_t flags, time_t qstarttime);
+
+ /**
+ * Store message in the cache. Stores in message cache and rrset cache.
+@@ -112,11 +114,14 @@ int dns_cache_store(struct module_env* env, struct query_info* qinf,
+ * can be updated to full TTL even in prefetch situations.
+ * @param qrep: message that can be altered with better rrs from cache.
+ * @param flags: customization flags for the cache policy.
++ * @param qstarttime: time when the query was started, and thus when the
++ * delegations were looked up.
+ * @param region: to allocate into for qmsg.
+ */
+ void dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
+ hashvalue_type hash, struct reply_info* rep, time_t leeway, int pside,
+- struct reply_info* qrep, uint32_t flags, struct regional* region);
++ struct reply_info* qrep, uint32_t flags, struct regional* region,
++ time_t qstarttime);
+
+ /**
+ * Find a delegation from the cache.
+@@ -129,11 +134,18 @@ void dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
+ * @param msg: if not NULL, delegation message is returned here, synthesized
+ * from the cache.
+ * @param timenow: the time now, for checking if TTL on cache entries is OK.
++ * @param noexpiredabove: if set, no expired NS rrsets above the one found
++ * are tolerated. It only returns delegations where the delegations above
++ * it are valid.
++ * @param expiretop: if not NULL, name where check for expiry ends for
++ * noexpiredabove.
++ * @param expiretoplen: length of expiretop dname.
+ * @return new delegation or NULL on error or if not found in cache.
+ */
+ struct delegpt* dns_cache_find_delegation(struct module_env* env,
+ uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
+- struct regional* region, struct dns_msg** msg, time_t timenow);
++ struct regional* region, struct dns_msg** msg, time_t timenow,
++ int noexpiredabove, uint8_t* expiretop, size_t expiretoplen);
+
+ /**
+ * generate dns_msg from cached message
+diff --git a/services/mesh.c b/services/mesh.c
+index 1c768c3..ee8a932 100644
+--- a/services/mesh.c
++++ b/services/mesh.c
+@@ -833,6 +833,7 @@ mesh_state_create(struct module_env* env, struct query_info* qinfo,
+ mstate->s.no_cache_store = 0;
+ mstate->s.need_refetch = 0;
+ mstate->s.was_ratelimited = 0;
++ mstate->s.qstarttime = *env->now;
+
+ /* init modules */
+ for(i=0; i<env->mesh->mods.num; i++) {
+diff --git a/testdata/iter_prefetch_change.rpl b/testdata/iter_prefetch_change.rpl
+index 007025a..1be9e6a 100644
+--- a/testdata/iter_prefetch_change.rpl
++++ b/testdata/iter_prefetch_change.rpl
+@@ -22,9 +22,9 @@ REPLY QR NOERROR
+ SECTION QUESTION
+ . IN NS
+ SECTION ANSWER
+-. IN NS K.ROOT-SERVERS.NET.
++. 86400 IN NS K.ROOT-SERVERS.NET.
+ SECTION ADDITIONAL
+-K.ROOT-SERVERS.NET. IN A 193.0.14.129
++K.ROOT-SERVERS.NET. 86400 IN A 193.0.14.129
+ ENTRY_END
+
+ ENTRY_BEGIN
+@@ -34,9 +34,9 @@ REPLY QR NOERROR
+ SECTION QUESTION
+ com. IN A
+ SECTION AUTHORITY
+-com. IN NS a.gtld-servers.net.
++com. 86400 IN NS a.gtld-servers.net.
+ SECTION ADDITIONAL
+-a.gtld-servers.net. IN A 192.5.6.30
++a.gtld-servers.net. 86400 IN A 192.5.6.30
+ ENTRY_END
+ RANGE_END
+
+@@ -50,9 +50,9 @@ REPLY QR NOERROR
+ SECTION QUESTION
+ com. IN NS
+ SECTION ANSWER
+-com. IN NS a.gtld-servers.net.
++com. 86400 IN NS a.gtld-servers.net.
+ SECTION ADDITIONAL
+-a.gtld-servers.net. IN A 192.5.6.30
++a.gtld-servers.net. 86400 IN A 192.5.6.30
+ ENTRY_END
+
+ ENTRY_BEGIN
+@@ -78,9 +78,9 @@ REPLY QR NOERROR
+ SECTION QUESTION
+ com. IN NS
+ SECTION ANSWER
+-com. IN NS a.gtld-servers.net.
++com. 86400 IN NS a.gtld-servers.net.
+ SECTION ADDITIONAL
+-a.gtld-servers.net. IN A 192.5.6.30
++a.gtld-servers.net. 86400 IN A 192.5.6.30
+ ENTRY_END
+
+ ENTRY_BEGIN
+diff --git a/util/module.h b/util/module.h
+index 81a31a9..09854a1 100644
+--- a/util/module.h
++++ b/util/module.h
+@@ -652,6 +652,12 @@ struct module_qstate {
+ int need_refetch;
+ /** whether the query (or a subquery) was ratelimited */
+ int was_ratelimited;
++ /** time when query was started. This is when the qstate is created.
++ * This is used so that type NS data cannot be overwritten by them
++ * expiring while the lookup is in progress, using data fetched from
++ * those servers. By comparing expiry time with qstarttime for type NS.
++ */
++ time_t qstarttime;
+
+ /**
+ * Attributes of clients that share the qstate that may affect IP-based
+diff --git a/validator/validator.c b/validator/validator.c
+index e12180b..e078954 100644
+--- a/validator/validator.c
++++ b/validator/validator.c
+@@ -2144,7 +2144,7 @@ processFinished(struct module_qstate* qstate, struct val_qstate* vq,
+ if(!qstate->no_cache_store) {
+ if(!dns_cache_store(qstate->env, &vq->orig_msg->qinfo,
+ vq->orig_msg->rep, 0, qstate->prefetch_leeway, 0, NULL,
+- qstate->query_flags)) {
++ qstate->query_flags, qstate->qstarttime)) {
+ log_err("out of memory caching validator results");
+ }
+ }
+@@ -2153,7 +2153,7 @@ processFinished(struct module_qstate* qstate, struct val_qstate* vq,
+ /* and this does not get prefetched, so no leeway */
+ if(!dns_cache_store(qstate->env, &vq->orig_msg->qinfo,
+ vq->orig_msg->rep, 1, 0, 0, NULL,
+- qstate->query_flags)) {
++ qstate->query_flags, qstate->qstarttime)) {
+ log_err("out of memory caching validator results");
+ }
+ }
diff -Nru unbound-1.13.1/debian/patches/CVE-2022-3204.patch unbound-1.13.1/debian/patches/CVE-2022-3204.patch
--- unbound-1.13.1/debian/patches/CVE-2022-3204.patch 1970-01-01 01:00:00.000000000 +0100
+++ unbound-1.13.1/debian/patches/CVE-2022-3204.patch 2023-04-05 23:06:47.000000000 +0200
@@ -0,0 +1,215 @@
+From: Markus Koschany <apo@debian.org>
+Date: Wed, 5 Apr 2023 12:56:14 +0200
+Subject: CVE-2022-3204
+
+Origin: https://github.com/NLnetLabs/unbound/commit/137719522a8ea5b380fbb6206d2466f402f5b554
+---
+ iterator/iter_delegpt.c | 3 +++
+ iterator/iter_delegpt.h | 2 ++
+ iterator/iter_utils.c | 3 +++
+ iterator/iter_utils.h | 9 +++++++++
+ iterator/iterator.c | 36 +++++++++++++++++++++++++++++++++++-
+ services/cache/dns.c | 3 +++
+ services/mesh.c | 7 +++++++
+ services/mesh.h | 11 +++++++++++
+ 8 files changed, 73 insertions(+), 1 deletion(-)
+
+diff --git a/iterator/iter_delegpt.c b/iterator/iter_delegpt.c
+index 9a672b0..55a025b 100644
+--- a/iterator/iter_delegpt.c
++++ b/iterator/iter_delegpt.c
+@@ -76,6 +76,7 @@ struct delegpt* delegpt_copy(struct delegpt* dp, struct regional* region)
+ for(ns = dp->nslist; ns; ns = ns->next) {
+ if(!delegpt_add_ns(copy, region, ns->name, ns->lame))
+ return NULL;
++ copy->nslist->cache_lookup_count = ns->cache_lookup_count;
+ copy->nslist->resolved = ns->resolved;
+ copy->nslist->got4 = ns->got4;
+ copy->nslist->got6 = ns->got6;
+@@ -119,6 +120,7 @@ delegpt_add_ns(struct delegpt* dp, struct regional* region, uint8_t* name,
+ ns->namelen = len;
+ dp->nslist = ns;
+ ns->name = regional_alloc_init(region, name, ns->namelen);
++ ns->cache_lookup_count = 0;
+ ns->resolved = 0;
+ ns->got4 = 0;
+ ns->got6 = 0;
+@@ -597,6 +599,7 @@ int delegpt_add_ns_mlc(struct delegpt* dp, uint8_t* name, uint8_t lame)
+ }
+ ns->next = dp->nslist;
+ dp->nslist = ns;
++ ns->cache_lookup_count = 0;
+ ns->resolved = 0;
+ ns->got4 = 0;
+ ns->got6 = 0;
+diff --git a/iterator/iter_delegpt.h b/iterator/iter_delegpt.h
+index 138eb6e..8265c71 100644
+--- a/iterator/iter_delegpt.h
++++ b/iterator/iter_delegpt.h
+@@ -99,6 +99,8 @@ struct delegpt_ns {
+ uint8_t* name;
+ /** length of name */
+ size_t namelen;
++ /** number of cache lookups for the name */
++ int cache_lookup_count;
+ /**
+ * If the name has been resolved. false if not queried for yet.
+ * true if the A, AAAA queries have been generated.
+diff --git a/iterator/iter_utils.c b/iterator/iter_utils.c
+index 7bc67da..011d394 100644
+--- a/iterator/iter_utils.c
++++ b/iterator/iter_utils.c
+@@ -1191,6 +1191,9 @@ int iter_lookup_parent_glue_from_cache(struct module_env* env,
+ struct delegpt_ns* ns;
+ size_t num = delegpt_count_targets(dp);
+ for(ns = dp->nslist; ns; ns = ns->next) {
++ if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE)
++ continue;
++ ns->cache_lookup_count++;
+ /* get cached parentside A */
+ akey = rrset_cache_lookup(env->rrset_cache, ns->name,
+ ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
+diff --git a/iterator/iter_utils.h b/iterator/iter_utils.h
+index f771930..5c2af20 100644
+--- a/iterator/iter_utils.h
++++ b/iterator/iter_utils.h
+@@ -60,6 +60,15 @@ struct module_qstate;
+ struct sock_list;
+ struct ub_packed_rrset_key;
+
++/* max number of lookups in the cache for target nameserver names.
++ * This stops, for large delegations, N*N lookups in the cache. */
++#define ITERATOR_NAME_CACHELOOKUP_MAX 3
++/* max number of lookups in the cache for parentside glue for nameserver names
++ * This stops, for larger delegations, N*N lookups in the cache.
++ * It is a little larger than the nonpside max, so it allows a couple extra
++ * lookups of parent side glue. */
++#define ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE 5
++
+ /**
+ * Process config options and set iterator module state.
+ * Sets default values if no config is found.
+diff --git a/iterator/iterator.c b/iterator/iterator.c
+index 99d0201..3cae90b 100644
+--- a/iterator/iterator.c
++++ b/iterator/iterator.c
+@@ -1152,6 +1152,15 @@ generate_dnskey_prefetch(struct module_qstate* qstate,
+ (qstate->query_flags&BIT_RD) && !(qstate->query_flags&BIT_CD)){
+ return;
+ }
++ /* we do not generate this prefetch when the query list is full,
++ * the query is fetched, if needed, when the validator wants it.
++ * At that time the validator waits for it, after spawning it.
++ * This means there is one state that uses cpu and a socket, the
++ * spawned while this one waits, and not several at the same time,
++ * if we had created the lookup here. And this helps to keep
++ * the total load down, but the query still succeeds to resolve. */
++ if(mesh_jostle_exceeded(qstate->env->mesh))
++ return;
+
+ /* if the DNSKEY is in the cache this lookup will stop quickly */
+ log_nametypeclass(VERB_ALGO, "schedule dnskey prefetch",
+@@ -1866,6 +1875,14 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
+ return 0;
+ }
+ query_count++;
++ /* If the mesh query list is full, exit the loop here.
++ * This makes the routine spawn one query at a time,
++ * and this means there is no query state load
++ * increase, because the spawned state uses cpu and a
++ * socket while this state waits for that spawned
++ * state. Next time we can look up further targets */
++ if(mesh_jostle_exceeded(qstate->env->mesh))
++ break;
+ }
+ /* Send the A request. */
+ if(ie->supports_ipv4 && !ns->got4) {
+@@ -1878,6 +1895,9 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
+ return 0;
+ }
+ query_count++;
++ /* If the mesh query list is full, exit the loop. */
++ if(mesh_jostle_exceeded(qstate->env->mesh))
++ break;
+ }
+
+ /* mark this target as in progress. */
+@@ -2035,6 +2055,15 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
+ }
+ ns->done_pside6 = 1;
+ query_count++;
++ if(mesh_jostle_exceeded(qstate->env->mesh)) {
++ /* Wait for the lookup; do not spawn multiple
++ * lookups at a time. */
++ verbose(VERB_ALGO, "try parent-side glue lookup");
++ iq->num_target_queries += query_count;
++ target_count_increase(iq, query_count);
++ qstate->ext_state[id] = module_wait_subquery;
++ return 0;
++ }
+ }
+ if(ie->supports_ipv4 && !ns->done_pside4) {
+ /* Send the A request. */
+@@ -2401,7 +2430,12 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
+ if(iq->depth < ie->max_dependency_depth
+ && iq->num_target_queries == 0
+ && (!iq->target_count || iq->target_count[2]==0)
+- && iq->sent_count < TARGET_FETCH_STOP) {
++ && iq->sent_count < TARGET_FETCH_STOP
++ /* if the mesh query list is full, then do not waste cpu
++ * and sockets to fetch promiscuous targets. They can be
++ * looked up when needed. */
++ && !mesh_jostle_exceeded(qstate->env->mesh)
++ ) {
+ tf_policy = ie->target_fetch_policy[iq->depth];
+ }
+
+diff --git a/services/cache/dns.c b/services/cache/dns.c
+index f3149b6..747995e 100644
+--- a/services/cache/dns.c
++++ b/services/cache/dns.c
+@@ -324,6 +324,9 @@ cache_fill_missing(struct module_env* env, uint16_t qclass,
+ struct ub_packed_rrset_key* akey;
+ time_t now = *env->now;
+ for(ns = dp->nslist; ns; ns = ns->next) {
++ if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX)
++ continue;
++ ns->cache_lookup_count++;
+ akey = rrset_cache_lookup(env->rrset_cache, ns->name,
+ ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
+ if(akey) {
+diff --git a/services/mesh.c b/services/mesh.c
+index 91d23de..1c768c3 100644
+--- a/services/mesh.c
++++ b/services/mesh.c
+@@ -2055,3 +2055,10 @@ mesh_serve_expired_callback(void* arg)
+ mesh_do_callback(mstate, LDNS_RCODE_NOERROR, msg->rep, c, &tv);
+ }
+ }
++
++int mesh_jostle_exceeded(struct mesh_area* mesh)
++{
++ if(mesh->all.count < mesh->max_reply_states)
++ return 0;
++ return 1;
++}
+diff --git a/services/mesh.h b/services/mesh.h
+index d0a4b5f..2248178 100644
+--- a/services/mesh.h
++++ b/services/mesh.h
+@@ -674,4 +674,15 @@ struct dns_msg*
+ mesh_serve_expired_lookup(struct module_qstate* qstate,
+ struct query_info* lookup_qinfo);
+
++/**
++ * See if the mesh has space for more queries. You can allocate queries
++ * anyway, but this checks for the allocated space.
++ * @param mesh: mesh area.
++ * @return true if the query list is full.
++ * It checks the number of all queries, not just number of reply states,
++ * that have a client address. So that spawned queries count too,
++ * that were created by the iterator, or other modules.
++ */
++int mesh_jostle_exceeded(struct mesh_area* mesh);
++
+ #endif /* SERVICES_MESH_H */
diff -Nru unbound-1.13.1/debian/patches/series unbound-1.13.1/debian/patches/series
--- unbound-1.13.1/debian/patches/series 2021-02-09 23:53:57.000000000 +0100
+++ unbound-1.13.1/debian/patches/series 2023-04-05 23:06:47.000000000 +0200
@@ -1 +1,3 @@
0001-Enable-remote-control-by-default.patch
+CVE-2022-3204.patch
+CVE-2022-30698-and-CVE-2022-30699.patch
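
As a minimal illustration of the qstarttime rule that the changelog text for
CVE-2022-30698/CVE-2022-30699 describes (this is not Unbound code; the
function and variable names below are hypothetical): an NS RRset that was
still valid when the query started but has expired by the time the delayed
answer arrives must not be overwritten by data obtained through that same
lookup.

/* Sketch only: mirrors the comparison of RRset expiry time with qstarttime
 * described in the changelog, not the actual rrset_cache_update() logic. */
#include <stdio.h>
#include <time.h>

/* Returns 1 if a cached NS RRset expired while the lookup that used it was
 * still in flight; such an entry must not be revived by the delayed answer. */
static int ns_expired_during_lookup(time_t cached_expiry, time_t qstarttime,
    time_t now)
{
    return cached_expiry <= now && cached_expiry > qstarttime;
}

int main(void)
{
    time_t now = time(NULL);
    /* ghost-domain case: delegation was valid at query start (30s ago)
     * but expired 5s ago; the delayed rogue answer may not refresh it. */
    printf("reject delayed refresh: %d\n",
        ns_expired_during_lookup(now - 5, now - 30, now));
    /* ordinary case: delegation had already expired before the query
     * started, so a fresh lookup may legitimately replace it. */
    printf("reject delayed refresh: %d\n",
        ns_expired_during_lookup(now - 60, now - 30, now));
    return 0;
}

This shows only the decision rule; in the patch it is realized by passing
qstate->qstarttime down to dns_cache_store() and store_rrsets(), where it
replaces the current time as the value handed to rrset_cache_update() for
non-parent-side NS RRsets.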