/*  Copyright (C) 2014-2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <fcntl.h>
#include <assert.h>
#include <arpa/inet.h>
#include <libknot/rrtype/rdname.h>
#include <libknot/descriptor.h>
#include <ucw/mempool.h>
#include "kresconfig.h"
#include "lib/resolve.h"
#include "lib/layer.h"
#include "lib/rplan.h"
#include "lib/layer/iterate.h"
#include "lib/dnssec/ta.h"
#include "lib/dnssec.h"
#if defined(ENABLE_COOKIES)
#include "lib/cookies/control.h"
#include "lib/cookies/helper.h"
#include "lib/cookies/nonce.h"
#else /* Define compatibility macros */
#define KNOT_EDNS_OPTION_COOKIE 10
#endif /* defined(ENABLE_COOKIES) */

#define VERBOSE_MSG(qry, ...) QRVERBOSE((qry), "resl",  __VA_ARGS__)

bool kr_rank_check(uint8_t rank)
{
	switch (rank & ~KR_RANK_AUTH) {
	case KR_RANK_INITIAL:
	case KR_RANK_OMIT:
	case KR_RANK_TRY:
	case KR_RANK_INDET:
	case KR_RANK_BOGUS:
	case KR_RANK_MISMATCH:
	case KR_RANK_MISSING:
	case KR_RANK_INSECURE:
	case KR_RANK_SECURE:
		return true;
	default:
		return false;
	}
}
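
/* Illustrative note (not in the original source): KR_RANK_AUTH is a flag
 * OR-ed onto one of the base ranks, which is why the switch above masks it
 * out first.  E.g. both of these hold for an authoritative secure record:
 *
 *	uint8_t rank = KR_RANK_SECURE | KR_RANK_AUTH;
 *	assert(kr_rank_check(rank));
 *	assert(kr_rank_test(rank, KR_RANK_SECURE));
 */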

/** @internal Set @a yielded to all RRs with matching @a qry_uid. */
static void set_yield(ranked_rr_array_t *array, const uint32_t qry_uid, const bool yielded)
{
	for (unsigned i = 0; i < array->len; ++i) {
		ranked_rr_array_entry_t *entry = array->at[i];
		if (entry->qry_uid == qry_uid) {
			entry->yielded = yielded;
		}
	}
}

/**
 * @internal Defer execution of current query.
 * The current layer state and input will be pushed to a stack and resumed on next iteration.
 */
static int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt)
{
	struct kr_request *req = ctx->req;
	size_t pkt_size = pkt->size;
	if (knot_pkt_has_tsig(pkt)) {
		pkt_size += pkt->tsig_wire.len;
	}
	knot_pkt_t *pkt_copy = knot_pkt_new(NULL, pkt_size, &req->pool);
	struct kr_layer_pickle *pickle = mm_alloc(&req->pool, sizeof(*pickle));
	if (pickle && pkt_copy && knot_pkt_copy(pkt_copy, pkt) == 0) {
		struct kr_query *qry = req->current_query;
		pickle->api = ctx->api;
		pickle->state = ctx->state;
		pickle->pkt = pkt_copy;
		pickle->next = qry->deferred;
		qry->deferred = pickle;
		set_yield(&req->answ_selected, qry->uid, true);
		set_yield(&req->auth_selected, qry->uid, true);
		return kr_ok();
	}
	return kr_error(ENOMEM);
}
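
/* Illustrative sketch of the yield contract (hypothetical module layer,
 * not part of this file): returning KR_STATE_YIELD from `consume` makes
 * consume_yield() above pickle the packet and layer state onto
 * qry->deferred, and the pickled call is replayed after the blocking
 * sub-query completes:
 *
 *	static int my_consume(kr_layer_t *ctx, knot_pkt_t *pkt)
 *	{
 *		struct kr_query *qry = ctx->req->current_query;
 *		if (!qry->zone_cut.key) {
 *			return KR_STATE_YIELD; // defer until a DNSKEY arrives
 *		}
 *		return ctx->state;
 *	}
 */
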
static int begin_yield(kr_layer_t *ctx) { return kr_ok(); }
static int reset_yield(kr_layer_t *ctx) { return kr_ok(); }
static int finish_yield(kr_layer_t *ctx) { return kr_ok(); }
static int produce_yield(kr_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
static int checkout_yield(kr_layer_t *ctx, knot_pkt_t *packet, struct sockaddr *dst, int type) { return kr_ok(); }
static int answer_finalize_yield(kr_layer_t *ctx) { return kr_ok(); }

/** @internal Macro for iterating module layers. */
#define RESUME_LAYERS(from, r, qry, func, ...) \
    (r)->current_query = (qry); \
	for (size_t i = (from); i < (r)->ctx->modules->len; ++i) { \
		struct kr_module *mod = (r)->ctx->modules->at[i]; \
		if (mod->layer) { \
			struct kr_layer layer = {.state = (r)->state, .api = mod->layer, .req = (r)}; \
			if (layer.api && layer.api->func) { \
				(r)->state = layer.api->func(&layer, ##__VA_ARGS__); \
				if ((r)->state == KR_STATE_YIELD) { \
					func ## _yield(&layer, ##__VA_ARGS__); \
					break; \
				} \
			} \
		} \
	} /* Invalidate current query. */ \
	(r)->current_query = NULL

/** @internal Macro for starting module iteration. */
#define ITERATE_LAYERS(req, qry, func, ...) RESUME_LAYERS(0, req, qry, func, ##__VA_ARGS__)
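
/* Usage note (summary, not new behaviour): resolve_query() below runs all
 * `begin` layers via ITERATE_LAYERS(request, qry, begin), and
 * kr_resolve_consume() runs ITERATE_LAYERS(request, qry, consume, packet);
 * a KR_STATE_YIELD result stops the walk and pickles the state for later. */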

/** @internal Find layer id matching API. */
static inline size_t layer_id(struct kr_request *req, const struct kr_layer_api *api) {
	module_array_t *modules = req->ctx->modules;
	for (size_t i = 0; i < modules->len; ++i) {
		if (modules->at[i]->layer == api) {
			return i;
		}
	}
	return 0; /* Not found, try all. */
}

/* @internal We don't need to deal with locale here */
KR_CONST static inline bool isletter(unsigned chr)
{ return (chr | 0x20 /* tolower */) - 'a' <= 'z' - 'a'; }

/* Randomize QNAME letter case.
 * This adds at most 32 bits of randomness, which is more letters than an average domain name has.
 * https://tools.ietf.org/html/draft-vixie-dnsext-dns0x20-00
 */
static void randomized_qname_case(knot_dname_t * restrict qname, uint32_t secret)
{
	if (secret == 0) {
		return;
	}
	assert(qname);
	const int len = knot_dname_size(qname) - 2; /* Skip first, last label. */
	for (int i = 0; i < len; ++i) {
		if (isletter(*++qname)) {
			*qname ^= ((secret >> (i & 31)) & 1) * 0x20;
		}
	}
}
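
/* Worked example (illustrative): for "\x03www\x07example\x03com" every byte
 * between the leading length byte and the root label is scanned; a letter at
 * scan position i has its case flipped iff bit (i mod 32) of `secret` is set,
 * since upper- and lower-case ASCII differ exactly in bit 0x20.  Applying the
 * same secret again restores the original case, which is how the answer's
 * QNAME is derandomized in kr_resolve_consume(). */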

/** Invalidate current NS/addr pair. */
static int invalidate_ns(struct kr_rplan *rplan, struct kr_query *qry)
{
	if (qry->ns.addr[0].ip.sa_family != AF_UNSPEC) {
		const char *addr = kr_inaddr(&qry->ns.addr[0].ip);
		int addr_len = kr_inaddr_len(&qry->ns.addr[0].ip);
		int ret = kr_zonecut_del(&qry->zone_cut, qry->ns.name, addr, addr_len);
		/* Also remove it from the qry->ns.addr array.
		 * That's useful at least for STUB and FORWARD modes. */
		memmove(qry->ns.addr, qry->ns.addr + 1,
			sizeof(qry->ns.addr[0]) * (KR_NSREP_MAXADDR - 1));
		return ret;
	} else {
		return kr_zonecut_del_all(&qry->zone_cut, qry->ns.name);
	}
}

/** This turns off QNAME minimisation if there is a non-terminal between the current zone cut and the target name.
 *  It saves several minimisation steps, as the zone cut is likely the final one.
 */
static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct kr_cache *cache, uint32_t timestamp)
{
	// FIXME cleanup, etc.
#if 0
	if (qry->flags.NO_MINIMIZE) {
		return;
	}

	const knot_dname_t *target = qry->sname;
	const knot_dname_t *cut_name = qry->zone_cut.name;
	if (!target || !cut_name)
		return;

	struct kr_cache_entry *entry = NULL;
	/* @note: The non-terminal must be direct child of zone cut (e.g. label distance <= 2),
	 *        otherwise this would risk leaking information to parent if the NODATA TTD > zone cut TTD. */
	int labels = knot_dname_labels(target, NULL) - knot_dname_labels(cut_name, NULL);
	while (target[0] && labels > 2) {
		target = knot_wire_next_label(target, NULL);
		--labels;
	}
	for (int i = 0; i < labels; ++i) {
		int ret = kr_cache_peek(cache, KR_CACHE_PKT, target, KNOT_RRTYPE_NS, &entry, &timestamp);
		if (ret == 0) { /* Either NXDOMAIN or NODATA, start here. */
			/* @todo We could stop resolution here for NXDOMAIN, but we can't because of broken CDNs */
			qry->flags.NO_MINIMIZE = true;
			kr_make_query(qry, pkt);
			break;
		}
		assert(target[0]);
		target = knot_wire_next_label(target, NULL);
	}
	kr_cache_commit(cache);
#endif
}

static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name,
			struct kr_request *req, knot_pkt_t *pkt)
{
	/* It can happen that the parent query already has a
	 * provably insecure zone cut which is not in the cache yet. */
	struct kr_qflags pflags;
	if (qry->parent) {
		pflags = qry->parent->flags;
	}
	const bool is_insecured = qry->parent != NULL
		&& !(pflags.AWAIT_IPV4 || pflags.AWAIT_IPV6)
		&& (pflags.DNSSEC_INSECURE || pflags.DNSSEC_NODS);

	/* Want DNSSEC if it's possible to secure this name
	 * (e.g. is covered by any TA) */
	if (is_insecured) {
		/* If the parent is insecure we don't want DNSSEC
		 * even if the cut name is covered by a TA. */
		qry->flags.DNSSEC_WANT = false;
		qry->flags.DNSSEC_INSECURE = true;
		VERBOSE_MSG(qry, "=> going insecure because parent query is insecure\n");
	} else if (kr_ta_covers_qry(req->ctx, qry->zone_cut.name, KNOT_RRTYPE_NS)) {
		qry->flags.DNSSEC_WANT = true;
	} else {
		qry->flags.DNSSEC_WANT = false;
		VERBOSE_MSG(qry, "=> going insecure because there's no covering TA\n");
	}

	struct kr_zonecut cut_found;
	kr_zonecut_init(&cut_found, requested_name, req->rplan.pool);
	/* The cut that has been found can differ from the cut that was requested,
	 * so unless we are already insecure,
	 * try to fetch the TA & keys even if the initial cut name is not covered by a TA. */
	bool secured = !is_insecured;
	int ret = kr_zonecut_find_cached(req->ctx, &cut_found, requested_name,
					 qry, &secured);
	if (ret == kr_error(ENOENT)) {
		/* No cached cut found, start from SBELT
		 * and issue priming query. */
		kr_zonecut_deinit(&cut_found);
		ret = kr_zonecut_set_sbelt(req->ctx, &qry->zone_cut);
		if (ret != 0) {
			return KR_STATE_FAIL;
		}
		VERBOSE_MSG(qry, "=> using root hints\n");
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_DONE;
	} else if (ret != kr_ok()) {
		kr_zonecut_deinit(&cut_found);
		return KR_STATE_FAIL;
	}

	/* Find out security status.
	 * Go insecure if the zone cut is provably insecure */
	if ((qry->flags.DNSSEC_WANT) && !secured) {
		VERBOSE_MSG(qry, "=> NS is provably without DS, going insecure\n");
		qry->flags.DNSSEC_WANT = false;
		qry->flags.DNSSEC_INSECURE = true;
	}
	/* Zone cut name can change, check it again
	 * to prevent unnecessary DS & DNSKEY queries */
	if (!(qry->flags.DNSSEC_INSECURE) &&
	    kr_ta_covers_qry(req->ctx, cut_found.name, KNOT_RRTYPE_NS)) {
		qry->flags.DNSSEC_WANT = true;
	} else {
		qry->flags.DNSSEC_WANT = false;
	}
	/* Check if any DNSKEY found for cached cut */
	if (qry->flags.DNSSEC_WANT && cut_found.key == NULL &&
	    kr_zonecut_is_empty(&cut_found)) {
		/* Cut found and there are no proofs of zone insecurity.
		 * But no DNSKEY was found and no glue was fetched.
		 * We have a circular dependency: we must fetch A/AAAA
		 * from the authoritative, but we have no key to verify it. */
		kr_zonecut_deinit(&cut_found);
		if (requested_name[0] != '\0') {
			/* If not root - try next label */
			return KR_STATE_CONSUME;
		}
		/* No cached cut & keys found, start from SBELT */
		ret = kr_zonecut_set_sbelt(req->ctx, &qry->zone_cut);
		if (ret != 0) {
			return KR_STATE_FAIL;
		}
		VERBOSE_MSG(qry, "=> using root hints\n");
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_DONE;
	}
	/* Use the found zone cut. */
	kr_zonecut_move(&qry->zone_cut, &cut_found);
	/* Check if there's a non-terminal between target and current cut. */
	struct kr_cache *cache = &req->ctx->cache;
	check_empty_nonterms(qry, pkt, cache, qry->timestamp.tv_sec);
	/* Cut found */
	return KR_STATE_PRODUCE;
}

static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param)
{
	struct kr_rplan *rplan = &param->rplan;
	struct kr_context *ctx = param->ctx;

	/* Start NS queries from the root to avoid cases
	 * where an NS drops out of the cache and the rest is unavailable;
	 * that would lead to a dependency loop in the current zone cut.
	 * Prefer IPv6 and continue with IPv4 if not available.
	 */
	uint16_t next_type = 0;
	if (!(qry->flags.AWAIT_IPV6) &&
	    !(ctx->options.NO_IPV6)) {
		next_type = KNOT_RRTYPE_AAAA;
		qry->flags.AWAIT_IPV6 = true;
	} else if (!(qry->flags.AWAIT_IPV4) &&
		   !(ctx->options.NO_IPV4)) {
		next_type = KNOT_RRTYPE_A;
		qry->flags.AWAIT_IPV4 = true;
		/* Hmm, no usable IPv6 then. */
		qry->ns.reputation |= KR_NS_NOIP6;
		kr_nsrep_update_rep(&qry->ns, qry->ns.reputation, ctx->cache_rep);
	}
	/* Bail out if the query is already pending or dependency loop. */
	if (!next_type || kr_rplan_satisfies(qry->parent, qry->ns.name, KNOT_CLASS_IN, next_type)) {
		/* Fall back to SBELT if root server query fails. */
		if (!next_type && qry->zone_cut.name[0] == '\0') {
			VERBOSE_MSG(qry, "=> fallback to root hints\n");
			kr_zonecut_set_sbelt(ctx, &qry->zone_cut);
			qry->flags.NO_THROTTLE = true; /* Pick even bad SBELT servers */
			return kr_error(EAGAIN);
		}
		/* No IPv4 nor IPv6, flag server as unusable. */
		VERBOSE_MSG(qry, "=> unresolvable NS address, bailing out\n");
		qry->ns.reputation |= KR_NS_NOIP4 | KR_NS_NOIP6;
		kr_nsrep_update_rep(&qry->ns, qry->ns.reputation, ctx->cache_rep);
		invalidate_ns(rplan, qry);
		return kr_error(EHOSTUNREACH);
	}
	/* Push new query to the resolution plan */
	struct kr_query *next =
		kr_rplan_push(rplan, qry, qry->ns.name, KNOT_CLASS_IN, next_type);
	if (!next) {
		return kr_error(ENOMEM);
	}
	next->flags.NONAUTH = true;

	/* At the root level with no NS addresses, add SBELT subrequest. */
	int ret = 0;
	if (qry->zone_cut.name[0] == '\0') {
		ret = kr_zonecut_set_sbelt(ctx, &next->zone_cut);
		if (ret == 0) { /* Copy TA and key since it's the same cut to avoid lookup. */
			kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
			kr_zonecut_set_sbelt(ctx, &qry->zone_cut); /* Add SBELT to parent in case query fails. */
			qry->flags.NO_THROTTLE = true; /* Pick even bad SBELT servers */
		}
	} else {
		next->flags.AWAIT_CUT = true;
	}
	return ret;
}

static int edns_put(knot_pkt_t *pkt, bool reclaim)
{
	if (!pkt->opt_rr) {
		return kr_ok();
	}
	if (reclaim) {
		/* Reclaim reserved size. */
		int ret = knot_pkt_reclaim(pkt, knot_edns_wire_size(pkt->opt_rr));
		if (ret != 0) {
			return ret;
		}
	}
	/* Write to packet. */
	assert(pkt->current == KNOT_ADDITIONAL);
	return knot_pkt_put(pkt, KNOT_COMPR_HINT_NONE, pkt->opt_rr, KNOT_PF_FREE);
}

/** Removes last EDNS OPT RR written to the packet. */
static int edns_erase_and_reserve(knot_pkt_t *pkt)
{
	/* Nothing to be done. */
	if (!pkt || !pkt->opt_rr) {
		return 0;
	}

	/* Fail if the data are located elsewhere than at the end of packet. */
	if (pkt->current != KNOT_ADDITIONAL ||
	    pkt->opt_rr != &pkt->rr[pkt->rrset_count - 1]) {
		return -1;
	}

	size_t len = knot_rrset_size(pkt->opt_rr);
	int16_t rr_removed = pkt->opt_rr->rrs.count;
	/* Decrease rrset counters. */
	pkt->rrset_count -= 1;
	pkt->sections[pkt->current].count -= 1;
	pkt->size -= len;
	knot_wire_add_arcount(pkt->wire, -rr_removed); /* ADDITIONAL */

	pkt->opt_rr = NULL;

	/* Reserve the freed space. */
	return knot_pkt_reserve(pkt, len);
}

static int edns_create(knot_pkt_t *pkt, knot_pkt_t *template, struct kr_request *req)
{
	pkt->opt_rr = knot_rrset_copy(req->ctx->opt_rr, &pkt->mm);
	size_t wire_size = knot_edns_wire_size(pkt->opt_rr);
#if defined(ENABLE_COOKIES)
	if (req->ctx->cookie_ctx.clnt.enabled ||
	    req->ctx->cookie_ctx.srvr.enabled) {
		wire_size += KR_COOKIE_OPT_MAX_LEN;
	}
#endif /* defined(ENABLE_COOKIES) */
	if (req->qsource.flags.tls) {
		if (req->ctx->tls_padding == -1)
			/* FIXME: we do not know how to reserve space for the
			 * default padding policy, since we can't predict what
			 * it will select. So I'm just guessing :/ */
			wire_size += KNOT_EDNS_OPTION_HDRLEN + 512;
		if (req->ctx->tls_padding >= 2)
			wire_size += KNOT_EDNS_OPTION_HDRLEN + req->ctx->tls_padding;
	}
	return knot_pkt_reserve(pkt, wire_size);
}

static int answer_prepare(struct kr_request *req, knot_pkt_t *query)
{
	knot_pkt_t *answer = req->answer;
	if (knot_pkt_init_response(answer, query) != 0) {
		return kr_error(ENOMEM); /* Failed to initialize answer */
	}
	/* Handle EDNS in the query */
	if (knot_pkt_has_edns(query)) {
		answer->opt_rr = knot_rrset_copy(req->ctx->opt_rr, &answer->mm);
		if (answer->opt_rr == NULL) {
			return kr_error(ENOMEM);
		}
		/* Set DO bit if set (DNSSEC requested). */
		if (knot_pkt_has_dnssec(query)) {
			knot_edns_set_do(answer->opt_rr);
		}
	}
	return kr_ok();
}

/** @return error code, ignoring if forced to truncate the packet. */
static int write_extra_records(const rr_array_t *arr, uint16_t reorder, knot_pkt_t *answer)
{
	for (size_t i = 0; i < arr->len; ++i) {
		int err = knot_pkt_put_rotate(answer, 0, arr->at[i], reorder, 0);
		if (err != KNOT_EOK) {
			return err == KNOT_ESPACE ? kr_ok() : kr_error(err);
		}
	}
	return kr_ok();
}

/**
 * @param all_secure optionally &&-combine security of written RRs into its value.
 *		     (i.e. if you pass a pointer to false, it will always remain false)
 * @param all_cname optionally output if all written RRs are CNAMEs and RRSIGs of CNAMEs
 * @return error code, ignoring if forced to truncate the packet.
 */
static int write_extra_ranked_records(const ranked_rr_array_t *arr, uint16_t reorder,
				      knot_pkt_t *answer, bool *all_secure, bool *all_cname)
{
	const bool has_dnssec = knot_pkt_has_dnssec(answer);
	bool all_sec = true;
	bool all_cn = (all_cname != NULL); /* optim.: init as false if not needed */
	int err = kr_ok();

	for (size_t i = 0; i < arr->len; ++i) {
		ranked_rr_array_entry_t * entry = arr->at[i];
		if (!entry->to_wire) {
			continue;
		}
		knot_rrset_t *rr = entry->rr;
		if (!has_dnssec) {
			if (rr->type != knot_pkt_qtype(answer) && knot_rrtype_is_dnssec(rr->type)) {
				continue;
			}
		}
		err = knot_pkt_put_rotate(answer, 0, rr, reorder, 0);
		if (err != KNOT_EOK) {
			if (err == KNOT_ESPACE) {
				err = kr_ok();
			}
			break;
		}

		if (rr->type != KNOT_RRTYPE_RRSIG) {
			all_sec = all_sec && kr_rank_test(entry->rank, KR_RANK_SECURE);
		}
		all_cn = all_cn && kr_rrset_type_maysig(entry->rr) == KNOT_RRTYPE_CNAME;
	}

	if (all_secure) {
		*all_secure = *all_secure && all_sec;
	}
	if (all_cname) {
		*all_cname = all_cn;
	}
	return err;
}

/** @internal Add an EDNS padding RR into the answer if requested and required. */
static int answer_padding(struct kr_request *request)
{
	if (!request || !request->answer || !request->ctx) {
		assert(false);
		return kr_error(EINVAL);
	}
	int32_t padding = request->ctx->tls_padding;
	knot_pkt_t *answer = request->answer;
	knot_rrset_t *opt_rr = answer->opt_rr;
	int32_t pad_bytes = -1;

	if (padding == -1) { /* use the default padding policy from libknot */
		pad_bytes = knot_pkt_default_padding_size(answer, opt_rr);
	}
	if (padding >= 2) {
		int32_t max_pad_bytes = knot_edns_get_payload(opt_rr) - (answer->size + knot_rrset_size(opt_rr));
		pad_bytes = MIN(knot_edns_alignment_size(answer->size, knot_rrset_size(opt_rr), padding),
				max_pad_bytes);
	}

	if (pad_bytes >= 0) {
		uint8_t zeros[MAX(1, pad_bytes)];
		memset(zeros, 0, sizeof(zeros));
		int r = knot_edns_add_option(opt_rr, KNOT_EDNS_OPTION_PADDING,
					     pad_bytes, zeros, &answer->mm);
		if (r != KNOT_EOK) {
			knot_rrset_clear(opt_rr, &answer->mm);
			return kr_error(r);
		}
	}
	return kr_ok();
}
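
/* Illustrative arithmetic (values made up): with tls_padding = 468 and an
 * answer wire of 180 bytes plus an 11-byte OPT RR, the PADDING option is
 * sized so that the final message rounds up to the next multiple of 468,
 * capped by max_pad_bytes so the EDNS payload limit is never exceeded.
 * tls_padding == -1 delegates the choice to libknot's default policy, and
 * values 0/1 leave pad_bytes at -1, so no padding option is added at all. */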

static int answer_fail(struct kr_request *request)
{
	/* Note: OPT in SERVFAIL response is still useful for cookies/additional info. */
	knot_pkt_t *answer = request->answer;
	knot_rrset_t *opt_rr = answer->opt_rr; /* it gets NULLed below */
	int ret = kr_pkt_clear_payload(answer);
	knot_wire_clear_ad(answer->wire);
	knot_wire_clear_aa(answer->wire);
	knot_wire_set_rcode(answer->wire, KNOT_RCODE_SERVFAIL);
	if (ret == 0 && opt_rr) {
		knot_pkt_begin(answer, KNOT_ADDITIONAL);
		answer_padding(request); /* Ignore failed padding in SERVFAIL answer. */
		answer->opt_rr = opt_rr;
		ret = edns_put(answer, false);
	}
	return ret;
}

static int answer_finalize(struct kr_request *request, int state)
{
	struct kr_rplan *rplan = &request->rplan;
	knot_pkt_t *answer = request->answer;

	/* Always set SERVFAIL for bogus answers. */
	if ((state & KR_STATE_FAIL) && rplan->pending.len > 0) {
		struct kr_query *last = array_tail(rplan->pending);
		if ((last->flags.DNSSEC_WANT) && (last->flags.DNSSEC_BOGUS)) {
			return answer_fail(request);
		}
	}

	struct kr_query *last = rplan->resolved.len > 0 ? array_tail(rplan->resolved) : NULL;
		/* TODO  ^^^^ this is slightly fragile */

	/* AD flag.  We can only change `secure` from true to false.
	 * Be conservative.  Primary approach: check ranks of all RRs in wire.
	 * Only "negative answers" need special handling. */
	bool secure = last != NULL && state == KR_STATE_DONE /*< suspicious otherwise */
		&& knot_pkt_qtype(answer) != KNOT_RRTYPE_RRSIG;
	if (last && (last->flags.STUB)) {
		secure = false; /* don't trust forwarding for now */
	}
	if (last && (last->flags.DNSSEC_OPTOUT)) {
		VERBOSE_MSG(NULL, "AD: opt-out\n");
		secure = false; /* the last answer is insecure due to opt-out */
	}

	const uint16_t reorder = last ? last->reorder : 0;
	bool answ_all_cnames = false/*arbitrary*/;
	if (request->answ_selected.len > 0) {
		assert(answer->current <= KNOT_ANSWER);
		/* Write answer records. */
		if (answer->current < KNOT_ANSWER) {
			knot_pkt_begin(answer, KNOT_ANSWER);
		}
		if (write_extra_ranked_records(&request->answ_selected, reorder,
						answer, &secure, &answ_all_cnames))
		{
			return answer_fail(request);
		}
	}

	/* Write authority records. */
	if (answer->current < KNOT_AUTHORITY) {
		knot_pkt_begin(answer, KNOT_AUTHORITY);
	}
	if (write_extra_ranked_records(&request->auth_selected, reorder,
	    answer, &secure, NULL)) {
		return answer_fail(request);
	}
	/* Write additional records. */
	knot_pkt_begin(answer, KNOT_ADDITIONAL);
	if (write_extra_records(&request->additional, reorder, answer)) {
		return answer_fail(request);
	}
	/* Write EDNS information */
	if (answer->opt_rr) {
		if (request->qsource.flags.tls) {
			if (answer_padding(request) != kr_ok()) {
				return answer_fail(request);
			}
		}
		knot_pkt_begin(answer, KNOT_ADDITIONAL);
		int ret = knot_pkt_put(answer, KNOT_COMPR_HINT_NONE,
				       answer->opt_rr, KNOT_PF_FREE);
		if (ret != KNOT_EOK) {
			return answer_fail(request);
		}
	}

	if (!last) secure = false; /*< should be no-op, mostly documentation */
	/* AD: "negative answers" need more handling. */
	if (kr_response_classify(answer) != PKT_NOERROR
	    /* Additionally check for CNAME chains that "end in NODATA",
	     * as those would also be PKT_NOERROR. */
	    || (answ_all_cnames && knot_pkt_qtype(answer) != KNOT_RRTYPE_CNAME)) {

		secure = secure && last->flags.DNSSEC_WANT
			&& !last->flags.DNSSEC_BOGUS && !last->flags.DNSSEC_INSECURE;
	}

	if (secure) {
		struct kr_query *cname_parent = last->cname_parent;
		while (cname_parent != NULL) {
			if (cname_parent->flags.DNSSEC_OPTOUT) {
				secure = false;
				break;
			}
			cname_parent = cname_parent->cname_parent;
		}
	}

	/* No detailed analysis ATM, just _SECURE or not.
	 * LATER: request->rank might better be computed in validator's finish phase. */
	VERBOSE_MSG(last, "AD: request%s classified as SECURE\n", secure ? "" : " NOT");
	request->rank = secure ? KR_RANK_SECURE : KR_RANK_INITIAL;

	/* Clear AD if not secure.  ATM answer has AD=1 if requested secured answer. */
	if (!secure) {
		knot_wire_clear_ad(answer->wire);
	}

	return kr_ok();
}

static int query_finalize(struct kr_request *request, struct kr_query *qry, knot_pkt_t *pkt)
{
	knot_pkt_begin(pkt, KNOT_ADDITIONAL);
	if (qry->flags.SAFEMODE)
		return kr_ok();
	/* Remove any EDNS records from any previous iteration. */
	int ret = edns_erase_and_reserve(pkt);
	if (ret) return ret;
	ret = edns_create(pkt, request->answer, request);
	if (ret) return ret;
	if (qry->flags.STUB) {
		/* Stub resolution (ask for +rd and +do) */
		knot_wire_set_rd(pkt->wire);
		if (knot_pkt_has_dnssec(request->qsource.packet)) {
			knot_edns_set_do(pkt->opt_rr);
		}
		if (knot_wire_get_cd(request->qsource.packet->wire)) {
			knot_wire_set_cd(pkt->wire);
		}
	} else {
		/* Full resolution (ask for +cd and +do) */
		knot_edns_set_do(pkt->opt_rr);
		knot_wire_set_cd(pkt->wire);
		if (qry->flags.FORWARD) {
			knot_wire_set_rd(pkt->wire);
		}
	}
	return kr_ok();
}
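
/* Note on the ordering above (summary, not new behaviour): the OPT RR from
 * any previous attempt is erased and its wire space re-reserved first, so
 * that edns_create() can size a fresh OPT (cookies, TLS padding) for this
 * attempt without overflowing the packet. */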

int kr_resolve_begin(struct kr_request *request, struct kr_context *ctx, knot_pkt_t *answer)
{
	/* Initialize request */
	request->ctx = ctx;
	request->answer = answer;
	request->options = ctx->options;
	request->state = KR_STATE_CONSUME;
	request->current_query = NULL;
	array_init(request->additional);
	array_init(request->answ_selected);
	array_init(request->auth_selected);
	array_init(request->add_selected);
	request->answ_validated = false;
	request->auth_validated = false;
	request->rank = KR_RANK_INITIAL;
	request->trace_log = NULL;
	request->trace_finish = NULL;

	/* Expect first query */
	kr_rplan_init(&request->rplan, request, &request->pool);
	return KR_STATE_CONSUME;
}
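
/* Illustrative driver sketch (hypothetical caller, error handling omitted):
 * the I/O layer is expected to drive a request roughly like this:
 *
 *	kr_resolve_begin(req, ctx, answer_pkt);
 *	int state = kr_resolve_consume(req, NULL, query_pkt); // the question
 *	while (state == KR_STATE_PRODUCE) {
 *		state = kr_resolve_produce(req, &dst, &type, out_pkt);
 *		// send out_pkt, await the reply, then:
 *		// state = kr_resolve_consume(req, src, reply_pkt);
 *	}
 *	kr_resolve_finish(req, state);
 */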

static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
{
	struct kr_rplan *rplan = &request->rplan;
	const knot_dname_t *qname = knot_pkt_qname(packet);
	uint16_t qclass = knot_pkt_qclass(packet);
	uint16_t qtype = knot_pkt_qtype(packet);
	struct kr_query *qry = NULL;
	struct kr_context *ctx = request->ctx;
	struct kr_cookie_ctx *cookie_ctx = ctx ? &ctx->cookie_ctx : NULL;

	if (qname != NULL) {
		qry = kr_rplan_push(rplan, NULL, qname, qclass, qtype);
	} else if (cookie_ctx && cookie_ctx->srvr.enabled &&
		   knot_wire_get_qdcount(packet->wire) == 0 &&
		   knot_pkt_has_edns(packet) &&
		   knot_pkt_edns_option(packet, KNOT_EDNS_OPTION_COOKIE)) {
		/* Plan empty query only for cookies. */
		qry = kr_rplan_push_empty(rplan, NULL);
	}
	if (!qry) {
		return KR_STATE_FAIL;
	}

	if (qname != NULL) {
		/* Deferred zone cut lookup for this query. */
		qry->flags.AWAIT_CUT = true;
		/* Want DNSSEC if it's possible to secure this name (e.g. is covered by any TA) */
		if ((knot_wire_get_ad(packet->wire) || knot_pkt_has_dnssec(packet)) &&
		    kr_ta_covers_qry(request->ctx, qname, qtype)) {
			qry->flags.DNSSEC_WANT = true;
		}
	}

	/* Initialize answer packet */
	knot_pkt_t *answer = request->answer;
	knot_wire_set_qr(answer->wire);
	knot_wire_clear_aa(answer->wire);
	knot_wire_set_ra(answer->wire);
	knot_wire_set_rcode(answer->wire, KNOT_RCODE_NOERROR);

	assert(request->qsource.packet);
	if (knot_wire_get_cd(request->qsource.packet->wire)) {
		knot_wire_set_cd(answer->wire);
	} else if (qry->flags.DNSSEC_WANT) {
		knot_wire_set_ad(answer->wire);
	}

	/* Expect answer, pop if satisfied immediately */
	ITERATE_LAYERS(request, qry, begin);
	if ((request->state & KR_STATE_DONE) != 0) {
		kr_rplan_pop(rplan, qry);
	} else if (qname == NULL) {
		/* It is an empty query which must be resolved by the
		   `begin` layer of the cookie module.
		   If the query isn't resolved, fail. */
		request->state = KR_STATE_FAIL;
	}
	return request->state;
}

KR_PURE static bool kr_inaddr_equal(const struct sockaddr *a, const struct sockaddr *b)
{
	const int a_len = kr_inaddr_len(a);
	const int b_len = kr_inaddr_len(b);
	return a_len == b_len && memcmp(kr_inaddr(a), kr_inaddr(b), a_len) == 0;
}

static void update_nslist_rtt(struct kr_context *ctx, struct kr_query *qry, const struct sockaddr *src)
{
	/* Do not track in safe mode. */
	if (qry->flags.SAFEMODE) {
		return;
	}

	/* Calculate total resolution time from the time the query was generated. */
	uint64_t elapsed = kr_now() - qry->timestamp_mono;
	elapsed = elapsed > UINT_MAX ? UINT_MAX : elapsed;

	/* NSs in the preference list prior to the one who responded will be penalised
	 * with the RETRY timer interval. This is because we know they didn't respond
	 * for N retries, so their RTT must be at least N * RETRY.
	 * The NS in the preference list that responded will have RTT relative to the
	 * time when the query was sent out, not when it was originated.
	 */
	for (size_t i = 0; i < KR_NSREP_MAXADDR; ++i) {
		const struct sockaddr *addr = &qry->ns.addr[i].ip;
		if (addr->sa_family == AF_UNSPEC) {
			break;
		}
		/* If this address is the source of the answer, update its RTT */
		if (kr_inaddr_equal(src, addr)) {
			kr_nsrep_update_rtt(&qry->ns, addr, elapsed, ctx->cache_rtt, KR_NS_UPDATE);
			WITH_VERBOSE(qry) {
				char addr_str[INET6_ADDRSTRLEN];
				inet_ntop(addr->sa_family, kr_inaddr(addr), addr_str, sizeof(addr_str));
				VERBOSE_MSG(qry, "<= server: '%s' rtt: %"PRIu64" ms\n",
						addr_str, elapsed);
			}
		} else {
			/* Response didn't come from this IP, but we know the RTT must be at least
			 * several RETRY timer tries, e.g. if we have addresses [a, b, c] and we have
			 * tried [a, b] when the answer from 'a' came after 350ms, then we know
			 * that 'b' didn't respond for at least 350 - (1 * 300) ms. We can't say that
			 * its RTT is 50ms, but we can say that its score shouldn't be less than 50. */
			kr_nsrep_update_rtt(&qry->ns, addr, elapsed, ctx->cache_rtt, KR_NS_MAX);
			WITH_VERBOSE(qry) {
				char addr_str[INET6_ADDRSTRLEN];
				inet_ntop(addr->sa_family, kr_inaddr(addr), addr_str, sizeof(addr_str));
				VERBOSE_MSG(qry, "<= server: '%s' rtt: >= %"PRIu64" ms\n",
						addr_str, elapsed);
			}
		}
		/* The next server in the list was tried one RETRY interval later,
		 * so subtract it from the elapsed time before the next iteration. */
		if (elapsed < KR_CONN_RETRY) {
			break;
		}
		elapsed = elapsed - KR_CONN_RETRY;
	}
}

static void update_nslist_score(struct kr_request *request, struct kr_query *qry, const struct sockaddr *src, knot_pkt_t *packet)
{
	struct kr_context *ctx = request->ctx;
	/* On successful answer, update preference list RTT and penalise timer. */
	if (!(request->state & KR_STATE_FAIL)) {
		/* Update RTT information for preference list */
		update_nslist_rtt(ctx, qry, src);
		/* Do not complete NS address resolution on soft-fail. */
		const int rcode = packet ? knot_wire_get_rcode(packet->wire) : 0;
		if (rcode != KNOT_RCODE_SERVFAIL && rcode != KNOT_RCODE_REFUSED) {
			qry->flags.AWAIT_IPV6 = false;
			qry->flags.AWAIT_IPV4 = false;
		} else { /* Penalize SERVFAILs. */
			kr_nsrep_update_rtt(&qry->ns, src, KR_NS_PENALTY, ctx->cache_rtt, KR_NS_ADD);
		}
	}
}

static bool resolution_time_exceeded(struct kr_query *qry, uint64_t now)
{
	uint64_t resolving_time = now - qry->creation_time_mono;
	if (resolving_time > KR_RESOLVE_TIME_LIMIT) {
		WITH_VERBOSE(qry) {
			VERBOSE_MSG(qry, "query resolution time limit exceeded\n");
		}
		return true;
	}
	return false;
}

int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, knot_pkt_t *packet)
{
	struct kr_rplan *rplan = &request->rplan;

	/* Empty resolution plan, push packet as the new query */
	if (packet && kr_rplan_empty(rplan)) {
		if (answer_prepare(request, packet) != 0) {
			return KR_STATE_FAIL;
		}
		return resolve_query(request, packet);
	}

	/* Different processing for network error */
	struct kr_query *qry = array_tail(rplan->pending);
	/* Check overall resolution time */
	if (resolution_time_exceeded(qry, kr_now())) {
		return KR_STATE_FAIL;
	}
	bool tried_tcp = (qry->flags.TCP);
	if (!packet || packet->size == 0) {
		if (tried_tcp) {
			request->state = KR_STATE_FAIL;
		} else {
			qry->flags.TCP = true;
		}
	} else {
		/* Packet cleared, derandomize QNAME. */
		knot_dname_t *qname_raw = knot_pkt_qname(packet);
		if (qname_raw && qry->secret != 0) {
			randomized_qname_case(qname_raw, qry->secret);
		}
		request->state = KR_STATE_CONSUME;
		if (qry->flags.CACHED) {
			ITERATE_LAYERS(request, qry, consume, packet);
		} else {
			/* Fill in source and latency information. */
			request->upstream.rtt = kr_now() - qry->timestamp_mono;
			request->upstream.addr = src;
			ITERATE_LAYERS(request, qry, consume, packet);
			/* Clear temporary information */
			request->upstream.addr = NULL;
			request->upstream.rtt = 0;
		}
	}

	/* Track RTT for iterative answers */
	if (src && !(qry->flags.CACHED)) {
		update_nslist_score(request, qry, src, packet);
	}
	/* Resolution failed, invalidate current NS. */
	if (request->state & KR_STATE_FAIL) {
		invalidate_ns(rplan, qry);
		qry->flags.RESOLVED = false;
	}

	/* Pop query if resolved. */
	if (request->state == KR_STATE_YIELD) {
		return KR_STATE_PRODUCE; /* Requery */
	} else if (qry->flags.RESOLVED) {
		kr_rplan_pop(rplan, qry);
	} else if (!tried_tcp && (qry->flags.TCP)) {
		return KR_STATE_PRODUCE; /* Requery over TCP */
	} else { /* Clear query flags for next attempt */
		qry->flags.CACHED = false;
		if (!request->options.TCP) {
			qry->flags.TCP = false;
		}
	}

	ITERATE_LAYERS(request, qry, reset);

	/* Do not finish with bogus answer. */
	if (qry->flags.DNSSEC_BOGUS) {
		return KR_STATE_FAIL;
	}

	return kr_rplan_empty(&request->rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
}

/** @internal Spawn subrequest in current zone cut (no minimization or lookup). */
static struct kr_query *zone_cut_subreq(struct kr_rplan *rplan, struct kr_query *parent,
                           const knot_dname_t *qname, uint16_t qtype)
{
	struct kr_query *next = kr_rplan_push(rplan, parent, qname, parent->sclass, qtype);
	if (!next) {
		return NULL;
	}
	kr_zonecut_set(&next->zone_cut, parent->zone_cut.name);
	if (kr_zonecut_copy(&next->zone_cut, &parent->zone_cut) != 0 ||
	    kr_zonecut_copy_trust(&next->zone_cut, &parent->zone_cut) != 0) {
		return NULL;
	}
	next->flags.NO_MINIMIZE = true;
	if (parent->flags.DNSSEC_WANT) {
		next->flags.DNSSEC_WANT = true;
	}
	return next;
}

static int forward_trust_chain_check(struct kr_request *request, struct kr_query *qry, bool resume)
{
	struct kr_rplan *rplan = &request->rplan;
	map_t *trust_anchors = &request->ctx->trust_anchors;
	map_t *negative_anchors = &request->ctx->negative_anchors;

	if (qry->parent != NULL &&
	    !(qry->forward_flags.CNAME) &&
	    !(qry->flags.DNS64_MARK) &&
	    knot_dname_in_bailiwick(qry->zone_cut.name, qry->parent->zone_cut.name) >= 0) {
		return KR_STATE_PRODUCE;
	}

	assert(qry->flags.FORWARD);

	if (!trust_anchors) {
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_PRODUCE;
	}

	if (qry->flags.DNSSEC_INSECURE) {
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_PRODUCE;
	}

	if (qry->forward_flags.NO_MINIMIZE) {
		qry->flags.AWAIT_CUT = false;
		return KR_STATE_PRODUCE;
	}

	const knot_dname_t *start_name = qry->sname;
	if ((qry->flags.AWAIT_CUT) && !resume) {
		qry->flags.AWAIT_CUT = false;
		const knot_dname_t *longest_ta = kr_ta_get_longest_name(trust_anchors, qry->sname);
		if (longest_ta) {
			start_name = longest_ta;
			qry->zone_cut.name = knot_dname_copy(start_name, qry->zone_cut.pool);
			qry->flags.DNSSEC_WANT = true;
		} else {
			qry->flags.DNSSEC_WANT = false;
			return KR_STATE_PRODUCE;
		}
	}

	bool has_ta = (qry->zone_cut.trust_anchor != NULL);
	knot_dname_t *ta_name = (has_ta ? qry->zone_cut.trust_anchor->owner : NULL);
	bool refetch_ta = (!has_ta || !knot_dname_is_equal(qry->zone_cut.name, ta_name));
	bool is_dnskey_subreq = kr_rplan_satisfies(qry, ta_name, KNOT_CLASS_IN, KNOT_RRTYPE_DNSKEY);
	bool refetch_key = has_ta && (!qry->zone_cut.key || !knot_dname_is_equal(ta_name, qry->zone_cut.key->owner));
	if (refetch_key && !is_dnskey_subreq) {
		struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
		if (!next) {
			return KR_STATE_FAIL;
		}
		return KR_STATE_DONE;
	}

	int name_offset = 1;
	const knot_dname_t *wanted_name;
	bool nods, ds_req, ns_req, minimized, ns_exist;
	do {
		wanted_name = start_name;
		ds_req = false;
		ns_req = false;
		ns_exist = true;

		int cut_labels = knot_dname_labels(qry->zone_cut.name, NULL);
		int wanted_name_labels = knot_dname_labels(wanted_name, NULL);
		while (wanted_name[0] && wanted_name_labels > cut_labels + name_offset) {
			wanted_name = knot_wire_next_label(wanted_name, NULL);
			wanted_name_labels -= 1;
		}
		minimized = (wanted_name != qry->sname);
		for (int i = 0; i < request->rplan.resolved.len; ++i) {
			struct kr_query *q = request->rplan.resolved.at[i];
			if (q->parent == qry &&
			    q->sclass == qry->sclass &&
			    (q->stype == KNOT_RRTYPE_DS || q->stype == KNOT_RRTYPE_NS) &&
			    knot_dname_is_equal(q->sname, wanted_name)) {
				if (q->stype == KNOT_RRTYPE_DS) {
					ds_req = true;
1065
					if (q->flags.CNAME) {
1066
						ns_exist = false;
1067
					} else if (!(q->flags.DNSSEC_OPTOUT)) {
1068 1069 1070 1071
						int ret = kr_dnssec_matches_name_and_type(&request->auth_selected, q->uid,
											  wanted_name, KNOT_RRTYPE_NS);
						ns_exist = (ret == kr_ok());
					}
				} else {