iterate.c 19.7 KB
Newer Older
Marek Vavruša's avatar
Marek Vavruša committed
1
/*  Copyright (C) 2014 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
2

Marek Vavruša's avatar
Marek Vavruša committed
3 4 5 6
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.
7

Marek Vavruša's avatar
Marek Vavruša committed
8 9 10 11
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
12

Marek Vavruša's avatar
Marek Vavruša committed
13 14 15
    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
16

17
#include <sys/time.h>
18
#include <assert.h>
19

20 21
#include <libknot/descriptor.h>
#include <libknot/rrtype/rdname.h>
22
#include <libknot/rrtype/rrsig.h>
23 24

#include "lib/layer/iterate.h"
25
#include "lib/resolve.h"
26
#include "lib/rplan.h"
27
#include "lib/defines.h"
28
#include "lib/nsrep.h"
29
#include "lib/module.h"
30
#include "lib/dnssec/ta.h"
31

32
#define DEBUG_MSG(fmt...) QRDEBUG(req->current_query, "iter", fmt)
33

34
/* Iterator often walks through packet section, this is an abstraction. */
35
typedef int (*rr_callback_t)(const knot_rrset_t *, unsigned, struct kr_request *);
36

37
/** Return minimized QNAME/QTYPE for current zone cut. */
38
static const knot_dname_t *minimized_qname(struct kr_query *query, uint16_t *qtype)
39
{
40 41
	/* Minimization disabled. */
	const knot_dname_t *qname = query->sname;
42
	if (qname[0] == '\0' || query->flags & (QUERY_NO_MINIMIZE|QUERY_STUB)) {
43 44
		return qname;
	}
45

46 47 48
	/* Minimize name to contain current zone cut + 1 label. */
	int cut_labels = knot_dname_labels(query->zone_cut.name, NULL);
	int qname_labels = knot_dname_labels(qname, NULL);
Marek Vavruša's avatar
Marek Vavruša committed
49
	while(qname[0] && qname_labels > cut_labels + 1) {
50 51
		qname = knot_wire_next_label(qname, NULL);
		qname_labels -= 1;
52 53
	}

54 55 56
	/* Hide QTYPE if minimized. */
	if (qname != query->sname) {
		*qtype = KNOT_RRTYPE_NS;
57
	}
58

59 60 61
	return qname;
}

62
/** Answer is paired to query. */
63 64 65
static bool is_paired_to_query(const knot_pkt_t *answer, struct kr_query *query)
{
	uint16_t qtype = query->stype;
66
	const knot_dname_t *qname = minimized_qname(query, &qtype);
67

68
	return query->id      == knot_wire_get_id(answer->wire) &&
69
	       knot_wire_get_qdcount(answer->wire) > 0 &&
70
	       (query->sclass == KNOT_CLASS_ANY || query->sclass  == knot_pkt_qclass(answer)) &&
71 72
	       qtype          == knot_pkt_qtype(answer) &&
	       knot_dname_is_equal(qname, knot_pkt_qname(answer));
73 74
}

75
/** Relaxed rule for AA, either AA=1 or SOA matching zone cut is required. */
76 77 78 79 80 81 82 83 84
static bool is_authoritative(const knot_pkt_t *answer, struct kr_query *query)
{
	if (knot_wire_get_aa(answer->wire)) {
		return true;
	}

	const knot_pktsection_t *ns = knot_pkt_section(answer, KNOT_AUTHORITY);
	for (unsigned i = 0; i < ns->count; ++i) {
		const knot_rrset_t *rr = knot_pkt_rr(ns, i);
85
		if (rr->type == KNOT_RRTYPE_SOA && knot_dname_in(query->zone_cut.name, rr->owner)) {
86 87 88 89
			return true;
		}
	}

90 91 92 93 94 95
#ifndef STRICT_MODE
	/* Last resort to work around broken auths, if the zone cut is at/parent of the QNAME. */
	if (knot_dname_is_equal(query->zone_cut.name, knot_pkt_qname(answer))) {
		return true;
	}
#endif
96 97 98
	return false;
}

99
/** Classify a response by RCODE and emptiness of the ANSWER section. */
int kr_response_classify(knot_pkt_t *pkt)
{
	switch (knot_wire_get_rcode(pkt->wire)) {
	case KNOT_RCODE_NOERROR: {
		/* NOERROR with an empty answer section is a NODATA proof. */
		const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
		return (an->count > 0) ? PKT_NOERROR : PKT_NODATA;
	}
	case KNOT_RCODE_NXDOMAIN:
		return PKT_NXDOMAIN;
	case KNOT_RCODE_REFUSED:
		return PKT_REFUSED;
	default:
		return PKT_ERROR;
	}
}

114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133
/** @internal Filter ANY or loopback addresses.
 *
 * @param addr raw address bytes in network byte order
 * @param len  address length; 4 (IPv4) and 16 (IPv6) are recognized,
 *             any other length is accepted as-is
 * @return false for 0.0.0.0, 127.0.0.0/8, :: and ::1; true otherwise
 */
static bool is_valid_addr(const uint8_t *addr, size_t len)
{
	if (len == sizeof(struct in_addr)) {
		/* Filter ANY and 127.0.0.0/8.
		 * Copy the word out instead of casting the pointer: addr may be
		 * unaligned and the cast would also violate strict aliasing (UB). */
		uint32_t ip_net;
		memcpy(&ip_net, addr, sizeof(ip_net));
		const uint32_t ip_host = ntohl(ip_net);
		if (ip_host == 0 || (ip_host & 0xff000000) == 0x7f000000) {
			return false;
		}
	} else if (len == sizeof(struct in6_addr)) {
		struct in6_addr ip6_mask;
		memset(&ip6_mask, 0, sizeof(ip6_mask));
		/* All except last byte are zeroed, last byte defines ANY/::1 */
		if (memcmp(addr, ip6_mask.s6_addr, sizeof(ip6_mask.s6_addr) - 1) == 0) {
			return (addr[len - 1] > 1);
		}
	}
	return true;
}

134
static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
135 136
{
	if (rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA) {
137
		const knot_rdata_t *rdata = rr->rrs.data;
138 139 140 141
		if (!(query->flags & QUERY_ALLOW_LOCAL) &&
			!is_valid_addr(knot_rdata_data(rdata), knot_rdata_rdlen(rdata))) {
			return KNOT_STATE_CONSUME; /* Ignore invalid addresses */
		}
142 143 144
		int ret = kr_zonecut_add(&query->zone_cut, rr->owner, rdata);
		if (ret != 0) {
			return KNOT_STATE_FAIL;
145 146 147
		}
	}

148
	return KNOT_STATE_CONSUME;
149 150
}

151
static int update_parent(const knot_rrset_t *rr, struct kr_query *qry)
152
{
153
	return update_nsaddr(rr, qry->parent);
154 155
}

156
/** Copy a record into the final answer, scrubbing unsolicited DNSSEC records.
 *
 * @param rr     record from the upstream response
 * @param hint   compression hint for knot_pkt_put (e.g. KNOT_COMPR_HINT_QNAME)
 * @param answer final answer packet being built
 * @return KNOT_STATE_DONE on success/scrub/truncation, KNOT_STATE_FAIL on OOM
 */
static int update_answer(const knot_rrset_t *rr, unsigned hint, knot_pkt_t *answer)
{
	/* Scrub DNSSEC records when not requested. */
	if (!knot_pkt_has_dnssec(answer)) {
		if (rr->type != knot_pkt_qtype(answer) && knot_rrtype_is_dnssec(rr->type)) {
			return KNOT_STATE_DONE; /* Scrub */
		}
	}
	/* Copy record, as it may be accessed after packet processing. */
	knot_rrset_t *copy = knot_rrset_copy(rr, &answer->mm);
	if (copy == NULL) {
		/* Out of memory; the copy would be dereferenced by knot_pkt_put below. */
		return KNOT_STATE_FAIL;
	}
	/* Write to final answer. */
	int ret = knot_pkt_put(answer, hint, copy, KNOT_PF_FREE);
	if (ret != KNOT_EOK) {
		/* Didn't fit: signal truncation instead of failing outright. */
		knot_wire_set_tc(answer->wire);
		return KNOT_STATE_DONE;
	}

	return KNOT_STATE_DONE;
}

176
/** Harvest glue addresses for the given nameserver name from all packet sections. */
static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_request *req)
{
	bool used_glue = false;
	for (knot_section_t sec_id = KNOT_ANSWER; sec_id <= KNOT_ADDITIONAL; ++sec_id) {
		const knot_pktsection_t *sec = knot_pkt_section(pkt, sec_id);
		for (unsigned i = 0; i < sec->count; ++i) {
			const knot_rrset_t *rr = knot_pkt_rr(sec, i);
			if (!knot_dname_is_equal(ns, rr->owner)) {
				continue;
			}
			/* Best effort: failures to add the address are ignored. */
			(void) update_nsaddr(rr, req->current_query);
			used_glue = true;
		}
	}
	WITH_DEBUG {
		char name_str[KNOT_DNAME_MAXLEN];
		knot_dname_to_str(name_str, ns, sizeof(name_str));
		if (used_glue) {
			DEBUG_MSG("<= using glue for '%s'\n", name_str);
		}
	}
}

/** Attempt to find glue for given nameserver name (best effort). */
199
static int has_glue(knot_pkt_t *pkt, const knot_dname_t *ns)
200
{
201 202 203 204 205 206 207 208
	for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
		const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
		for (unsigned k = 0; k < sec->count; ++k) {
			const knot_rrset_t *rr = knot_pkt_rr(sec, k);
			if (knot_dname_is_equal(ns, rr->owner) &&
			    (rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA)) {
				return 1;
			}
209 210 211
		}
	}
	return 0;
212 213
}

214
/** Update the zone cut from an NS RRset found in the AUTHORITY section.
 *
 * Validates bailiwick, descends to a new zone cut if the NS owner is below
 * the current one, and collects glue addresses for each nameserver.
 *
 * @return KNOT_STATE_DONE when the cut was updated (referral),
 *         KNOT_STATE_CONSUME when nothing changed,
 *         KNOT_STATE_FAIL on a bailiwick violation (referral case).
 */
static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_request *req)
{
	struct kr_query *qry = req->current_query;
	struct kr_zonecut *cut = &qry->zone_cut;
	int state = KNOT_STATE_CONSUME;

	/* Authority MUST be at/below the authority of the nameserver, otherwise
	 * possible cache injection attempt. */
	if (!knot_dname_in(cut->name, rr->owner)) {
		DEBUG_MSG("<= authority: ns outside bailiwick\n");
#ifdef STRICT_MODE
		return KNOT_STATE_FAIL;
#else
		/* Workaround: ignore out-of-bailiwick NSs for authoritative answers,
		 * but fail for referrals. This is important to detect lame answers. */
		if (knot_pkt_section(pkt, KNOT_ANSWER)->count == 0) {
			state = KNOT_STATE_FAIL;
		}
		return state;
#endif
	}

	/* Remember current bailiwick for NS processing. */
	const knot_dname_t *current_cut = cut->name;
	/* Update zone cut name */
	if (!knot_dname_is_equal(rr->owner, cut->name)) {
		/* Remember parent cut and descend to new (keep keys and TA). */
		struct kr_zonecut *parent = mm_alloc(&req->pool, sizeof(*parent));
		if (parent) {
			memcpy(parent, cut, sizeof(*parent));
			kr_zonecut_init(cut, rr->owner, &req->pool);
			cut->key = parent->key;
			cut->trust_anchor = parent->trust_anchor;
			cut->parent = parent;
		} else {
			/* OOM fallback: reuse the cut in place, losing the parent link. */
			kr_zonecut_set(cut, rr->owner);
		}
		state = KNOT_STATE_DONE;
	}

	/* Fetch glue for each NS */
	for (unsigned i = 0; i < rr->rrs.rr_count; ++i) {
		const knot_dname_t *ns_name = knot_ns_name(&rr->rrs, i);
		int glue_records = has_glue(pkt, ns_name);
		/* Glue is mandatory for NS below zone */
		if (!glue_records && knot_dname_in(rr->owner, ns_name)) {
			DEBUG_MSG("<= authority: missing mandatory glue, rejecting\n");
			continue;
		}
		/* Register the NS name even before its addresses are known. */
		kr_zonecut_add(cut, ns_name, NULL);
		/* Choose when to use glue records. */
		if (qry->flags & QUERY_PERMISSIVE) {
			fetch_glue(pkt, ns_name, req);
		} else if (qry->flags & QUERY_STRICT) {
			/* Strict mode uses only mandatory glue. */
			if (knot_dname_in(cut->name, ns_name))
				fetch_glue(pkt, ns_name, req);
		} else {
			/* Normal mode uses in-bailiwick glue. */
			if (knot_dname_in(current_cut, ns_name))
				fetch_glue(pkt, ns_name, req);
		}
	}

	return state;
}

281
/** Process the AUTHORITY section: decide referral vs. authoritative answer
 * and update the zone cut from NS/SOA records.
 *
 * @return KNOT_STATE_DONE for a referral (zone cut updated),
 *         KNOT_STATE_CONSUME otherwise, KNOT_STATE_FAIL on bad NS data.
 */
static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
	int result = KNOT_STATE_CONSUME;
	struct kr_query *qry = req->current_query;
	const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);

	/* Stub resolution doesn't process authority */
	if (qry->flags & QUERY_STUB) {
		return KNOT_STATE_CONSUME;
	}

#ifdef STRICT_MODE
	/* AA, terminate resolution chain. */
	if (knot_wire_get_aa(pkt->wire)) {
		return KNOT_STATE_CONSUME;
	}
#else
	/* Work around servers sending back CNAME with different delegation and no AA. */
	const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
	if (an->count > 0 && ns->count > 0) {
		const knot_rrset_t *rr = knot_pkt_rr(an, 0);
		if (rr->type == KNOT_RRTYPE_CNAME) {
			return KNOT_STATE_CONSUME;
		}
	}
#endif

	/* Update zone cut information. */
	for (unsigned i = 0; i < ns->count; ++i) {
		const knot_rrset_t *rr = knot_pkt_rr(ns, i);
		if (rr->type == KNOT_RRTYPE_NS) {
			int state = update_cut(pkt, rr, req);
			switch(state) {
			case KNOT_STATE_DONE: result = state; break;
			case KNOT_STATE_FAIL: return state; break; /* NOTE(review): break after return is unreachable */
			default:              /* continue */ break;
			}
		} else if (rr->type == KNOT_RRTYPE_SOA && knot_dname_is_sub(rr->owner, qry->zone_cut.name)) {
			/* SOA below cut in authority indicates different authority, but same NS set. */
			qry->zone_cut.name = knot_dname_copy(rr->owner, &req->pool);
		}
	}

	/* CONSUME => Unhelpful referral.
	 * DONE    => Zone cut updated.  */
	return result;
}

329
/** Finalize the client-facing answer: copy the RCODE and stash in-bailiwick
 * AUTHORITY records (negative proofs) for later wire serialization.
 */
static void finalize_answer(knot_pkt_t *pkt, struct kr_query *qry, struct kr_request *req)
{
	/* Finalize header */
	knot_pkt_t *answer = req->answer;
	knot_wire_set_rcode(answer->wire, knot_wire_get_rcode(pkt->wire));

	/* Fill in bailiwick records in authority */
	const bool scrub_dnssec = !knot_pkt_has_dnssec(answer);
	const uint16_t qtype = knot_pkt_qtype(answer);
	struct kr_zonecut *cut = &qry->zone_cut;
	int pkt_class = kr_response_classify(pkt);
	/* Only negative answers carry the NXDOMAIN/NODATA proof in AUTHORITY. */
	if ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA))) {
		const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);
		for (unsigned i = 0; i < ns->count; ++i) {
			const knot_rrset_t *rr = knot_pkt_rr(ns, i);
			/* Scrub DNSSEC records when not requested. */
			if (scrub_dnssec && rr->type != qtype && knot_rrtype_is_dnssec(rr->type)) {
				continue;
			}
			/* Stash the authority records, they will be written to wire on answer finalization. */
			if (knot_dname_in(cut->name, rr->owner)) {
				kr_rrarray_add(&req->authority, rr, &answer->mm);
			}
		}
	}
}

356
/** Process the ANSWER section of an authoritative response.
 *
 * Handles QNAME-minimization retries, lame-answer detection, walks the CNAME
 * chain (copying records to the final answer or to the parent query), and
 * either finalizes the answer or plans a follow-up query for the CNAME target.
 *
 * @return KNOT_STATE_DONE when the query is resolved or a follow-up is planned,
 *         KNOT_STATE_CONSUME to retry with a non-minimized name,
 *         KNOT_STATE_FAIL on lame/looping/oversized CNAME chains or OOM.
 */
static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
{
	struct kr_query *query = req->current_query;
	/* Response for minimized QNAME.
	 * NODATA   => may be empty non-terminal, retry (found zone cut)
	 * NOERROR  => found zone cut, retry
	 * NXDOMAIN => parent is zone cut, retry as a workaround for bad authoritatives
	 */
	bool is_final = (query->parent == NULL);
	int pkt_class = kr_response_classify(pkt);
	if (!knot_dname_is_equal(knot_pkt_qname(pkt), query->sname) &&
	    (pkt_class & (PKT_NOERROR|PKT_NXDOMAIN|PKT_REFUSED|PKT_NODATA))) {
		DEBUG_MSG("<= found cut, retrying with non-minimized name\n");
		query->flags |= QUERY_NO_MINIMIZE;
		return KNOT_STATE_CONSUME;
	}

	/* This answer didn't improve resolution chain, therefore must be authoritative (relaxed to negative). */
	if (!(query->flags & QUERY_STUB) && !is_authoritative(pkt, query)) {
		if (pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) {
			DEBUG_MSG("<= lame response: non-auth sent negative response\n");
			return KNOT_STATE_FAIL;
		}
	}

	/* Process answer type */
	const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
	const knot_dname_t *cname = NULL;
	const knot_dname_t *pending_cname = query->sname;
	unsigned cname_chain_len = 0;
	while (pending_cname) {
		/* CNAME was found at previous iteration, but records may not follow the correct order.
		 * Try to find records for pending_cname owner from section start. */
		cname = pending_cname;
		pending_cname = NULL;
		for (unsigned i = 0; i < an->count; ++i) {
			const knot_rrset_t *rr = knot_pkt_rr(an, i);
			if (!knot_dname_is_equal(rr->owner, cname)) {
				continue;
			}
			/* Process records matching current SNAME */
			unsigned hint = 0;
			if(knot_dname_is_equal(cname, knot_pkt_qname(req->answer))) {
				hint = KNOT_COMPR_HINT_QNAME;
			}
			/* Final query writes to the client answer; sub-queries feed the parent's cut. */
			int state = is_final ? update_answer(rr, hint, req->answer) : update_parent(rr, query);
			if (state == KNOT_STATE_FAIL) {
				return state;
			}
			/* Jump to next CNAME target; an explicit CNAME query is never followed. */
			if ((query->stype == KNOT_RRTYPE_CNAME) || (rr->type != KNOT_RRTYPE_CNAME)) {
				continue;
			}
			cname_chain_len += 1;
			pending_cname = knot_cname_name(&rr->rrs);
			if (!pending_cname) {
				break;
			}
			/* Guard against maliciously long or packet-inflating chains. */
			if (cname_chain_len > an->count || cname_chain_len > KR_CNAME_CHAIN_LIMIT) {
				DEBUG_MSG("<= too long cname chain\n");
				return KNOT_STATE_FAIL;
			}
			/* If secure, don't use pending_cname immediately:
			 * there can still be an RRSIG for the "old" cname in this section. */
			if (query->flags & QUERY_DNSSEC_WANT) {
				/* Follow chain only within current cut (if secure). */
				if (pending_cname && !knot_dname_in(query->zone_cut.name, pending_cname)) {
					pending_cname = NULL;
				}
			} else {
				/* Try to find next cname */
				cname = pending_cname;
			}
		}
	}

	/* Make sure that this is an authoritative answer (even with AA=0) for other layers */
	knot_wire_set_aa(pkt->wire);
	/* Either way it resolves current query. */
	query->flags |= QUERY_RESOLVED;
	/* Follow canonical name as next SNAME. */
	if (!knot_dname_is_equal(cname, query->sname)) {
		/* Check if target record has been already copied */
		if (is_final) {
			const knot_pktsection_t *an = knot_pkt_section(req->answer, KNOT_ANSWER);
			for (unsigned i = 0; i < an->count; ++i) {
				const knot_rrset_t *rr = knot_pkt_rr(an, i);
				if (!knot_dname_is_equal(rr->owner, cname)) {
					continue;
				}
				if ((rr->rclass != query->sclass) ||
				    (rr->type != query->stype)) {
					continue;
				}
				/* Target already answered inline — no follow-up needed. */
				finalize_answer(pkt, query, req);
				return KNOT_STATE_DONE;
			}
		}
		DEBUG_MSG("<= cname chain, following\n");
		/* Check if the same query was already resolved */
		for (int i = 0; i < req->rplan.resolved.len; ++i) {
			struct kr_query * q = req->rplan.resolved.at[i];
			if (q->sclass == query->sclass &&
			    q->stype == query->stype   &&
			    knot_dname_is_equal(q->sname, cname)) {
				DEBUG_MSG("<= cname chain loop\n");
				return KNOT_STATE_FAIL;
			}
		}
		/* Plan a follow-up query for the CNAME target. */
		struct kr_query *next = kr_rplan_push(&req->rplan, query->parent, cname, query->sclass, query->stype);
		if (!next) {
			return KNOT_STATE_FAIL;
		}
		next->flags |= QUERY_AWAIT_CUT;
		/* Want DNSSEC if it's possible to secure this name (e.g. is covered by any TA) */
		if (kr_ta_covers(&req->ctx->trust_anchors, cname) &&
		    !kr_ta_covers(&req->ctx->negative_anchors, cname)) {
			next->flags |= QUERY_DNSSEC_WANT;
		}
	} else if (!query->parent) {
		finalize_answer(pkt, query, req);
	}
	return KNOT_STATE_DONE;
}

482
/** Error handling, RFC1034 5.3.3, 4d. */
483
static int resolve_error(knot_pkt_t *pkt, struct kr_request *req)
484
{
485
	return KNOT_STATE_FAIL;
486 487
}

488
/* State-less single resolution iteration step, not needed. */
489
static int reset(knot_layer_t *ctx)  { return KNOT_STATE_PRODUCE; }
490 491

/* Set resolution context and parameters. */
492
static int begin(knot_layer_t *ctx, void *module_param)
493
{
494 495 496
	if (ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
		return ctx->state;
	}
497 498 499
	return reset(ctx);
}

500
/** Build an outgoing query packet for the authoritative server.
 * Applies QNAME minimization and assigns a fresh random message ID.
 * @return kr_ok() or a libknot error from question assembly.
 */
int kr_make_query(struct kr_query *query, knot_pkt_t *pkt)
{
	/* Minimize QNAME (if possible). */
	uint16_t qtype = query->stype;
	const knot_dname_t *qname = minimized_qname(query, &qtype);

	/* Form a query for the authoritative. */
	knot_pkt_clear(pkt);
	const int ret = knot_pkt_put_question(pkt, qname, query->sclass, qtype);
	if (ret != KNOT_EOK) {
		return ret;
	}

	/* Query built, expect answer: randomize ID so responses can be paired. */
	query->id = kr_rand_uint(UINT16_MAX);
	knot_wire_set_id(pkt->wire, query->id);
	pkt->parsed = pkt->size;
	return kr_ok();
}

/** Layer 'produce' hook: build the query packet for the current sub-query. */
static int prepare_query(knot_layer_t *ctx, knot_pkt_t *pkt)
{
	assert(pkt && ctx);
	struct kr_request *req = ctx->data;
	struct kr_query *query = req->current_query;
	/* Nothing to produce without a pending query or after completion/failure. */
	const bool settled = (ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL));
	if (!query || settled) {
		return ctx->state;
	}

	/* Make query */
	if (kr_make_query(query, pkt) != 0) {
		return KNOT_STATE_FAIL;
	}

	return KNOT_STATE_CONSUME;
}

538 539
/** Handle a malformed or unusable response.
 * Outside STRICT_MODE the first failure only flips the query into safe mode
 * (retry with workarounds); a second failure, or STRICT_MODE, is fatal.
 */
static int resolve_badmsg(knot_pkt_t *pkt, struct kr_request *req, struct kr_query *query)
{
#ifndef STRICT_MODE
	/* Work around broken auths/load balancers */
	if (query->flags & QUERY_SAFEMODE) {
		return resolve_error(pkt, req);
	} else {
		query->flags |= QUERY_SAFEMODE;
		return KNOT_STATE_DONE;
	}
#else
		return resolve_error(pkt, req);
#endif
}

553
/** Resolve input query or continue resolution with followups.
 *
 *  This roughly corresponds to RFC1034, 5.3.3 4a-d.
 */
static int resolve(knot_layer_t *ctx, knot_pkt_t *pkt)
{
	assert(pkt && ctx);
	struct kr_request *req = ctx->data;
	struct kr_query *query = req->current_query;
	if (!query || (query->flags & QUERY_RESOLVED)) {
		return ctx->state;
	}

	/* Check for packet processing errors first.
	 * Note - we *MUST* check if it has at least a QUESTION,
	 * otherwise it would crash on accessing QNAME. */
	if (pkt->parsed < pkt->size || pkt->parsed <= KNOT_WIRE_HEADER_SIZE) {
		DEBUG_MSG("<= malformed response\n");
		return resolve_badmsg(pkt, req, query);
	} else if (!is_paired_to_query(pkt, query)) {
		DEBUG_MSG("<= ignoring mismatching response\n");
		/* Force TCP, to work around authoritatives messing up question
		 * without yielding to spoofed responses. */
		query->flags |= QUERY_TCP;
		return resolve_badmsg(pkt, req, query);
	} else if (knot_wire_get_tc(pkt->wire)) {
		DEBUG_MSG("<= truncated response, failover to TCP\n");
		/* NOTE(review): this inner NULL check is redundant — query was
		 * already verified non-NULL at function entry. */
		if (query) {
			/* Fail if already on TCP. */
			if (query->flags & QUERY_TCP) {
				DEBUG_MSG("<= TC=1 with TCP, bailing out\n");
				return resolve_error(pkt, req);
			}
			query->flags |= QUERY_TCP;
		}
		return KNOT_STATE_CONSUME;
	}

#ifndef NDEBUG
	/* Only used as a DEBUG_MSG argument; compiled out together with it under NDEBUG. */
	const knot_lookup_t *rcode = knot_lookup_by_id(knot_rcode_names, knot_wire_get_rcode(pkt->wire));
#endif

	/* Check response code. */
	switch(knot_wire_get_rcode(pkt->wire)) {
	case KNOT_RCODE_NOERROR:
	case KNOT_RCODE_NXDOMAIN:
	case KNOT_RCODE_REFUSED:
		break; /* OK */
	case KNOT_RCODE_FORMERR:
	case KNOT_RCODE_NOTIMPL:
		/* Server didn't understand us — retry in safe mode. */
		DEBUG_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
		return resolve_badmsg(pkt, req, query);
	default:
		DEBUG_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
		return resolve_error(pkt, req);
	}

	/* Resolve authority to see if it's referral or authoritative. */
	int state = process_authority(pkt, req);
	switch(state) {
	case KNOT_STATE_CONSUME: /* Not referral, process answer. */
		DEBUG_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
		state = process_answer(pkt, req);
		break;
	case KNOT_STATE_DONE: /* Referral */
		DEBUG_MSG("<= referral response, follow\n");
		break;
	default:
		break;
	}

	return state;
}

627
/** Module implementation. */
628
const knot_layer_api_t *iterate_layer(struct kr_module *module)
629
{
630 631 632
	static const knot_layer_api_t _layer = {
		.begin = &begin,
		.reset = &reset,
633 634
		.consume = &resolve,
		.produce = &prepare_query
635 636
	};
	return &_layer;
637 638
}

639
KR_MODULE_EXPORT(iterate)
640 641

#undef DEBUG_MSG