/*  Copyright (C) 2014 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/time.h>

#include <libknot/descriptor.h>
#include <libknot/rrtype/rdname.h>
#include <libknot/rrtype/rrsig.h>

#include "lib/layer/iterate.h"
#include "lib/resolve.h"
#include "lib/rplan.h"
#include "lib/defines.h"
#include "lib/nsrep.h"
#include "lib/module.h"
#include "lib/dnssec/ta.h"

#define DEBUG_MSG(fmt...) QRDEBUG(req->current_query, "iter", fmt)

/* The iterator often walks through packet sections; this callback type abstracts per-RR processing. */
typedef int (*rr_callback_t)(const knot_rrset_t *, unsigned, struct kr_request *);

/** Return minimized QNAME/QTYPE for current zone cut. */
static const knot_dname_t *minimized_qname(struct kr_query *query, uint16_t *qtype)
{
	const knot_dname_t *qname = query->sname;
	/* Minimization disabled. */
	if (qname[0] == '\0' || query->flags & QUERY_NO_MINIMIZE) {
		return qname;
	}

	/* Minimize name to contain current zone cut + 1 label. */
	int cut_labels = knot_dname_labels(query->zone_cut.name, NULL);
	int qname_labels = knot_dname_labels(qname, NULL);
	while (qname[0] && qname_labels > cut_labels + 1) {
		qname = knot_wire_next_label(qname, NULL);
		qname_labels -= 1;
	}

	/* Hide QTYPE if minimized. */
	if (qname != query->sname) {
		*qtype = KNOT_RRTYPE_NS;
	}

	return qname;
}
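
/* Worked example (illustrative, not from the original source): resolving
 * "www.example.com." with the zone cut at "com." sends the minimized question
 * "example.com. NS" instead, so the upstream server learns only one label
 * beyond the cut. Once the cut descends to "example.com.", the QNAME equals
 * cut + 1 label, so the real name and QTYPE are used unchanged. */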

/** Check whether the answer is paired to the query (ID, QDCOUNT, class, type and QNAME). */
static bool is_paired_to_query(const knot_pkt_t *answer, struct kr_query *query)
{
	uint16_t qtype = query->stype;
	const knot_dname_t *qname = minimized_qname(query, &qtype);

	return query->id      == knot_wire_get_id(answer->wire) &&
	       knot_wire_get_qdcount(answer->wire) > 0 &&
	       (query->sclass == KNOT_CLASS_ANY || query->sclass  == knot_pkt_qclass(answer)) &&
	       qtype          == knot_pkt_qtype(answer) &&
	       knot_dname_is_equal(qname, knot_pkt_qname(answer));
}
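
/* Note (illustrative): pairing the response against the (possibly minimized)
 * question is part of the defence against off-path spoofing; a reply whose
 * ID, class, type or QNAME doesn't match what was actually sent is discarded
 * by the caller (see resolve() below, which also forces TCP in that case). */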

/** Relaxed rule for AA: either AA=1 or an SOA matching the zone cut is required. */
static bool is_authoritative(const knot_pkt_t *answer, struct kr_query *query)
{
	if (knot_wire_get_aa(answer->wire)) {
		return true;
	}

	const knot_pktsection_t *ns = knot_pkt_section(answer, KNOT_AUTHORITY);
	for (unsigned i = 0; i < ns->count; ++i) {
		const knot_rrset_t *rr = knot_pkt_rr(ns, i);
		if (rr->type == KNOT_RRTYPE_SOA && knot_dname_in(query->zone_cut.name, rr->owner)) {
			return true;
		}
	}

#ifndef STRICT_MODE
	/* Last resort to work around broken auths: accept if the zone cut is at/parent of the QNAME. */
	if (knot_dname_is_equal(query->zone_cut.name, knot_pkt_qname(answer))) {
		return true;
	}
#endif
	return false;
}

int kr_response_classify(knot_pkt_t *pkt)
{
	const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
	switch (knot_wire_get_rcode(pkt->wire)) {
	case KNOT_RCODE_NOERROR:
		return (an->count == 0) ? PKT_NODATA : PKT_NOERROR;
	case KNOT_RCODE_NXDOMAIN:
		return PKT_NXDOMAIN;
	case KNOT_RCODE_REFUSED:
		return PKT_REFUSED;
	default:
		return PKT_ERROR;
	}
}
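
/* Classification example (illustrative): a NOERROR reply with an empty ANSWER
 * section classifies as PKT_NODATA - typical for an empty non-terminal hit by
 * a minimized NS query - while NOERROR with records is PKT_NOERROR; anything
 * other than NOERROR/NXDOMAIN/REFUSED maps to PKT_ERROR. */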

static void follow_cname_chain(const knot_dname_t **cname, const knot_rrset_t *rr,
                               struct kr_query *cur)
{
	if (rr->type == KNOT_RRTYPE_CNAME) {
		*cname = knot_cname_name(&rr->rrs);
	} else if (rr->type != KNOT_RRTYPE_RRSIG) {
		/* Terminate CNAME chain (if not RRSIG). */
		*cname = cur->sname;
	}
}
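
/* Example (illustrative): given "www.example.org. CNAME web.example.net.",
 * *cname is advanced to "web.example.net." so subsequent answer records owned
 * by the target are accepted; any non-CNAME, non-RRSIG record resets the
 * chain back to the original SNAME. */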

/** @internal Filter ANY or loopback addresses. */
static bool is_valid_addr(const uint8_t *addr, size_t len)
{
	if (len == sizeof(struct in_addr)) {
		/* Filter ANY and 127.0.0.0/8 */
		uint32_t ip_host = ntohl(*(const uint32_t *)(addr));
		if (ip_host == 0 || (ip_host & 0xff000000) == 0x7f000000) {
			return false;
		}
	} else if (len == sizeof(struct in6_addr)) {
		struct in6_addr ip6_mask;
		memset(&ip6_mask, 0, sizeof(ip6_mask));
		/* All except last byte are zeroed, last byte defines ANY/::1 */
		if (memcmp(addr, ip6_mask.s6_addr, sizeof(ip6_mask.s6_addr) - 1) == 0) {
			return (addr[len - 1] > 1);
		}
	}
	return true;
}
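
/* A quick sanity check of the filter above (illustrative values):
 * 0.0.0.0 and 127.0.0.1 are rejected (ANY and the loopback /8), while e.g.
 * 192.0.2.1 passes; for IPv6, :: and ::1 are rejected but ::2 passes, since
 * only the last byte of an otherwise all-zero address is inspected. */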

static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
{
	if (rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA) {
		const knot_rdata_t *rdata = rr->rrs.data;
		if (!(query->flags & QUERY_ALLOW_LOCAL) &&
			!is_valid_addr(knot_rdata_data(rdata), knot_rdata_rdlen(rdata))) {
			return KNOT_STATE_CONSUME; /* Ignore invalid addresses */
		}
		int ret = kr_zonecut_add(&query->zone_cut, rr->owner, rdata);
		if (ret != 0) {
			return KNOT_STATE_FAIL;
		}
	}

	return KNOT_STATE_CONSUME;
}

static int update_parent(const knot_rrset_t *rr, struct kr_query *qry)
{
	return update_nsaddr(rr, qry->parent);
}

static int update_answer(const knot_rrset_t *rr, unsigned hint, knot_pkt_t *answer)
{
	/* Scrub DNSSEC records when not requested. */
	if (!knot_pkt_has_dnssec(answer)) {
		if (rr->type != knot_pkt_qtype(answer) && knot_rrtype_is_dnssec(rr->type)) {
			return KNOT_STATE_DONE; /* Scrub */
		}
	}

	int ret = knot_pkt_put(answer, hint, rr, 0);
	if (ret != KNOT_EOK) {
		knot_wire_set_tc(answer->wire);
		return KNOT_STATE_DONE;
	}

	return KNOT_STATE_DONE;
}

static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_query *qry)
{
	for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
		const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
		for (unsigned k = 0; k < sec->count; ++k) {
			const knot_rrset_t *rr = knot_pkt_rr(sec, k);
			if (knot_dname_is_equal(ns, rr->owner)) {
				(void) update_nsaddr(rr, qry);
			}
		}
	}
}

/** Attempt to find glue for given nameserver name (best effort). */
static int has_glue(knot_pkt_t *pkt, const knot_dname_t *ns)
{
	for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
		const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
		for (unsigned k = 0; k < sec->count; ++k) {
			const knot_rrset_t *rr = knot_pkt_rr(sec, k);
			if (knot_dname_is_equal(ns, rr->owner) &&
			    (rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA)) {
				return 1;
			}
		}
	}
	return 0;
}
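
/* Example of why glue matters (illustrative): a delegation
 *   example.com. NS ns1.example.com.
 * points inside the delegated zone itself, so without an A/AAAA glue record
 * for ns1.example.com. in the same message the resolver could never reach the
 * zone; update_cut() below therefore rejects such in-bailiwick NS targets
 * when has_glue() finds nothing. */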

static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_request *req)
{
	struct kr_query *qry = req->current_query;
	struct kr_zonecut *cut = &qry->zone_cut;
	int state = KNOT_STATE_CONSUME;

	/* Authority MUST be at/below the authority of the nameserver, otherwise
	 * this is a possible cache injection attempt. */
	if (!knot_dname_in(cut->name, rr->owner)) {
		DEBUG_MSG("<= authority: ns outside bailiwick, ignoring\n");
		return state;
	}

	/* Update zone cut name */
	if (!knot_dname_is_equal(rr->owner, cut->name)) {
		/* Remember parent cut and descend to new (keep keys and TA). */
		struct kr_zonecut *parent = mm_alloc(&req->pool, sizeof(*parent));
		if (parent) {
			memcpy(parent, cut, sizeof(*parent));
			kr_zonecut_init(cut, rr->owner, &req->pool);
			cut->key = parent->key;
			cut->trust_anchor = parent->trust_anchor;
			cut->parent = parent;
		} else {
			kr_zonecut_set(cut, rr->owner);
		}
		state = KNOT_STATE_DONE;
	}

	/* Fetch glue for each NS */
	for (unsigned i = 0; i < rr->rrs.rr_count; ++i) {
		const knot_dname_t *ns_name = knot_ns_name(&rr->rrs, i);
		int glue_records = has_glue(pkt, ns_name);
		/* Glue is mandatory for NS below zone */
		if (!glue_records && knot_dname_in(rr->owner, ns_name)) {
			DEBUG_MSG("<= authority: missing mandatory glue, rejecting\n");
			continue;
		}
		kr_zonecut_add(cut, ns_name, NULL);
		fetch_glue(pkt, ns_name, qry);
	}

	return state;
}
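
/* Bailiwick example (illustrative): if a server authoritative for com.
 * returns an NS set owned by org., knot_dname_in() fails and the records are
 * ignored, preventing an out-of-bailiwick server from planting a bogus cut. */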

static const knot_dname_t *signature_authority(knot_pkt_t *pkt)
{
	/* Can't find signer for RRSIGs, bail out. */
	if (knot_pkt_qtype(pkt) == KNOT_RRTYPE_RRSIG) {
		return NULL;
	}
	for (knot_section_t i = KNOT_ANSWER; i <= KNOT_AUTHORITY; ++i) {
		const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
		for (unsigned k = 0; k < sec->count; ++k) {
			const knot_rrset_t *rr = knot_pkt_rr(sec, k);
			if (rr->type == KNOT_RRTYPE_RRSIG) {
				return knot_rrsig_signer_name(&rr->rrs, 0);
			}
		}
	}
	return NULL;
}

static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
	int result = KNOT_STATE_CONSUME;
	struct kr_query *qry = req->current_query;
	const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);

#ifdef STRICT_MODE
	/* AA, terminate resolution chain. */
	if (knot_wire_get_aa(pkt->wire)) {
		return KNOT_STATE_CONSUME;
	}
#else
	/* Work around servers sending back CNAME with different delegation and no AA. */
	const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
	if (an->count > 0 && ns->count > 0) {
		const knot_rrset_t *rr = knot_pkt_rr(an, 0);
		if (rr->type == KNOT_RRTYPE_CNAME) {
			return KNOT_STATE_CONSUME;
		}
	}
#endif

	/* Update zone cut information. */
	for (unsigned i = 0; i < ns->count; ++i) {
		const knot_rrset_t *rr = knot_pkt_rr(ns, i);
		if (rr->type == KNOT_RRTYPE_NS) {
			int state = update_cut(pkt, rr, req);
			switch(state) {
			case KNOT_STATE_DONE: result = state; break;
			case KNOT_STATE_FAIL: return state; break;
			default:              /* continue */ break;
			}
		} else if (rr->type == KNOT_RRTYPE_SOA && knot_dname_is_sub(rr->owner, qry->zone_cut.name)) {
			/* SOA below cut in authority indicates different authority, but same NS set. */
			qry->zone_cut.name = knot_dname_copy(rr->owner, &req->pool);
		}
	}

	/* Track the difference between the current TA and the signer name.
	 * A mismatch indicates that the NS is authoritative for both parent and child,
	 * and we must update the DS/DNSKEY to validate it.
	 * @todo This has to be checked here, before we put the data into the packet; there is no "DEFER" or "PAUSE" action yet.
	 */
	const bool track_pc_change = (!(qry->flags & QUERY_CACHED) && (qry->flags & QUERY_DNSSEC_WANT));
	const knot_dname_t *ta_name = qry->zone_cut.trust_anchor ? qry->zone_cut.trust_anchor->owner : NULL;
	const knot_dname_t *signer = signature_authority(pkt);
	if (track_pc_change && ta_name && signer && !knot_dname_is_equal(ta_name, signer)) {
		DEBUG_MSG(">< cut changed, needs revalidation\n");
		if (knot_dname_is_sub(signer, qry->zone_cut.name)) {
			qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
		} else if (!knot_dname_is_equal(signer, qry->zone_cut.name)) {
			/* The key signer is above the current cut, so we can't validate it. This happens when
			   a server is authoritative for grandparent, parent, and child zones at once.
			   Ascend to the parent cut, and refetch the authority for the signer. */
			if (qry->zone_cut.parent) {
				memcpy(&qry->zone_cut, qry->zone_cut.parent, sizeof(qry->zone_cut));
			} else {
				qry->flags |= QUERY_AWAIT_CUT;
			}
			qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
		} /* else zone cut matches, but DS/DNSKEY doesn't => refetch. */
		result = KNOT_STATE_YIELD;
	}

	/* CONSUME => Unhelpful referral.
	 * DONE    => Zone cut updated.
	 * YIELD   => Bail out. */
	return result;
}
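
/* Parent-child example (illustrative): suppose one server is authoritative
 * for both net. and example.net. and answers with RRSIGs signed by
 * "example.net." while our trust anchor still sits at "net.". The signer
 * check above then yields (KNOT_STATE_YIELD) so the DS/DNSKEY chain can be
 * refetched for the new cut before the answer is accepted. */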

static void finalize_answer(knot_pkt_t *pkt, struct kr_query *qry, struct kr_request *req)
{
	/* Finalize header */
	knot_pkt_t *answer = req->answer;
	knot_wire_set_rcode(answer->wire, knot_wire_get_rcode(pkt->wire));

	/* Fill in in-bailiwick records in authority */
	struct kr_zonecut *cut = &qry->zone_cut;
	knot_pkt_begin(answer, KNOT_AUTHORITY);
	int pkt_class = kr_response_classify(pkt);
	if (pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) {
		const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);
		for (unsigned i = 0; i < ns->count; ++i) {
			const knot_rrset_t *rr = knot_pkt_rr(ns, i);
			if (knot_dname_in(cut->name, rr->owner)) {
				update_answer(rr, 0, answer);
			}
		}
	}
}

static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
{
	struct kr_query *query = req->current_query;

	/* Response for minimized QNAME.
	 * NODATA   => may be empty non-terminal, retry (found zone cut)
	 * NOERROR  => found zone cut, retry
	 * NXDOMAIN => parent is zone cut, retry as a workaround for bad authoritatives
	 */
	bool is_final = (query->parent == NULL);
	int pkt_class = kr_response_classify(pkt);
	if (!knot_dname_is_equal(knot_pkt_qname(pkt), query->sname) &&
	    (pkt_class & (PKT_NOERROR|PKT_NXDOMAIN|PKT_REFUSED|PKT_NODATA))) {
		DEBUG_MSG("<= found cut, retrying with non-minimized name\n");
		query->flags |= QUERY_NO_MINIMIZE;
		return KNOT_STATE_CONSUME;
	}

	/* This answer didn't improve the resolution chain, therefore it must be authoritative (relaxed for negative answers). */
	if (!is_authoritative(pkt, query)) {
		if (pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) {
			DEBUG_MSG("<= lame response: non-auth sent negative response\n");
			return KNOT_STATE_FAIL;
		}
	}

	/* Process answer type */
	const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
	bool follow_chain = (query->stype != KNOT_RRTYPE_CNAME);
	const knot_dname_t *cname = query->sname;
	for (unsigned i = 0; i < an->count; ++i) {
		/* @todo construct a CNAME chain closure and accept all names from that set */
		const knot_rrset_t *rr = knot_pkt_rr(an, i);
		if (!knot_dname_is_equal(rr->owner, query->sname) &&
			!(follow_chain && knot_dname_is_equal(rr->owner, cname))) {
			continue;
		}
		unsigned hint = 0;
		if (knot_dname_is_equal(cname, knot_pkt_qname(req->answer))) {
			hint = KNOT_COMPR_HINT_QNAME;
		}
		int state = is_final ? update_answer(rr, hint, req->answer) : update_parent(rr, query);
		if (state == KNOT_STATE_FAIL) {
			return state;
		}
		/* Follow chain only within current cut. */
		if (follow_chain) {
			follow_cname_chain(&cname, rr, query);
			if (!knot_dname_in(query->zone_cut.name, cname)) {
				follow_chain = false;
			}
		}
	}

	/* Make sure that this is an authoritative answer (even with AA=0) for other layers */
	knot_wire_set_aa(pkt->wire);
	/* Either way it resolves current query. */
	query->flags |= QUERY_RESOLVED;
	/* Follow canonical name as next SNAME. */
	if (!knot_dname_is_equal(cname, query->sname)) {
		DEBUG_MSG("<= cname chain, following\n");
		struct kr_query *next = kr_rplan_push(&req->rplan, query->parent, cname, query->sclass, query->stype);
		if (!next) {
			return KNOT_STATE_FAIL;
		}
		next->flags |= QUERY_AWAIT_CUT;
		/* Want DNSSEC if it's possible to secure this name (e.g. it is covered by any TA) */
		if (kr_ta_covers(&req->ctx->trust_anchors, cname) &&
		    !kr_ta_covers(&req->ctx->negative_anchors, cname)) {
			next->flags |= QUERY_DNSSEC_WANT;
		}
	} else if (!query->parent) {
		finalize_answer(pkt, query, req);
	}
	return KNOT_STATE_DONE;
}
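
/* CNAME example (illustrative): asking for "www.example.org. A" and receiving
 * "www.example.org. CNAME web.example.net." resolves the current query and
 * plans a follow-up sub-request for "web.example.net. A" via kr_rplan_push(),
 * with QUERY_AWAIT_CUT set since the new name may lie under a different cut. */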

/** Error handling, RFC1034 5.3.3, 4d. */
static int resolve_error(knot_pkt_t *pkt, struct kr_request *req)
{
	return KNOT_STATE_FAIL;
}

/* Stateless single resolution iteration step, no reset needed. */
static int reset(knot_layer_t *ctx)  { return KNOT_STATE_PRODUCE; }

/* Set resolution context and parameters. */
static int begin(knot_layer_t *ctx, void *module_param)
{
	if (ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
		return ctx->state;
	}
	return reset(ctx);
}

int kr_make_query(struct kr_query *query, knot_pkt_t *pkt)
{
	/* Minimize QNAME (if possible). */
	uint16_t qtype = query->stype;
	const knot_dname_t *qname = minimized_qname(query, &qtype);

	/* Form a query for the authoritative. */
	knot_pkt_clear(pkt);
	int ret = knot_pkt_put_question(pkt, qname, query->sclass, qtype);
	if (ret != KNOT_EOK) {
		return ret;
	}

	/* Query built, expect answer. */
	query->id = kr_rand_uint(UINT16_MAX);
	knot_wire_set_id(pkt->wire, query->id);
	pkt->parsed = pkt->size;
	return kr_ok();
}
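
/* Note (illustrative): the random message ID chosen here is what
 * is_paired_to_query() later checks against, together with the minimized
 * QNAME/QTYPE, so a mismatched or spoofed reply can be told apart from the
 * answer to this exact question. */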

static int prepare_query(knot_layer_t *ctx, knot_pkt_t *pkt)
{
	assert(pkt && ctx);
	struct kr_request *req = ctx->data;
	struct kr_query *query = req->current_query;
	if (!query || ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
		return ctx->state;
	}

	/* Make query */
	int ret = kr_make_query(query, pkt);
	if (ret != 0) {
		return KNOT_STATE_FAIL;
	}

	return KNOT_STATE_CONSUME;
}

static int resolve_badmsg(knot_pkt_t *pkt, struct kr_request *req, struct kr_query *query)
{
#ifndef STRICT_MODE
	/* Work around broken auths/load balancers */
	if (query->flags & QUERY_SAFEMODE) {
		return resolve_error(pkt, req);
	} else {
		query->flags |= QUERY_SAFEMODE;
		return KNOT_STATE_DONE;
	}
#else
	return resolve_error(pkt, req);
#endif
}

/** Resolve input query or continue resolution with followups.
 *
 *  This roughly corresponds to RFC1034, 5.3.3 4a-d.
 */
static int resolve(knot_layer_t *ctx, knot_pkt_t *pkt)
{
	assert(pkt && ctx);
	struct kr_request *req = ctx->data;
	struct kr_query *query = req->current_query;
	if (!query || (query->flags & QUERY_RESOLVED)) {
		return ctx->state;
	}

	/* Check for packet processing errors first.
	 * Note - we *MUST* check if it has at least a QUESTION,
	 * otherwise it would crash on accessing QNAME. */
	if (pkt->parsed < pkt->size || pkt->parsed <= KNOT_WIRE_HEADER_SIZE) {
		DEBUG_MSG("<= malformed response\n");
		return resolve_badmsg(pkt, req, query);
	} else if (!is_paired_to_query(pkt, query)) {
		DEBUG_MSG("<= ignoring mismatching response\n");
		/* Force TCP, to work around authoritatives messing up question
		 * without yielding to spoofed responses. */
		query->flags |= QUERY_TCP;
		return resolve_badmsg(pkt, req, query);
	} else if (knot_wire_get_tc(pkt->wire)) {
		DEBUG_MSG("<= truncated response, failover to TCP\n");
		if (query) {
			/* Fail if already on TCP. */
			if (query->flags & QUERY_TCP) {
				DEBUG_MSG("<= TC=1 with TCP, bailing out\n");
				return resolve_error(pkt, req);
			}
			query->flags |= QUERY_TCP;
		}
		return KNOT_STATE_CONSUME;
	}

#ifndef NDEBUG
	lookup_table_t *rcode = lookup_by_id(knot_rcode_names, knot_wire_get_rcode(pkt->wire));
#endif

	/* Check response code. */
	switch(knot_wire_get_rcode(pkt->wire)) {
	case KNOT_RCODE_NOERROR:
	case KNOT_RCODE_NXDOMAIN:
	case KNOT_RCODE_REFUSED:
		break; /* OK */
	case KNOT_RCODE_FORMERR:
	case KNOT_RCODE_NOTIMPL:
		DEBUG_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
		return resolve_badmsg(pkt, req, query);
	default:
		DEBUG_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
		return resolve_error(pkt, req);
	}

	/* Process the authority section to see whether it's a referral or an authoritative answer. */
	int state = process_authority(pkt, req);
	switch(state) {
	case KNOT_STATE_CONSUME: /* Not referral, process answer. */
		DEBUG_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
		state = process_answer(pkt, req);
		break;
	case KNOT_STATE_DONE: /* Referral */
		DEBUG_MSG("<= referral response, follow\n");
		break;
	default:
		break;
	}

	return state;
}

/** Module implementation. */
const knot_layer_api_t *iterate_layer(struct kr_module *module)
{
	static const knot_layer_api_t _layer = {
		.begin = &begin,
		.reset = &reset,
		.consume = &resolve,
		.produce = &prepare_query
	};
	return &_layer;
}

KR_MODULE_EXPORT(iterate)