Commit 4f1575fe authored by Marek Vavruša

Merge branch 'ns-rtt-cache'

parents 46adaeae 04b2d89c
../licenses/CC0
\ No newline at end of file
/* This is MurmurHash3. The original C++ code was placed in the public domain
* by its author, Austin Appleby. */
#include "libknot/internal/trie/murmurhash3.h"
static inline uint32_t fmix(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
static inline uint32_t rotl32(uint32_t x, int8_t r)
{
return (x << r) | (x >> (32 - r));
}
uint32_t hash(const char* data, size_t len_)
{
const int len = (int) len_;
const int nblocks = len / 4;
uint32_t h1 = 0xc062fb4a;
uint32_t c1 = 0xcc9e2d51;
uint32_t c2 = 0x1b873593;
//----------
// body
const uint32_t * blocks = (const uint32_t*) (data + nblocks * 4);
int i;
for(i = -nblocks; i; i++)
{
uint32_t k1 = blocks[i];
k1 *= c1;
k1 = rotl32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = rotl32(h1, 13);
h1 = h1*5+0xe6546b64;
}
//----------
// tail
const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
uint32_t k1 = 0;
switch(len & 3)
{
case 3: k1 ^= tail[2] << 16; /* fall through */
case 2: k1 ^= tail[1] << 8;  /* fall through */
case 1: k1 ^= tail[0];
k1 *= c1; k1 = rotl32(k1,15); k1 *= c2; h1 ^= k1;
}
//----------
// finalization
h1 ^= len;
h1 = fmix(h1);
return h1;
}
#pragma once
#include <stdlib.h>
#include <stdint.h>
uint32_t hash(const char* data, size_t len);
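/* Usage sketch (illustrative, not part of the original header): the LRU cache
 * below uses this hash to pick a slot index for a key, e.g.
 *
 *   const char *key = "example.com";
 *   uint32_t slot = hash(key, strlen(key)) % table_size;
 *
 * where 'table_size' is a hypothetical non-zero slot count.
 */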
......@@ -24,6 +24,7 @@
#include "daemon/engine.h"
#include "daemon/bindings.h"
#include "daemon/ffimodule.h"
#include "lib/nsrep.h"
#include "lib/cache.h"
#include "lib/defines.h"
......@@ -157,6 +158,11 @@ static int init_resolver(struct engine *engine)
{
/* Open resolution context */
engine->resolver.modules = &engine->modules;
/* Open NS reputation cache */
engine->resolver.nsrep = malloc(lru_size(kr_nsrep_lru_t, DEFAULT_NSREP_SIZE));
if (engine->resolver.nsrep) {
lru_init(engine->resolver.nsrep, DEFAULT_NSREP_SIZE);
}
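/* Sketch of how such a reputation cache can be consulted (illustrative only;
 * the real value type of kr_nsrep_lru_t is defined in lib/nsrep.h and is not
 * shown in this diff):
 *
 *   unsigned *score = lru_set(engine->resolver.nsrep,
 *                             (const char *)ns_name, knot_dname_size(ns_name));
 *   if (score) { *score = measured_rtt; }
 *
 * 'unsigned', 'ns_name' and 'measured_rtt' are placeholders.
 */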
/* Load basic modules */
engine_register(engine, "iterate");
......@@ -238,6 +244,7 @@ void engine_deinit(struct engine *engine)
network_deinit(&engine->net);
kr_cache_close(&engine->resolver.cache);
lru_deinit(engine->resolver.nsrep);
/* Unload modules. */
for (size_t i = 0; i < engine->modules.len; ++i) {
......
......@@ -16,6 +16,14 @@
#pragma once
/* Magic defaults */
#ifndef DEFAULT_NSREP_SIZE
#define DEFAULT_NSREP_SIZE 4096 /**< Default NS reputation cache size */
#endif
#ifndef DEFAULT_RING_SIZE
#define DEFAULT_RING_SIZE 16 /**< Maximum number of mempools kept in the worker ring */
#endif
/*
* @internal These are forward decls to allow building modules with engine but without Lua.
*/
......
......@@ -147,9 +147,11 @@ int main(int argc, char **argv)
struct worker_ctx worker = {
.engine = &engine,
.loop = loop,
.mm = NULL
.mm = NULL,
};
loop->data = &worker;
array_init(worker.bufs.ring);
worker_reserve(&worker, DEFAULT_RING_SIZE);
/* Bind to sockets. */
if (addr != NULL) {
......@@ -182,6 +184,7 @@ int main(int argc, char **argv)
/* Cleanup. */
fprintf(stderr, "\n[system] quitting\n");
engine_deinit(&engine);
worker_reclaim(&worker);
if (ret != 0) {
ret = EXIT_FAILURE;
......
......@@ -27,6 +27,7 @@
struct qr_task
{
struct kr_request req;
struct worker_ctx *worker;
knot_pkt_t *next_query;
uv_handle_t *next_handle;
uv_timer_t timeout;
......@@ -62,8 +63,15 @@ static int parse_query(knot_pkt_t *query)
static struct qr_task *qr_task_create(struct worker_ctx *worker, uv_handle_t *handle, knot_pkt_t *query, const struct sockaddr *addr)
{
/* Recycle mempool from ring or create it */
mm_ctx_t pool;
mm_ctx_mempool(&pool, MM_DEFAULT_BLKSIZE);
mempool_ring_t *ring = &worker->bufs.ring;
if (ring->len > 0) {
pool = array_tail(*ring);
array_pop(*ring);
} else {
mm_ctx_mempool(&pool, KNOT_WIRE_MAX_PKTSIZE);
}
/* Create worker task */
struct engine *engine = worker->engine;
......@@ -73,6 +81,7 @@ static struct qr_task *qr_task_create(struct worker_ctx *worker, uv_handle_t *ha
mp_delete(pool.ctx);
return NULL;
}
task->worker = worker;
task->req.pool = pool;
task->source.handle = handle;
if (addr) {
......@@ -118,7 +127,15 @@ static void qr_task_free(uv_handle_t *handle)
uv_ref(task->source.handle);
io_start_read(task->source.handle);
}
mp_delete(task->req.pool.ctx);
/* Return mempool to ring or free it if it's full */
struct worker_ctx *worker = task->worker;
mempool_ring_t *ring = &worker->bufs.ring;
if (ring->len < ring->cap) {
mp_flush(task->req.pool.ctx);
array_push(*ring, task->req.pool);
} else {
mp_delete(task->req.pool.ctx);
}
}
static void qr_task_timeout(uv_timer_t *req)
......@@ -262,3 +279,17 @@ int worker_exec(struct worker_ctx *worker, uv_handle_t *handle, knot_pkt_t *quer
/* Consume input and produce next query */
return qr_task_step(task, query);
}
int worker_reserve(struct worker_ctx *worker, size_t ring_maxlen)
{
return array_reserve(worker->bufs.ring, ring_maxlen);
}
void worker_reclaim(struct worker_ctx *worker)
{
mempool_ring_t *ring = &worker->bufs.ring;
for (unsigned i = 0; i < ring->len; ++i) {
mp_delete(ring->at[i].ctx);
}
array_clear(*ring);
}
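/* The mempool-recycling pattern above, condensed into one place (a sketch built
 * only from names used in this patch): mp_flush() invalidates everything
 * allocated from a pool while keeping it ready for reuse, so returning a flushed
 * pool to the ring is cheaper than mp_delete() + mm_ctx_mempool() per query.
 *
 *   mempool_ring_t *ring = &worker->bufs.ring;
 *   mm_ctx_t pool;
 *   if (ring->len > 0) {                      // reuse a previously flushed pool
 *       pool = array_tail(*ring);
 *       array_pop(*ring);
 *   } else {                                  // or create a fresh one
 *       mm_ctx_mempool(&pool, KNOT_WIRE_MAX_PKTSIZE);
 *   }
 *   // ... serve one query from 'pool' ...
 *   if (ring->len < ring->cap) {              // hand it back if there is room
 *       mp_flush(pool.ctx);
 *       array_push(*ring, pool);
 *   } else {
 *       mp_delete(pool.ctx);
 *   }
 */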
......@@ -19,6 +19,11 @@
#include <libknot/internal/mempattern.h>
#include "daemon/engine.h"
#include "lib/generic/array.h"
/* @cond internal Array of memory pools. */
typedef array_t(mm_ctx_t) mempool_ring_t;
/* @endcond */
/**
* Query resolution worker.
......@@ -29,6 +34,7 @@ struct worker_ctx {
mm_ctx_t *mm;
struct {
uint8_t wire[KNOT_WIRE_MAX_PKTSIZE];
mempool_ring_t ring;
} bufs;
};
......@@ -43,3 +49,9 @@ struct worker_ctx {
* @return 0 or an error code
*/
int worker_exec(struct worker_ctx *worker, uv_handle_t *handle, knot_pkt_t *query, const struct sockaddr* addr);
/** Reserve worker buffers (pre-allocate the mempool ring). */
int worker_reserve(struct worker_ctx *worker, size_t ring_maxlen);
/** Reclaim (delete) all pooled worker mempools. */
void worker_reclaim(struct worker_ctx *worker);
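/* Typical lifetime, mirroring daemon/main.c (sketch):
 *
 *   array_init(worker.bufs.ring);
 *   worker_reserve(&worker, DEFAULT_RING_SIZE);  // pre-allocate the mempool ring
 *   // ... run the event loop; worker_exec() handles each incoming query ...
 *   worker_reclaim(&worker);                     // delete pooled mempools on shutdown
 */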
......@@ -241,35 +241,34 @@ int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint32_t *times
return kr_error(ENOENT);
}
knot_rrset_t kr_cache_materialize(const knot_rrset_t *src, uint32_t drift, mm_ctx_t *mm)
int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t drift, mm_ctx_t *mm)
{
assert(src);
/* Make RRSet copy. */
knot_rrset_t copy;
knot_rrset_init(&copy, NULL, src->type, src->rclass);
copy.owner = knot_dname_copy(src->owner, mm);
if (!copy.owner) {
return copy;
/* Make RRSet copy */
knot_rrset_init(dst, NULL, src->type, src->rclass);
dst->owner = knot_dname_copy(src->owner, mm);
if (!dst->owner) {
return kr_error(ENOMEM);
}
knot_rdata_t *rd = knot_rdataset_at(&src->rrs, 0);
knot_rdata_t *rd_dst = NULL;
for (uint16_t i = 0; i < src->rrs.rr_count; ++i) {
knot_rdata_t *rd = knot_rdataset_at(&src->rrs, i);
if (knot_rdata_ttl(rd) > drift) {
if (knot_rdataset_add(&copy.rrs, rd, mm) != 0) {
knot_rrset_clear(&copy, mm);
return copy;
/* Append record */
if (knot_rdataset_add(&dst->rrs, rd, mm) != 0) {
knot_rrset_clear(dst, mm);
return kr_error(ENOMEM);
}
/* Fixup TTL from absolute time */
rd_dst = knot_rdataset_at(&dst->rrs, dst->rrs.rr_count - 1);
knot_rdata_set_ttl(rd_dst, knot_rdata_ttl(rd) - drift);
}
rd += knot_rdata_array_size(knot_rdata_rdlen(rd));
}
/* Update TTLs. */
for (uint16_t i = 0; i < copy.rrs.rr_count; ++i) {
knot_rdata_t *rd = knot_rdataset_at(&copy.rrs, i);
knot_rdata_set_ttl(rd, knot_rdata_ttl(rd) - drift);
}
return copy;
return kr_ok();
}
int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint32_t timestamp)
......
......@@ -156,12 +156,13 @@ int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint32_t *times
/**
* Clone read-only RRSet and adjust TTLs.
* @param dst destination for materialized RRSet
* @param src read-only RRSet (its rdataset may be changed depending on the result)
* @param drift time passed between cache time and now
* @param mm memory context
* @return materialized (or empty) RRSet
* @return 0 or an error code
*/
knot_rrset_t kr_cache_materialize(const knot_rrset_t *src, uint32_t drift, mm_ctx_t *mm);
int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t drift, mm_ctx_t *mm);
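/* Call sketch for the new signature (illustrative; 'cached', 'drift' and 'mm'
 * are placeholders):
 *
 *   knot_rrset_t rr;
 *   int ret = kr_cache_materialize(&rr, cached, drift, mm);
 *   if (ret != 0) {
 *       return ret;  // kr_error(ENOMEM) on allocation failure
 *   }
 *   // ... use 'rr', then release it with knot_rrset_clear(&rr, mm) ...
 */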
/**
* Insert RRSet into cache, replacing any existing data.
......
......@@ -32,7 +32,7 @@
* Connection limits.
* @cond internal
*/
#define KR_CONN_RTT_MAX 5000 /* Timeout for network activity */
#define KR_CONN_RTT_MAX 3000 /* Timeout for network activity */
#define KR_ITER_LIMIT 50 /* Built-in iterator limit */
/*
......
......@@ -10,6 +10,7 @@ as long as it comes with a test case in `tests/test_generics.c`.
* map_ - a `Crit-bit tree`_ key-value map implementation (public domain) that comes with tests.
* set_ - set abstraction implemented on top of ``map``.
* pack_ - length-prefixed list of objects (i.e. array-list).
* lru_ - LRU-like hash table.
array
~~~~~
......@@ -35,4 +36,10 @@ pack
.. doxygenfile:: pack.h
:project: libkresolve
lru
~~~
.. doxygenfile:: lru.h
:project: libkresolve
.. _`Crit-bit tree`: http://cr.yp.to/critbit.html
......@@ -133,10 +133,9 @@ static inline void array_std_free(void *baton, void *p)
/**
* Pop value from the end of the array.
* @return 0 on success, <0 on failure
*/
#define array_pop(array) \
array_del((array), (array).len - 1)
(array).len -= 1
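/* array_pop() only shrinks the length; read the element first if it is still
 * needed, as the worker mempool ring does (sketch, 'ring' being an array_t):
 *
 *   mm_ctx_t pool = array_tail(ring);
 *   array_pop(ring);
 */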
/**
* Remove value at given index.
......
/* Copyright (C) 2013 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file lru.h
* @brief LRU-like cache.
*
* @note This is a naive LRU implementation: on a key collision the existing entry
* is treated as old and evicted. This may be improved with double hashing or hopscotch hashing.
*
* # Example usage:
*
* @code{.c}
* // Define new LRU type
* typedef lru_hash(int) lru_int_t;
*
* // Create LRU (the structure ends with a flexible slot array, so allocate
* // lru_size() bytes and keep a pointer to it)
* lru_int_t *lru = malloc(lru_size(lru_int_t, 10));
* lru_init(lru, 10);
*
* // Insert some values
* *lru_set(lru, "luke", strlen("luke")) = 42;
* *lru_set(lru, "leia", strlen("leia")) = 24;
*
* // Retrieve values
* int *ret = lru_get(lru, "luke", strlen("luke"));
* if (!ret) printf("luke dropped out!\n");
* else printf("luke's number is %d\n", *ret);
*
* // Set up eviction function, this is going to get called
* // on entry eviction (baton refers to baton in 'lru' structure)
* void on_evict(void *baton, void *data_) {
* int *data = (int *) data_;
* printf("number %d dropped out!\n", *data);
* }
* lru->evict = on_evict;
* char *enemies[] = {"goro", "raiden", "subzero", "scorpion"};
* for (int i = 0; i < 4; ++i) {
* *lru_set(lru, enemies[i], strlen(enemies[i])) = i;
* }
*
* // We're done
* lru_deinit(lru);
* free(lru);
* @endcode
*
* \addtogroup generics
* @{
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "contrib/murmurhash3/murmurhash3.h"
#define lru_slot_struct \
char *key; /**< Slot key */ \
uint32_t len; /**< Slot length */ \
/** @brief Slot header. */
struct lru_slot {
lru_slot_struct
};
/** @brief Return boolean true if slot matches key/len pair. */
static inline int lru_slot_match(struct lru_slot *slot, const char *key, uint32_t len)
{
return (slot->len == len) && (memcmp(slot->key, key, len) == 0);
}
#define lru_slot_offset(table) \
(size_t)((void *)&((table)->slots[0].data) - (void *)&((table)->slots[0]))
/** @brief Callback definitions. */
typedef void (*lru_free_f)(void *baton, void *ptr);
/** @brief LRU structure base. */
#define lru_hash_struct \
uint32_t size; /**< Number of slots */ \
uint32_t stride; /**< Stride of the 'slots' array */ \
lru_free_f evict; /**< Eviction function */ \
void *baton; /**< Passed to eviction function */
/** @internal Object base of any other lru_hash type. */
struct lru_hash_base {
lru_hash_struct
char slots[];
};
/** @brief User-defined hash table. */
#define lru_hash(type) \
struct { \
lru_hash_struct \
struct { \
lru_slot_struct \
type data; \
} slots[]; \
}
/** @internal Slot data getter */
static inline void *lru_slot_get(struct lru_hash_base *lru, const char *key, uint32_t len, size_t offset)
{
uint32_t id = hash(key, len) % lru->size;
struct lru_slot *slot = (struct lru_slot *)(lru->slots + (id * lru->stride));
if (lru_slot_match(slot, key, len)) {
return ((char *)slot) + offset;
}
return NULL;
}
/** @internal Slot data setter */
static inline void *lru_slot_set(struct lru_hash_base *lru, const char *key, uint32_t len, size_t offset)
{
uint32_t id = hash(key, len) % lru->size;
struct lru_slot *slot = (struct lru_slot *)(lru->slots + (id * lru->stride));
if (!lru_slot_match(slot, key, len)) {
if (slot->key) {
free(slot->key);
if (lru->evict) {
lru->evict(lru->baton, ((char *)slot) + offset);
}
}
memset(slot, 0, lru->stride);
slot->key = malloc(len);
if (!slot->key) {
slot->len = 0;
return NULL;
}
memcpy(slot->key, key, len);
slot->len = len;
}
return ((char *)slot) + offset;
}
/**
* @brief Return size of the LRU structure with given number of slots.
* @param type type of LRU structure
* @param max_slots number of slots
*/
#define lru_size(type, max_slots) \
(sizeof(type) + (max_slots) * sizeof(((type *)NULL)->slots[0]))
/**
* @brief Initialize hash table.
* @param table hash table
* @param max_size number of slots
*/
#define lru_init(table, max_size) \
(memset((table), 0, sizeof(*table) + (max_size) * sizeof((table)->slots[0])), \
(table)->stride = sizeof((table)->slots[0]), (table)->size = (max_size))
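/* Note: lru_init() zeroes sizeof(*table) + max_size * sizeof(slot) bytes, so the
 * count passed to it must not exceed the one used with lru_size() when the table
 * was allocated. Heap allocation as done by daemon/engine.c (sketch):
 *
 *   kr_nsrep_lru_t *t = malloc(lru_size(kr_nsrep_lru_t, DEFAULT_NSREP_SIZE));
 *   if (t) lru_init(t, DEFAULT_NSREP_SIZE);
 */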
/**
* @brief Free all keys and evict all values.
* @param table hash table
*/
#define lru_deinit(table) if (table) { \
for (uint32_t i = 0; i < (table)->size; ++i) { \
if ((table)->slots[i].key) { \
if ((table)->evict) { \
(table)->evict((table)->baton, &(table)->slots[i].data); \
} \
free((table)->slots[i].key); \
} \
} \
}
/**
* @brief Find key in the hash table and return pointer to its value.
* @param table hash table
* @param key_ lookup key
* @param len_ key length
* @return pointer to data or NULL
*/
#define lru_get(table, key_, len_) \
(__typeof__(&(table)->slots[0].data)) \
lru_slot_get((struct lru_hash_base *)(table), (key_), (len_), lru_slot_offset(table))
/**
* @brief Return pointer to value (create/replace if needed)
* @param table hash table
* @param key_ lookup key
* @param len_ key length
* @return pointer to data or NULL
*/
#define lru_set(table, key_, len_) \
(__typeof__(&(table)->slots[0].data)) \
lru_slot_set((struct lru_hash_base *)(table), (key_), (len_), lru_slot_offset(table))
/** @} */
......@@ -108,6 +108,8 @@ int kr_response_classify(knot_pkt_t *pkt)
return (an->count == 0) ? PKT_NODATA : PKT_NOERROR;
case KNOT_RCODE_NXDOMAIN:
return PKT_NXDOMAIN;
case KNOT_RCODE_REFUSED:
return PKT_REFUSED;
default:
return PKT_ERROR;
}
......@@ -116,21 +118,18 @@ int kr_response_classify(knot_pkt_t *pkt)
static void follow_cname_chain(const knot_dname_t **cname, const knot_rrset_t *rr,
struct kr_query *cur)
{
/* Follow chain from SNAME. */
if (knot_dname_is_equal(rr->owner, *cname)) {
if (rr->type == KNOT_RRTYPE_CNAME) {
*cname = knot_cname_name(&rr->rrs);
} else {
/* Terminate CNAME chain. */
*cname = cur->sname;
}
if (rr->type == KNOT_RRTYPE_CNAME) {
*cname = knot_cname_name(&rr->rrs);
} else {
/* Terminate CNAME chain. */
*cname = cur->sname;
}
}
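/* Example for follow_cname_chain(): when chasing 'www.example.com', an RR
 * 'www.example.com CNAME web.example.net' moves the chase to 'web.example.net',
 * while any other RR type owned by the chased name terminates the chain. */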
static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query, uint16_t index)
static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
{
if (rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA) {
const knot_rdata_t *rdata = knot_rdataset_at(&rr->rrs, index);
const knot_rdata_t *rdata = knot_rdataset_at(&rr->rrs, 0);
int ret = kr_zonecut_add(&query->zone_cut, rr->owner, rdata);
if (ret != 0) {
return KNOT_STATE_FAIL;
......@@ -140,27 +139,17 @@ static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query, uint16_
return KNOT_STATE_CONSUME;
}
static int update_glue(const knot_rrset_t *rr, unsigned hint, struct kr_request *req)
{
return update_nsaddr(rr, kr_rplan_current(&req->rplan), hint);
}
int rr_update_parent(const knot_rrset_t *rr, unsigned hint, struct kr_request *req)
static int update_parent(const knot_rrset_t *rr, struct kr_request *req)
{
struct kr_query *qry = kr_rplan_current(&req->rplan);
return update_nsaddr(rr, qry->parent, hint);
return update_nsaddr(rr, qry->parent);
}
int rr_update_answer(const knot_rrset_t *rr, unsigned hint, struct kr_request *req)
static int update_answer(const knot_rrset_t *rr, unsigned hint, struct kr_request *req)
{
knot_pkt_t *answer = req->answer;
/* Write copied RR to the result packet. */
int ret = knot_pkt_put(answer, KNOT_COMPR_HINT_NONE, rr, hint);
int ret = knot_pkt_put(answer, hint, rr, 0);
if (ret != KNOT_EOK) {
if (hint & KNOT_PF_FREE) {
knot_rrset_clear((knot_rrset_t *)rr, &answer->mm);
}
knot_wire_set_tc(answer->wire);
return KNOT_STATE_DONE;
}
......@@ -168,19 +157,29 @@ int rr_update_answer(const knot_rrset_t *rr, unsigned hint, struct kr_request *r
return KNOT_STATE_DONE;
}
/** Attempt to find glue for given nameserver name (best effort). */
static int fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_request *req)
static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_query *qry)
{
int result = 0;
const knot_pktsection_t *ar = knot_pkt_section(pkt, KNOT_ADDITIONAL);
for (unsigned i = 0; i < ar->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(ar, i);
if (knot_dname_is_equal(ns, rr->owner)) {
(void) update_glue(rr, 0, req);
result += 1;
(void) update_nsaddr(rr, qry);
}
}
return result;
}
/** Check whether a glue address record (A/AAAA) for the given nameserver name is present in the additional section. */
static int has_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_request *req)
{
const knot_pktsection_t *ar = knot_pkt_section(pkt, KNOT_ADDITIONAL);
for (unsigned i = 0; i < ar->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(ar, i);
if (knot_dname_is_equal(ns, rr->owner) &&
(rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA)) {
return 1;
}
}
return 0;
}
static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_request *req)
......@@ -205,15 +204,14 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_request
/* Fetch glue for each NS */
for (unsigned i = 0; i < rr->rrs.rr_count; ++i) {
const knot_dname_t *ns_name = knot_ns_name(&rr->rrs, i);
kr_zonecut_add(cut, ns_name, NULL);
int glue_records = fetch_glue(pkt, ns_name, req);
int glue_records = has_glue(pkt, ns_name, req);
/* Glue is mandatory for NS below zone */
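/* e.g. a delegation of example.com to ns1.example.com must ship an A/AAAA glue
 * record in the additional section (otherwise its address could never be
 * resolved), while a delegation to ns.other.net can be resolved separately. */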
if (knot_dname_in(rr->owner, ns_name) ) {
if (glue_records == 0) {
DEBUG_MSG("<= authority: missing mandatory glue, rejecting\n");
kr_zonecut_del(cut, ns_name, NULL);
}
if (!glue_records && knot_dname_in(rr->owner, ns_name)) {
DEBUG_MSG("<= authority: missing mandatory glue, rejecting\n");
continue;
}
kr_zonecut_add(cut, ns_name, NULL);
fetch_glue(pkt, ns_name, query);
}
return state;
......@@ -264,7 +262,7 @@ static void finalize_answer(knot_pkt_t *pkt, struct kr_query *qry, struct kr_req
for (unsigned i = 0; i < ns->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(ns, i);
if (knot_dname_in(cut->name, rr->owner)) {
rr_update_answer(rr, 0, req);
update_answer(rr, 0, req);
}
}
}
......@@ -282,16 +280,18 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
bool is_final = (query->parent == NULL);
int pkt_class = kr_response_classify(pkt);
if (!knot_dname_is_equal(knot_pkt_qname(pkt), query->sname) &&
(pkt_class & (PKT_NOERROR|PKT_NXDOMAIN|PKT_NODATA))) {
(pkt_class & (PKT_NOERROR|PKT_NXDOMAIN|PKT_REFUSED|PKT_NODATA))) {
DEBUG_MSG("<= found cut, retrying with non-minimized name\n");
query->flags |= QUERY_NO_MINIMIZE;
return KNOT_STATE_DONE;
}
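/* Example: when resolving 'www.example.com', the minimized query for an
 * intermediate name such as 'example.com' may draw NXDOMAIN/NODATA/REFUSED from
 * a server that would still answer the full name, so the query is retried
 * without QNAME minimization. */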
/* This answer didn't improve the resolution chain, so it must be authoritative (the requirement is enforced only for negative answers). */
if (!is_authoritative(pkt, query) && (pkt_class & (PKT_NXDOMAIN|PKT_NODATA))) {
DEBUG_MSG("<= lame response: non-auth sent negative response\n");
return KNOT_STATE_FAIL;
if (!is_authoritative(pkt, query)) {
if (pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) {
DEBUG_MSG("<= lame response: non-auth sent negative response\n");
return KNOT_STATE_FAIL;
}
}
/* Process answer type */
......@@ -299,13 +299,22 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
const knot_dname_t *cname = query->sname;
for (unsigned i = 0; i < an->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(an, i);
int state = is_final ? rr_update_answer(rr, 0, req) : rr_update_parent(rr, 0, req);
if (!knot_dname_is_equal(rr->owner, cname)) {
continue;
}
unsigned hint = 0;
if(knot_dname_is_equal(cname, knot_pkt_qname(req->answer))) {
hint = KNOT_COMPR_HINT_QNAME;
}
int state = is_final ? update_answer(rr, hint, req) : update_parent(rr, req);
if (state == KNOT_STATE_FAIL) {
return state;
}
follow_cname_chain(&cname, rr, query);
}
/* Make sure this is treated as an authoritative answer (even if it arrived with AA=0) by other layers */
knot_wire_set_aa(pkt->wire);
/* Either way it resolves current query. */
query->flags |= QUERY_RESOLVED;