Commit 0c3f6a26 authored by Vladimír Čunát's avatar Vladimír Čunát

release 1.99.1-alpha: WIP on aggressive cache

parent 563dd2aa
Knot Resolver 1.4.1 (2017-10-xx)
Knot Resolver 1.99.1-alpha (2017-10-26)
=======================================
This is an experimental release meant for testing aggressive caching.
It contains some regressions and might (theoretically) even be vulnerable.
The current focus is to minimize queries into the root zone.
Improvements
------------
- negative answers from validated NSEC (NXDOMAIN, NODATA)
- verbose log is very chatty around cache operations (maybe too much)
Regressions
-----------
- dropped support for alternative cache backends
  and for some specific cache operations
- caching doesn't yet work for various cases:
    * negative answers without NSEC (i.e. with NSEC3 or insecure)
    * +cd queries (needs other internal changes)
    * positive wildcard answers
- spurious SERVFAIL on specific combinations of cached records, printing:
    <= bad keys, broken trust chain
- make check
    * a few Deckard tests are broken, probably due to some problems above
      + unknown ones?
Knot Resolver 1.4.1 (2017-mm-dd)
================================
Bugfixes
......
# Project
MAJOR := 1
MINOR := 4
PATCH := 0
EXTRA :=
MINOR := 99
PATCH := 1
EXTRA := -alpha
ABIVER := 4
BUILDMODE := dynamic
HARDENING := yes
......
......@@ -708,6 +708,7 @@ static int cache_close(lua_State *L)
return 1;
}
#if 0
/** @internal Prefix walk. */
static int cache_prefixed(struct kr_cache *cache, const char *args, knot_db_val_t *results, int maxresults)
{
......@@ -761,6 +762,7 @@ static int cache_remove_prefix(struct kr_cache *cache, const char *args)
}
return ret;
}
#endif
/** Prune expired/invalid records. */
static int cache_prune(lua_State *L)
......@@ -810,7 +812,7 @@ static int cache_clear(lua_State *L)
/* Clear a sub-tree in cache. */
if (args && strlen(args) > 0) {
int ret = cache_remove_prefix(cache, args);
int ret = kr_error(ENOSYS); // FIXME cache_remove_prefix(cache, args);
if (ret < 0) {
format_error(L, kr_strerror(ret));
lua_error(L);
......@@ -892,7 +894,7 @@ static int cache_get(lua_State *L)
const char *args = lua_tostring(L, 1);
/* Retrieve set of keys */
static knot_db_val_t result_set[100];
int ret = cache_prefixed(cache, args, result_set, 100);
int ret = kr_error(ENOSYS); // FIXME cache_prefixed(cache, args, result_set, 100);
if (ret < 0) {
format_error(L, kr_strerror(ret));
lua_error(L);
......
......@@ -604,8 +604,7 @@ static int init_resolver(struct engine *engine)
/* Load basic modules */
engine_register(engine, "iterate", NULL, NULL);
engine_register(engine, "validate", NULL, NULL);
engine_register(engine, "rrcache", NULL, NULL);
engine_register(engine, "pktcache", NULL, NULL);
engine_register(engine, "cache_lmdb", NULL, NULL);
return array_push(engine->backends, kr_cdb_lmdb());
}
......
This diff is collapsed.
......@@ -16,6 +16,7 @@
#pragma once
#include <libknot/consts.h>
#include <libknot/rrset.h>
#include "lib/cdb.h"
#include "lib/defines.h"
......@@ -24,6 +25,9 @@
/** When knot_pkt is passed from cache without ->wire, this is the ->size. */
static const size_t PKT_SIZE_NOWIRE = -1;
// TODO
#define KR_CACHE_KEY_MAXLEN (KNOT_DNAME_MAXLEN + 100)
/** Cache entry tag */
enum kr_cache_tag {
KR_CACHE_RR = 'R',
......@@ -79,6 +83,12 @@ struct kr_cache
uint32_t ttl_min, ttl_max; /**< Minimum and maximum TTL of inserted entries */
};
#include "lib/module.h"
int cache_lmdb_peek(kr_layer_t *ctx, knot_pkt_t *pkt);
int cache_lmdb_stash(kr_layer_t *ctx, knot_pkt_t *pkt);
/**
* Open/create cache with provided storage options.
* @param cache cache structure to be initialized
......@@ -110,48 +120,6 @@ static inline bool kr_cache_is_open(struct kr_cache *cache)
return cache->db != NULL;
}
/**
* Peek the cache for asset (name, type, tag)
* @note The 'drift' is the time passed between the inception time and now (in seconds).
* @param cache cache structure
* @param tag asset tag
* @param name asset name
* @param type asset type
* @param entry cache entry, will be set to valid pointer or NULL
* @param timestamp current time (will be replaced with drift if successful)
* @return 0 or an errcode
*/
KR_EXPORT
int kr_cache_peek(struct kr_cache *cache, uint8_t tag, const knot_dname_t *name, uint16_t type,
struct kr_cache_entry **entry, uint32_t *timestamp);
/**
* Insert asset into cache, replacing any existing data.
* @param cache cache structure
* @param tag asset tag
* @param name asset name
* @param type asset type
* @param header filled entry header (count, ttl and timestamp)
* @param data inserted data
* @return 0 or an errcode
*/
KR_EXPORT
int kr_cache_insert(struct kr_cache *cache, uint8_t tag, const knot_dname_t *name, uint16_t type,
struct kr_cache_entry *header, knot_db_val_t data);
/**
* Remove asset from cache.
* @param cache cache structure
* @param tag asset tag
* @param name asset name
* @param type record type
* @return 0 or an errcode
*/
KR_EXPORT
int kr_cache_remove(struct kr_cache *cache, uint8_t tag, const knot_dname_t *name, uint16_t type);
/**
* Clear all items from the cache.
* @param cache cache structure
......@@ -160,90 +128,26 @@ int kr_cache_remove(struct kr_cache *cache, uint8_t tag, const knot_dname_t *nam
KR_EXPORT
int kr_cache_clear(struct kr_cache *cache);
/**
* Prefix scan on cached items.
* @param cache cache structure
* @param tag asset tag
* @param name asset prefix key
* @param vals array of values to store the result
* @param valcnt maximum number of retrieved keys
* @return number of retrieved keys or an error
*/
KR_EXPORT
int kr_cache_match(struct kr_cache *cache, uint8_t tag, const knot_dname_t *name, knot_db_val_t *vals, int valcnt);
/**
* Peek the cache for given key and retrieve its rank.
* @param cache cache structure
* @param tag asset tag
* @param name asset name
* @param type record type
* @param timestamp current time
* @return rank (0 or positive), or an error (negative number)
*/
KR_EXPORT
int kr_cache_peek_rank(struct kr_cache *cache, uint8_t tag, const knot_dname_t *name, uint16_t type, uint32_t timestamp);
/* ** This interface is temporary. ** */
/**
* Peek the cache for given RRSet (name, type)
* @note The 'drift' is the time passed between the cache time of the RRSet and now (in seconds).
* @param cache cache structure
* @param rr query RRSet (its rdataset may be changed depending on the result)
* @param rank entry rank will be stored in this variable
* @param flags entry flags
* @param timestamp current time (will be replaced with drift if successful)
* @return 0 or an errcode
*/
struct kr_cache_p {
uint32_t time; /**< The time of inception. */
uint32_t ttl; /**< TTL at inception moment. Assuming it fits into int32_t ATM. */
uint8_t rank; /**< See enum kr_rank */
struct {
/* internal: pointer to eh struct */
void *raw_data, *raw_bound;
};
};
KR_EXPORT
int kr_cache_peek_rr(struct kr_cache *cache, knot_rrset_t *rr, uint8_t *rank, uint8_t *flags, uint32_t *timestamp);
/**
* Clone read-only RRSet and adjust TTLs.
* @param dst destination for materialized RRSet
* @param src read-only RRSet (its rdataset may be changed depending on the result)
* @param drift time passed between cache time and now
* @param reorder (pseudo)-random seed to reorder the data or zero
* @param mm memory context
* @return 0 or an errcode
*/
int kr_cache_peek_exact(struct kr_cache *cache, const knot_dname_t *name, uint16_t type,
struct kr_cache_p *peek);
KR_EXPORT
int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t drift,
uint reorder, knot_mm_t *mm);
/**
* Insert RRSet into cache, replacing any existing data.
* @param cache cache structure
* @param rr inserted RRSet
* @param rank rank of the data
* @param flags additional flags for the data
* @param timestamp current time
* @return 0 or an errcode
*/
int32_t kr_cache_ttl(const struct kr_cache_p *peek, uint32_t current_time);
/*TODO: reorder*/
KR_EXPORT
int kr_cache_insert_rr(struct kr_cache *cache, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp);
int kr_cache_materialize(knot_rdataset_t *dst, const struct kr_cache_p *ref,
uint32_t new_ttl, knot_mm_t *pool);
/**
* Peek the cache for the given RRset signature (name, type)
* @note The RRset type must not be RRSIG but instead it must equal the type covered field of the sought RRSIG.
* @param cache cache structure
* @param rr query RRSET (its rdataset and type may be changed depending on the result)
* @param rank entry rank will be stored in this variable
* @param flags entry additional flags
* @param timestamp current time (will be replaced with drift if successful)
* @return 0 or an errcode
*/
KR_EXPORT
int kr_cache_peek_rrsig(struct kr_cache *cache, knot_rrset_t *rr, uint8_t *rank, uint8_t *flags, uint32_t *timestamp);
/**
* Insert the selected RRSIG RRSet of the selected type covered into cache, replacing any existing data.
* @note The RRSet must contain RRSIGS with only the specified type covered.
* @param cache cache structure
* @param rr inserted RRSIG RRSet
* @param rank rank of the data
* @param flags additional flags for the data
* @param timestamp current time
* @return 0 or an errcode
*/
KR_EXPORT
int kr_cache_insert_rrsig(struct kr_cache *cache, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp);
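The replacement interface above is only declared here; the following is a minimal caller-side sketch of how peek, TTL check and materialization might be combined. The variables cache, owner, qtype, now and pool are placeholders (not taken from this commit), and the assumption that a negative kr_cache_ttl() result means "expired" is mine.

/* Hypothetical sketch of the new peek/materialize interface. */
struct kr_cache_p peek;
int ret = kr_cache_peek_exact(cache, owner, qtype, &peek);
if (ret == 0) {                        /* 0 == kr_ok(), i.e. a cache hit */
	int32_t new_ttl = kr_cache_ttl(&peek, now);
	if (new_ttl >= 0) {            /* assumption: negative means expired */
		knot_rdataset_t rds = { 0 };
		ret = kr_cache_materialize(&rds, &peek, (uint32_t)new_ttl, pool);
		/* ... wrap rds into a knot_rrset_t with the adjusted TTL ... */
	}
}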
......@@ -51,4 +51,9 @@ struct kr_cdb_api {
int (*match)(knot_db_t *db, knot_db_val_t *key, knot_db_val_t *val, int maxcount);
int (*prune)(knot_db_t *db, int maxcount);
/* New WIP API: find the entry with the greatest key less than or equal to *key.
* On successful return, key->data and val->data point to DB-owned data.
* Return: 0 on exact match, > 0 if a strictly smaller key was found, < 0 on error (kr_error). */
int (*read_leq)(knot_db_t *db, knot_db_val_t *key, knot_db_val_t *val);
};
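To make the return-value convention concrete, a hedged caller-side sketch; db, key_buf and key_len are placeholders, not part of this commit.

const struct kr_cdb_api *api = kr_cdb_lmdb();
knot_db_val_t key = { .data = key_buf, .len = key_len };  /* search key */
knot_db_val_t val = { .data = NULL, .len = 0 };
int ret = api->read_leq(db, &key, &val);
if (ret < 0) {
	/* kr_error code, e.g. nothing <= the search key exists */
} else if (ret == 0) {
	/* exact match; key and val now point into DB-owned memory */
} else {
	/* ret > 0: key was rewritten to the greatest stored key below the search key */
}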
......@@ -47,8 +47,9 @@ struct lmdb_env
* - non-NULL .rw is always active
*/
struct {
bool ro_active;
bool ro_active, ro_curs_active;
MDB_txn *ro, *rw;
MDB_cursor *ro_curs;
} txn;
};
......@@ -69,6 +70,17 @@ static int lmdb_error(int error)
}
}
/** Conversion between knot and lmdb structs for values. */
static inline knot_db_val_t val_mdb2knot(MDB_val v)
{
return (knot_db_val_t){ .len = v.mv_size, .data = v.mv_data };
}
static inline MDB_val val_knot2mdb(knot_db_val_t v)
{
return (MDB_val){ .mv_size = v.len, .mv_data = v.data };
}
/*! \brief Set the environment map size.
* \note This also sets the maximum database size, see \fn mdb_env_set_mapsize
*/
......@@ -140,6 +152,7 @@ static int txn_get(struct lmdb_env *env, MDB_txn **txn, bool rdonly)
if (env->txn.ro && env->txn.ro_active) {
mdb_txn_reset(env->txn.ro);
env->txn.ro_active = false;
env->txn.ro_curs_active = false;
}
int ret = txn_get_noresize(env, 0/*RW*/, &env->txn.rw);
if (ret == MDB_SUCCESS) {
......@@ -179,10 +192,54 @@ static int cdb_sync(knot_db_t *db)
} else if (env->txn.ro && env->txn.ro_active) {
mdb_txn_reset(env->txn.ro);
env->txn.ro_active = false;
env->txn.ro_curs_active = false;
}
return ret;
}
/** Obtain a read-only cursor (and a read-only transaction). */
static int txn_curs_get(struct lmdb_env *env, MDB_cursor **curs)
{
assert(env && curs);
if (env->txn.ro_curs_active) {
goto success;
}
/* Only in a read-only txn; TODO: it's a bit messy/coupled */
if (env->txn.rw) {
int ret = cdb_sync(env);
if (ret) return ret;
}
MDB_txn *txn = NULL;
int ret = txn_get(env, &txn, true);
if (ret) return ret;
if (env->txn.ro_curs) {
ret = mdb_cursor_renew(txn, env->txn.ro_curs);
} else {
ret = mdb_cursor_open(txn, env->dbi, &env->txn.ro_curs);
}
if (ret) return ret;
env->txn.ro_curs_active = true;
success:
assert(env->txn.ro_curs_active && env->txn.ro && env->txn.ro_active
&& !env->txn.rw);
*curs = env->txn.ro_curs;
assert(*curs);
return kr_ok();
}
static void free_txn_ro(struct lmdb_env *env)
{
if (env->txn.ro) {
mdb_txn_abort(env->txn.ro);
env->txn.ro = NULL;
}
if (env->txn.ro_curs) {
mdb_cursor_close(env->txn.ro_curs);
env->txn.ro_curs = NULL;
}
}
/*! \brief Close the database. */
static void cdb_close_env(struct lmdb_env *env)
{
......@@ -190,10 +247,7 @@ static void cdb_close_env(struct lmdb_env *env)
/* Get rid of any transactions. */
cdb_sync(env);
if (env->txn.ro) {
mdb_txn_abort(env->txn.ro);
env->txn.ro = NULL;
}
free_txn_ro(env);
mdb_env_sync(env->env, 1);
mdb_dbi_close(env->env, env->dbi);
......@@ -348,10 +402,7 @@ static int cdb_clear(knot_db_t *db)
/* We are about to switch to a different file, so end all txns, to be sure. */
(void) cdb_sync(db);
if (env->txn.ro) {
mdb_txn_abort(env->txn.ro);
env->txn.ro = NULL;
}
free_txn_ro(db);
/* Since there is no guarantee that there will be free
* pages to hold whole dirtied db for transaction-safe clear,
......@@ -424,15 +475,14 @@ static int cdb_readv(knot_db_t *db, knot_db_val_t *key, knot_db_val_t *val, int
for (int i = 0; i < maxcount; ++i) {
/* Convert key structs */
MDB_val _key = { .mv_size = key[i].len, .mv_data = key[i].data };
MDB_val _val = { .mv_size = val[i].len, .mv_data = val[i].data };
MDB_val _key = val_knot2mdb(key[i]);
MDB_val _val = val_knot2mdb(val[i]);
ret = mdb_get(txn, env->dbi, &_key, &_val);
if (ret != MDB_SUCCESS) {
return lmdb_error(ret);
}
/* Update the result. */
val[i].data = _val.mv_data;
val[i].len = _val.mv_size;
val[i] = val_mdb2knot(_val);
}
return kr_ok();
}
......@@ -440,8 +490,8 @@ static int cdb_readv(knot_db_t *db, knot_db_val_t *key, knot_db_val_t *val, int
static int cdb_write(struct lmdb_env *env, MDB_txn **txn, knot_db_val_t *key, knot_db_val_t *val, unsigned flags)
{
/* Convert key structs and write */
MDB_val _key = { key->len, key->data };
MDB_val _val = { val->len, val->data };
MDB_val _key = val_knot2mdb(*key);
MDB_val _val = val_knot2mdb(*val);
int ret = mdb_put(*txn, env->dbi, &_key, &_val, flags);
/* Try to recover from doing too much writing in a single transaction. */
......@@ -493,8 +543,8 @@ static int cdb_remove(knot_db_t *db, knot_db_val_t *key, int maxcount)
int ret = txn_get(env, &txn, false);
for (int i = 0; ret == kr_ok() && i < maxcount; ++i) {
MDB_val _key = { key[i].len, key[i].data };
MDB_val val = { 0, NULL };
MDB_val _key = val_knot2mdb(key[i]);
MDB_val val = { };
ret = lmdb_error(mdb_del(txn, env->dbi, &_key, &val));
}
......@@ -522,7 +572,8 @@ static int cdb_match(knot_db_t *db, knot_db_val_t *key, knot_db_val_t *val, int
return lmdb_error(ret);
}
MDB_val cur_key = { key->len, key->data }, cur_val = { 0, NULL };
MDB_val cur_key = val_knot2mdb(*key);
MDB_val cur_val = { };
ret = mdb_cursor_get(cur, &cur_key, &cur_val, MDB_SET_RANGE);
if (ret != MDB_SUCCESS) {
mdb_cursor_close(cur);
......@@ -537,8 +588,7 @@ static int cdb_match(knot_db_t *db, knot_db_val_t *key, knot_db_val_t *val, int
}
/* Add to result set */
if (results < maxcount) {
val[results].len = cur_key.mv_size;
val[results].data = cur_key.mv_data;
val[results] = val_mdb2knot(cur_key);
++results;
} else {
break;
......@@ -602,13 +652,43 @@ static int cdb_prune(knot_db_t *db, int limit)
return ret < 0 ? ret : results;
}
static int cdb_read_leq(knot_db_t *env, knot_db_val_t *key, knot_db_val_t *val)
{
assert(env && key && key->data && val);
MDB_cursor *curs = NULL;
int ret = txn_curs_get(env, &curs);
if (ret) return ret;
MDB_val key2_m = val_knot2mdb(*key);
MDB_val val2_m = { };
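/* MDB_SET_RANGE positions the cursor at the first key >= the search key;
 * an exact hit is the "equality" case below, otherwise one MDB_PREV step
 * moves to the greatest key smaller than the search key. */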
ret = mdb_cursor_get(curs, &key2_m, &val2_m, MDB_SET_RANGE);
if (ret) return lmdb_error(ret);
/* Test for equality (unlikely case). */
if (key2_m.mv_size == key->len
&& memcmp(key2_m.mv_data, key->data, key->len) == 0) {
ret = 0; /* equality */
goto success;
}
/* we must be greater than key; do one step to smaller */
ret = mdb_cursor_get(curs, &key2_m, &val2_m, MDB_PREV);
if (ret) return lmdb_error(ret);
ret = 1;
success:
/* finalize the output */
*key = val_mdb2knot(key2_m);
*val = val_mdb2knot(val2_m);
return ret;
}
const struct kr_cdb_api *kr_cdb_lmdb(void)
{
static const struct kr_cdb_api api = {
"lmdb",
cdb_init, cdb_deinit, cdb_count, cdb_clear, cdb_sync,
cdb_readv, cdb_writev, cdb_remove,
cdb_match, cdb_prune
cdb_match, cdb_prune,
cdb_read_leq
};
return &api;
......
#include "lib/module.h"
#include "lib/cache.h"
/** Module implementation. */
const kr_layer_api_t *cache_lmdb_layer(struct kr_module *module)
{
static const kr_layer_api_t _layer = {
.produce = &cache_lmdb_peek,
.consume = &cache_lmdb_stash,
};
return &_layer;
}
KR_MODULE_EXPORT(cache_lmdb)
......@@ -178,8 +178,10 @@ static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
}
/** @internal From \a pkt, fetch glue records for name \a ns, and update the cut etc. */
static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_request *req)
static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, bool in_bailiwick,
struct kr_request *req, const struct kr_query *qry)
{
ranked_rr_array_t *selected[] = kr_request_selected(req);
for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
for (unsigned k = 0; k < sec->count; ++k) {
......@@ -191,6 +193,12 @@ static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_reques
(rr->type != KNOT_RRTYPE_AAAA)) {
continue;
}
uint8_t rank = (in_bailiwick && i == KNOT_ANSWER)
? (KR_RANK_INITIAL | KR_RANK_AUTH) : KR_RANK_OMIT;
(void) kr_ranked_rrarray_add(selected[i], rr, rank,
false, qry->uid, &req->pool);
if ((rr->type == KNOT_RRTYPE_A) &&
(req->ctx->options.NO_IPV4)) {
continue;
......@@ -205,7 +213,7 @@ static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_reques
}
/** Attempt to find glue for given nameserver name (best effort). */
static int has_glue(knot_pkt_t *pkt, const knot_dname_t *ns)
static bool has_glue(knot_pkt_t *pkt, const knot_dname_t *ns)
{
for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
......@@ -213,11 +221,11 @@ static int has_glue(knot_pkt_t *pkt, const knot_dname_t *ns)
const knot_rrset_t *rr = knot_pkt_rr(sec, k);
if (knot_dname_is_equal(ns, rr->owner) &&
(rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA)) {
return 1;
return true;
}
}
}
return 0;
return false;
}
/** @internal Update the cut with another NS(+glue) record.
......@@ -267,24 +275,31 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr,
/* Fetch glue for each NS */
for (unsigned i = 0; i < rr->rrs.rr_count; ++i) {
const knot_dname_t *ns_name = knot_ns_name(&rr->rrs, i);
int glue_records = has_glue(pkt, ns_name);
/* Glue is mandatory for NS below zone */
if (!glue_records && knot_dname_in(rr->owner, ns_name)) {
VERBOSE_MSG("<= authority: missing mandatory glue, rejecting\n");
if (knot_dname_in(rr->owner, ns_name) && !has_glue(pkt, ns_name)) {
VERBOSE_MSG("<= authority: missing mandatory glue, skipping NS ");
WITH_VERBOSE {
kr_dname_print(ns_name, "", "\n");
}
continue;
}
kr_zonecut_add(cut, ns_name, NULL);
int ret = kr_zonecut_add(cut, ns_name, NULL);
assert(!ret);
/* Choose when to use glue records. */
bool in_bailiwick = knot_dname_in(current_cut, ns_name);
bool do_fetch;
if (qry->flags.PERMISSIVE) {
fetch_glue(pkt, ns_name, req);
do_fetch = true;
} else if (qry->flags.STRICT) {
/* Strict mode uses only mandatory glue. */
if (knot_dname_in(cut->name, ns_name))
fetch_glue(pkt, ns_name, req);
do_fetch = knot_dname_in(cut->name, ns_name);
} else {
/* Normal mode uses in-bailiwick glue. */
if (knot_dname_in(current_cut, ns_name))
fetch_glue(pkt, ns_name, req);
do_fetch = in_bailiwick;
}
if (do_fetch) {
fetch_glue(pkt, ns_name, in_bailiwick, req, qry);
}
}
......@@ -301,10 +316,12 @@ static uint8_t get_initial_rank(const knot_rrset_t *rr, const struct kr_query *q
uint16_t type = kr_rrset_type_maysig(rr);
if (qry->flags.CACHED) {
assert(rr->additional); // FIXME TMP
return rr->additional ? *(uint8_t *)rr->additional : KR_RANK_OMIT;
/* ^^ Current use case for "cached" RRs without rank: hints module. */
}
if (answer || type == KNOT_RRTYPE_DS
|| type == KNOT_RRTYPE_SOA /* needed for aggressive negative caching */
|| type == KNOT_RRTYPE_NSEC || type == KNOT_RRTYPE_NSEC3) {
return KR_RANK_INITIAL | KR_RANK_AUTH;
}
......
This diff is collapsed.
This diff is collapsed.
......@@ -3,8 +3,7 @@ libkres_SOURCES := \
lib/generic/map.c \
lib/layer/iterate.c \
lib/layer/validate.c \
lib/layer/rrcache.c \
lib/layer/pktcache.c \
lib/layer/cache_lmdb.c \
lib/dnssec/nsec.c \
lib/dnssec/nsec3.c \
lib/dnssec/signature.c \
......
......@@ -26,13 +26,11 @@
/* List of embedded modules */
const kr_layer_api_t *iterate_layer(struct kr_module *module);
const kr_layer_api_t *validate_layer(struct kr_module *module);
const kr_layer_api_t *rrcache_layer(struct kr_module *module);
const kr_layer_api_t *pktcache_layer(struct kr_module *module);
const kr_layer_api_t *cache_lmdb_layer(struct kr_module *module);
static const struct kr_module embedded_modules[] = {
{ "iterate", NULL, NULL, NULL, iterate_layer, NULL, NULL, NULL },
{ "validate", NULL, NULL, NULL, validate_layer, NULL, NULL, NULL },
{ "rrcache", NULL, NULL, NULL, rrcache_layer, NULL, NULL, NULL },
{ "pktcache", NULL, NULL, NULL, pktcache_layer, NULL, NULL, NULL },
{ "cache_lmdb", NULL, NULL, NULL, cache_lmdb_layer, NULL, NULL, NULL },
};
/** Library extension. */
......
......@@ -165,6 +165,8 @@ static int invalidate_ns(struct kr_rplan *rplan, struct kr_query *qry)
*/
static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct kr_cache *cache, uint32_t timestamp)
{
return; // FIXME cleanup, etc.
#if 0
if (qry->flags.NO_MINIMIZE) {
return;
}
......@@ -194,6 +196,7 @@ static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct k
target = knot_wire_next_label(target, NULL);
}
kr_cache_sync(cache);
#endif
}
static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name,
......@@ -593,6 +596,7 @@ static int answer_finalize(struct kr_request *request, int state)
* Be conservative. Primary approach: check ranks of all RRs in wire.
* Only "negative answers" need special handling. */
bool secure = (last != NULL); /* suspicious otherwise */
VERBOSE_MSG(NULL, "AD: secure (start)\n");
if (last && (last->flags.STUB)) {
secure = false; /* don't trust forwarding for now */
}
......@@ -614,6 +618,7 @@ static int answer_finalize(struct kr_request *request, int state)
}
}
VERBOSE_MSG(NULL, "AD: secure (between ANS and AUTH)\n");
/* Write authority records. */
if (answer->current < KNOT_AUTHORITY) {
knot_pkt_begin(answer, KNOT_AUTHORITY);
......@@ -640,6 +645,7 @@ static int answer_finalize(struct kr_request *request, int state)
/* AD: "negative answers" need more handling. */
if (last && secure) {
VERBOSE_MSG(NULL, "AD: secure (1)\n");
if (kr_response_classify(answer) != PKT_NOERROR
/* Additionally check for CNAME chains that "end in NODATA",
* as those would also be PKT_NOERROR. */
......@@ -715,6 +721,7 @@ int kr_resolve_begin(struct kr_request *request, struct kr_context *ctx, knot_pk
array_init(request->additional);
array_init(request->answ_selected);
array_init(request->auth_selected);
array_init(request->add_selected);
request->answ_validated = false;
request->auth_validated = false;
......@@ -1239,6 +1246,7 @@ static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
/** @internal Check current zone cut status and credibility, spawn subrequests if needed. */
static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot_pkt_t *packet)
/* TODO: using the cache at this point in this way just isn't nice; remove in time */
{
/* Stub mode, just forward and do not solve cut. */
if (qry->flags.STUB) {
......@@ -1415,7 +1423,11 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
}
kr_nsrep_elect(qry, request->ctx);
if (qry->ns.score > KR_NS_MAX_SCORE) {
VERBOSE_MSG(qry, "=> no valid NS left\n");
if (!qry->zone_cut.nsset.root) {
VERBOSE_MSG(qry, "=> no NS with an address\n");
} else {
VERBOSE_MSG(qry, "=> no valid NS left\n");
}
ITERATE_LAYERS(request, qry, reset);
kr_rplan_pop(rplan, qry);
return KR_STATE_PRODUCE;
......
......@@ -194,6 +194,7 @@ struct kr_request {
int state;
ranked_rr_array_t answ_selected;
ranked_rr_array_t auth_selected;
ranked_rr_array_t add_selected;
rr_array_t additional;
bool answ_validated; /**< internal to validator; beware of caching, etc. */
bool auth_validated; /**< see answ_validated ^^ ; TODO */
......@@ -202,6 +203,13 @@ struct kr_request {
knot_mm_t pool;
};
/** Initializer for an array of *_selected. */
#define kr_request_selected(req) { \
[KNOT_ANSWER] = &(req)->answ_selected, \
[KNOT_AUTHORITY] = &(req)->auth_selected, \
[KNOT_ADDITIONAL] = &(req)->add_selected, \
}
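A short sketch of how the initializer is meant to be used (mirroring fetch_glue() in iterate.c above; req is a struct kr_request *):

ranked_rr_array_t *selected[] = kr_request_selected(req);
/* selected[KNOT_ANSWER]     == &req->answ_selected
 * selected[KNOT_AUTHORITY]  == &req->auth_selected
 * selected[KNOT_ADDITIONAL] == &req->add_selected */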
/**
* Begin name resolution.
*
......
......@@ -797,3 +797,42 @@ void kr_qry_print(const struct kr_query *qry, const char *prefix, const char *po
kr_rrtype_print(qry->stype, " ", postfix);
}