Commit 810e2fec authored by Vladimír Čunát

cache: basic preparation for stale-serving

parent c4f61dfd
@@ -207,21 +207,29 @@ struct entry_h * entry_h_consistent(knot_db_val_t data, uint16_t ktype)
 }
 
-int32_t get_new_ttl(const struct entry_h *entry, uint32_t current_time)
+int32_t get_new_ttl(const struct entry_h *entry, const struct kr_query *qry,
+		const knot_dname_t *owner, uint16_t type)
 {
-	int32_t diff = current_time - entry->time;
+	int32_t diff = qry->timestamp.tv_sec - entry->time;
 	if (diff < 0) {
 		/* We may have obtained the record *after* the request started. */
 		diff = 0;
 	}
 	int32_t res = entry->ttl - diff;
-	//VERBOSE_MSG(NULL, "TTL remains: %d\n", (int)res);
+	if (res < 0 && owner && false/*qry->flags.SERVE_STALE*/) {
+		/* Stale-serving decision.  FIXME: modularize or make configurable, etc. */
+		if (res + 3600 * 24 > 0) {
+			VERBOSE_MSG(qry, "stale TTL accepted: %d -> 1\n", (int)res);
+			return 1;
+		}
+	}
 	return res;
 }
 
-int32_t kr_cache_ttl(const struct kr_cache_p *peek, uint32_t current_time)
+int32_t kr_cache_ttl(const struct kr_cache_p *peek, const struct kr_query *qry,
+		const knot_dname_t *name, uint16_t type)
 {
 	const struct entry_h *eh = peek->raw_data;
-	return get_new_ttl(eh, current_time);
+	return get_new_ttl(eh, qry, name, type);
 }
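
For orientation, here is a self-contained model of the arithmetic in this hunk (a sketch: the stub struct and the serve_stale parameter are illustration-only stand-ins, since the real guard is still hard-coded to false above; the 24-hour window and the clamp to TTL 1 come straight from the code):

#include <stdint.h>
#include <stdio.h>

/* Stub standing in for struct entry_h; only the two fields used here. */
struct entry_stub {
	uint32_t time; /* absolute insertion time (seconds) */
	uint32_t ttl;  /* original TTL (seconds) */
};

/* Simplified model of get_new_ttl(): a negative result means the record
 * is stale by that many seconds; serve_stale stands in for the guard
 * that the hunk still hard-codes to false. */
static int32_t model_new_ttl(const struct entry_stub *e, uint32_t now, int serve_stale)
{
	int32_t diff = now - e->time;
	if (diff < 0)
		diff = 0; /* record obtained *after* the request started */
	int32_t res = e->ttl - diff;
	if (res < 0 && serve_stale && res + 3600 * 24 > 0)
		return 1; /* stale but within one day: serve with TTL 1 */
	return res;
}

int main(void)
{
	struct entry_stub e = { .time = 1000, .ttl = 300 };
	/* 3900 s after insertion, i.e. 3600 s past expiry, inside the 24 h window: */
	printf("%d\n", (int)model_new_ttl(&e, 4900, 1)); /* 1 */
	printf("%d\n", (int)model_new_ttl(&e, 4900, 0)); /* -3600 */
	return 0;
}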
@@ -374,12 +382,14 @@ static int cache_peek_real(kr_layer_t *ctx, knot_pkt_t *pkt)
 			kr_dname_print(k->zname, "", "\n");
 		}
 		break;
-	case KNOT_RRTYPE_CNAME:
+	case KNOT_RRTYPE_CNAME: {
+		const uint32_t new_ttl = get_new_ttl(val_cut.data, qry,
+					qry->sname, KNOT_RRTYPE_CNAME);
 		ret = answer_simple_hit(ctx, pkt, KNOT_RRTYPE_CNAME, val_cut.data,
-				val_cut.data + val_cut.len,
-				get_new_ttl(val_cut.data, qry->timestamp.tv_sec));
-		/* TODO: ^^ cumbersome code */
+				val_cut.data + val_cut.len, new_ttl);
+		/* TODO: ^^ cumbersome code; we also recompute the TTL */
 		return ret == kr_ok() ? KR_STATE_DONE : ctx->state;
+		}
 	case KNOT_RRTYPE_DNAME:
 		VERBOSE_MSG(qry, "=> DNAME not supported yet\n"); // LATER
@@ -520,7 +530,8 @@ static int cache_peek_real(kr_layer_t *ctx, knot_pkt_t *pkt)
 			return ctx->state;
 			// LATER: recovery in case of error, perhaps via removing the entry?
 		}
-		int32_t new_ttl = get_new_ttl(eh, qry->timestamp.tv_sec);
+		int32_t new_ttl = get_new_ttl(eh, qry, qry->sname, qry->stype);
+			/* ^^ here we use the *expanded* wildcard name */
 		if (new_ttl < 0 || eh->rank < lowest_rank || eh->is_packet) {
 			/* Wildcard record with stale TTL, bad rank or packet. */
 			VERBOSE_MSG(qry, "=> wildcard: skipping %s, rank 0%0.2o, new TTL %d\n",
@@ -551,7 +562,7 @@ static int cache_peek_real(kr_layer_t *ctx, knot_pkt_t *pkt)
 		return ctx->state;
 	}
 	/* Check if the record is OK. */
-	int32_t new_ttl = get_new_ttl(eh, qry->timestamp.tv_sec);
+	int32_t new_ttl = get_new_ttl(eh, qry, k->zname, KNOT_RRTYPE_SOA);
 	if (new_ttl < 0 || eh->rank < lowest_rank || eh->is_packet) {
 		VERBOSE_MSG(qry, "=> SOA unfit %s: ",
 				eh->is_packet ? "packet" : "RR");
@@ -866,7 +877,7 @@ static int found_exact_hit(kr_layer_t *ctx, knot_pkt_t *pkt, knot_db_val_t val,
 		// LATER(optim): perhaps optimize the zone cut search
 	}
 
-	int32_t new_ttl = get_new_ttl(eh, qry->timestamp.tv_sec);
+	int32_t new_ttl = get_new_ttl(eh, qry, qry->sname, qry->stype);
 	if (new_ttl < 0 || eh->rank < lowest_rank) {
 		/* Positive record with stale TTL or bad rank.
 		 * LATER(optim.): It's unlikely that we find a negative one,
@@ -1007,7 +1018,7 @@ static knot_db_val_t closest_NS(kr_layer_t *ctx, struct key *k)
 			assert(false);
 			goto next_label;
 		}
-		int32_t new_ttl = get_new_ttl(eh, qry->timestamp.tv_sec);
+		int32_t new_ttl = get_new_ttl(eh, qry, k->zname, type);
 		if (new_ttl < 0
 			/* Not interested in negative or bogus. */
 			|| eh->is_packet
@@ -106,8 +106,10 @@ struct kr_cache_p {
 KR_EXPORT
 int kr_cache_peek_exact(struct kr_cache *cache, const knot_dname_t *name, uint16_t type,
 			struct kr_cache_p *peek);
+/* Parameters (qry, name, type) are used for timestamp and stale-serving decisions. */
 KR_EXPORT
-int32_t kr_cache_ttl(const struct kr_cache_p *peek, uint32_t current_time);
+int32_t kr_cache_ttl(const struct kr_cache_p *peek, const struct kr_query *qry,
+		const knot_dname_t *name, uint16_t type);
 /*TODO: reorder*/
 KR_EXPORT
 int kr_cache_materialize(knot_rdataset_t *dst, const struct kr_cache_p *ref,
@@ -158,8 +158,10 @@ int entry_h_splice(
 	/* If equal rank was accepted, spoofing a *single* answer would be
 	 * enough to e.g. override NS record in AUTHORITY section.
 	 * This way they would have to hit the first answer
-	 * (whenever TTL nears expiration). */
-	int32_t old_ttl = get_new_ttl(eh_orig, qry->timestamp.tv_sec);
+	 * (whenever TTL nears expiration).
+	 * Stale-serving is NOT considered, but TTL 1 would be considered
+	 * as expiring anyway, ... */
+	int32_t old_ttl = get_new_ttl(eh_orig, qry, NULL, 0);
 	if (old_ttl > 0 && !is_expiring(old_ttl, eh_orig->ttl)
 		&& rank <= eh_orig->rank) {
 		WITH_VERBOSE {
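
The is_expiring() check quoted here is defined in the next hunk; below is a self-contained model of the acceptance threshold (a sketch: the int64_t prelude is an assumption, since only the return line is visible in the hunk):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of is_expiring(): "expiring" once the remaining TTL drops under
 * roughly 1 % of the original TTL plus a 5 s margin. */
static bool model_is_expiring(uint32_t orig_ttl, uint32_t new_ttl)
{
	int64_t nttl = new_ttl; /* assumed prelude; only the return line is quoted */
	return 100 * (nttl - 5) < orig_ttl;
}

int main(void)
{
	/* orig TTL 86400 s: the threshold is about 86400/100 + 5 = 869 s left. */
	printf("%d\n", (int)model_is_expiring(86400, 900)); /* 0: not expiring, so an equal-rank update is refused */
	printf("%d\n", (int)model_is_expiring(86400, 800)); /* 1: expiring, so the update is accepted */
	return 0;
}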
@@ -144,9 +144,14 @@ static inline bool is_expiring(uint32_t orig_ttl, uint32_t new_ttl)
 	return 100 * (nttl - 5) < orig_ttl;
 }
 
-/** Returns signed result so you can inspect how much stale the RR is. */
-int32_t get_new_ttl(const struct entry_h *entry, uint32_t current_time);
+/** Returns signed result so you can inspect how much stale the RR is.
+ *
+ * @param owner name for stale-serving decisions.  You may pass NULL to disable stale.
+ *		FIXME: NSEC uses zone name ATM.
+ * @param type for stale-serving.
+ */
+int32_t get_new_ttl(const struct entry_h *entry, const struct kr_query *qry,
+		const knot_dname_t *owner, uint16_t type);
 
 /* RRset (de)materialization; implementation in ./entry_rr.c */
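
A minimal sketch of the two calling conventions this doc-comment allows, as used elsewhere in the commit (the header path and the wrapper function are illustrative assumptions):

#include "lib/cache/impl.h" /* assumed header; get_new_ttl() lives with the cache impl */

static void ttl_examples(const struct entry_h *eh, const struct kr_query *qry)
{
	/* Owner and type supplied: stale-serving decisions may apply. */
	int32_t ttl_maybe_stale = get_new_ttl(eh, qry, qry->sname, qry->stype);

	/* NULL owner disables stale-serving, as entry_h_splice() does above. */
	int32_t ttl_strict = get_new_ttl(eh, qry, NULL, 0);

	(void)ttl_maybe_stale; (void)ttl_strict;
}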
@@ -177,7 +177,9 @@ static const char * find_leq_NSEC1(struct kr_cache *cache, const struct kr_query
 		 * in case we searched before the very first one in the zone. */
 		return "range search found inconsistent entry";
 	}
-	int32_t new_ttl_ = get_new_ttl(eh, qry->timestamp.tv_sec);
+	/* FIXME(stale): passing just zone name instead of owner, as we don't
+	 * have it reconstructed at this point. */
+	int32_t new_ttl_ = get_new_ttl(eh, qry, k->zname, KNOT_RRTYPE_NSEC);
 	if (new_ttl_ < 0 || !kr_rank_test(eh->rank, KR_RANK_SECURE)) {
 		return "range search found stale or insecure entry";
 		/* TODO: remove the stale record *and* retry,
@@ -234,7 +234,7 @@ static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name
 	 * try to fetch ta & keys even if initial cut name not covered by TA */
 	bool secured = !is_insecured;
 	int ret = kr_zonecut_find_cached(req->ctx, &cut_found, requested_name,
-					 qry->timestamp.tv_sec, &secured);
+					 qry, &secured);
 	if (ret == kr_error(ENOENT)) {
 		/* No cached cut found, start from SBELT
 		 * and issue priming query. */
@@ -269,14 +269,16 @@ int kr_zonecut_set_sbelt(struct kr_context *ctx, struct kr_zonecut *cut)
 }
 
 /** Fetch address for zone cut.  Any rank is accepted (i.e. glue as well). */
-static void fetch_addr(struct kr_zonecut *cut, struct kr_cache *cache, const knot_dname_t *ns, uint16_t rrtype, uint32_t timestamp)
+static void fetch_addr(struct kr_zonecut *cut, struct kr_cache *cache,
+			const knot_dname_t *ns, uint16_t rrtype,
+			const struct kr_query *qry)
 // LATER(optim.): excessive data copying
 {
 	struct kr_cache_p peek = {};
 	if (kr_cache_peek_exact(cache, ns, rrtype, &peek) != 0) {
 		return;
 	}
-	int32_t new_ttl = kr_cache_ttl(&peek, timestamp);
+	int32_t new_ttl = kr_cache_ttl(&peek, qry, ns, rrtype);
 	if (new_ttl < 0) {
 		return;
 	}
@@ -295,7 +297,7 @@ static void fetch_addr(struct kr_zonecut *cut, struct kr_cache *cache, const kno
 
 /** Fetch best NS for zone cut. */
 static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
-		    const knot_dname_t *name, uint32_t timestamp,
+		    const knot_dname_t *name, const struct kr_query *qry,
 		    uint8_t * restrict rank)
 {
 	struct kr_cache_p peek = {};
@@ -303,7 +305,7 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
 	if (ret != 0) {
 		return ret;
 	}
-	int32_t new_ttl = kr_cache_ttl(&peek, timestamp);
+	int32_t new_ttl = kr_cache_ttl(&peek, qry, name, KNOT_RRTYPE_NS);
 	if (new_ttl < 0) {
 		return kr_error(ESTALE);
 	}
@@ -328,10 +330,10 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
 			(const char *)ns_name, knot_dname_size(ns_name));
 		unsigned reputation = (cached) ? *cached : 0;
 		if (!(reputation & KR_NS_NOIP4) && !(ctx->options.NO_IPV4)) {
-			fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_A, timestamp);
+			fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_A, qry);
 		}
 		if (!(reputation & KR_NS_NOIP6) && !(ctx->options.NO_IPV6)) {
-			fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_AAAA, timestamp);
+			fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_AAAA, qry);
 		}
 	}
@@ -343,7 +345,8 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
  * Fetch secure RRSet of given type.
  */
 static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache,
-	const knot_dname_t *owner, uint16_t type, knot_mm_t *pool, uint32_t timestamp)
+	const knot_dname_t *owner, uint16_t type, knot_mm_t *pool,
+	const struct kr_query *qry)
 {
 	if (!rr) {
 		return kr_error(ENOENT);
@@ -357,7 +360,7 @@ static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache,
 	if (!kr_rank_test(peek.rank, KR_RANK_SECURE)) {
 		return kr_error(ENOENT);
 	}
-	int32_t new_ttl = kr_cache_ttl(&peek, timestamp);
+	int32_t new_ttl = kr_cache_ttl(&peek, qry, owner, type);
 	if (new_ttl < 0) {
 		return kr_error(ESTALE);
 	}
@@ -383,8 +386,9 @@ static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache,
 	return kr_ok();
 }
 
-int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut, const knot_dname_t *name,
-			   uint32_t timestamp, bool * restrict secured)
+int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut,
+			   const knot_dname_t *name, const struct kr_query *qry,
+			   bool * restrict secured)
 {
 	kr_log_verbose("[ ][ *c ] kr_zonecut_find_cached\n");
 	if (!ctx || !cut || !name) {
@@ -401,7 +405,7 @@ int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut, const
 		/* Fetch NS first and see if it's insecure. */
 		uint8_t rank = 0;
 		const bool is_root = (label[0] == '\0');
-		if (fetch_ns(ctx, cut, label, timestamp, &rank) == 0) {
+		if (fetch_ns(ctx, cut, label, qry, &rank) == 0) {
 			/* Flag as insecure if cached as this */
 			if (kr_rank_test(rank, KR_RANK_INSECURE)) {
 				*secured = false;
@@ -409,9 +413,9 @@ int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut, const
 			/* Fetch DS and DNSKEY if caller wants secure zone cut */
 			if (*secured || is_root) {
 				fetch_secure_rrset(&cut->trust_anchor, &ctx->cache, label,
-						   KNOT_RRTYPE_DS, cut->pool, timestamp);
+						   KNOT_RRTYPE_DS, cut->pool, qry);
 				fetch_secure_rrset(&cut->key, &ctx->cache, label,
-						   KNOT_RRTYPE_DNSKEY, cut->pool, timestamp);
+						   KNOT_RRTYPE_DNSKEY, cut->pool, qry);
 			}
 			update_cut_name(cut, label);
 			mm_free(cut->pool, qname);
@@ -142,10 +142,11 @@ int kr_zonecut_set_sbelt(struct kr_context *ctx, struct kr_zonecut *cut);
  * @param ctx     resolution context (to fetch data from LRU caches)
  * @param cut     zone cut to be populated
  * @param name    QNAME to start finding zone cut for
- * @param timestamp transaction timestamp
+ * @param qry     query for timestamp and stale-serving decisions
  * @param secured set to true if want secured zone cut, will return false if it is provably insecure
  * @return 0 or error code (ENOENT if it doesn't find anything)
  */
 KR_EXPORT
-int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut, const knot_dname_t *name,
-			   uint32_t timestamp, bool * restrict secured);
+int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut,
+			   const knot_dname_t *name, const struct kr_query *qry,
+			   bool * restrict secured);
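
Finally, a call-site sketch against the updated declaration, mirroring the ns_fetch_cut() hunk above (the wrapper function, header choice, and error handling are illustrative assumptions):

#include "lib/zonecut.h" /* assumed header for the declaration above */

static int find_cut_example(struct kr_request *req, struct kr_query *qry,
			    const knot_dname_t *requested_name, bool is_insecured)
{
	struct kr_zonecut cut_found;
	kr_zonecut_init(&cut_found, requested_name, &req->pool);
	bool secured = !is_insecured;
	/* The query now carries both the timestamp and any stale-serving flags. */
	int ret = kr_zonecut_find_cached(req->ctx, &cut_found, requested_name,
					 qry, &secured);
	if (ret == kr_error(ENOENT)) {
		/* No cached cut: start from SBELT and issue a priming query. */
	}
	return ret;
}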