Commit 52674e98 authored by Vladimír Čunát

.

parent ccf2790e
This diff is collapsed.
@@ -87,7 +87,7 @@ struct kr_cache
#include "lib/module.h"
int cache_lmdb_peek(kr_layer_t *ctx, knot_pkt_t *pkt);
int cache_lmdb_stash(kr_layer_t *ctx, knot_pkt_t *pkt);
/**
* Open/create cache with provided storage options.
@@ -171,6 +171,29 @@ int kr_cache_remove(struct kr_cache *cache, uint8_t tag, const knot_dname_t *nam
*/
KR_EXPORT
int kr_cache_clear(struct kr_cache *cache);
/* ** This interface is temporary. ** */
struct kr_cache_p {
uint32_t time; /**< The time of inception. */
uint32_t ttl; /**< TTL at inception moment. Assuming it fits into int32_t ATM. */
uint8_t rank; /**< See enum kr_rank */
struct {
uint8_t *data, *data_bound;
};
};
KR_EXPORT
int kr_cache_peek_exact(struct kr_cache *cache, const knot_dname_t *name, uint16_t type,
struct kr_cache_p *peek);
KR_EXPORT
int32_t kr_cache_ttl(const struct kr_cache_p *peek, uint32_t current_time);
/*TODO: reorder*/
KR_EXPORT
int kr_cache_materialize(knot_rdataset_t *dst, const struct kr_cache_p *ref,
uint32_t new_ttl, knot_mm_t *pool);
#if 0
/**
......
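A minimal usage sketch of the temporary peek interface above, mirroring the lookup flow this commit introduces in the zone-cut code below; the helper name and the include path are illustrative assumptions, not part of the commit:
#include "lib/cache.h"	/* assumed header path for the declarations above */
/* Sketch only: peek an exact record, check that it has not expired,
 * and copy it out of the cache with the decayed TTL. */
static int peek_and_copy(struct kr_cache *cache, const knot_dname_t *name,
			 uint16_t type, uint32_t now, knot_mm_t *pool,
			 knot_rdataset_t *out)
{
	struct kr_cache_p peek = {};
	int ret = kr_cache_peek_exact(cache, name, type, &peek);
	if (ret != 0) {
		return ret;	/* not in the cache (or a cache error) */
	}
	int32_t new_ttl = kr_cache_ttl(&peek, now);
	if (new_ttl < 0) {
		return kr_error(ESTALE);	/* past its TTL */
	}
	return kr_cache_materialize(out, &peek, new_ttl, pool);
}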
@@ -8,7 +8,7 @@ const kr_layer_api_t *cache_lmdb_layer(struct kr_module *module)
{
static const kr_layer_api_t _layer = {
.produce = &cache_lmdb_peek,
//.consume = &cache_stash
.consume = &cache_lmdb_stash,
};
return &_layer;
......
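For orientation, a skeleton of a callback matching the signatures declared in the header hunk above (int f(kr_layer_t *, knot_pkt_t *)); the name and body here are a sketch, not the module's actual implementation, which sits in the collapsed part of this diff:
/* Hypothetical consume-style hook: it must return a layer state. */
static int example_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
{
	if (!pkt) {
		return ctx->state;	/* nothing to stash; pass the state through */
	}
	/* ... inspect the answer in 'pkt' and store usable records in the cache ... */
	return ctx->state;
}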
@@ -268,21 +268,26 @@ int kr_zonecut_set_sbelt(struct kr_context *ctx, struct kr_zonecut *cut)
return ret;
}
/** Fetch address for zone cut. */
/** Fetch address for zone cut. Any rank is accepted (i.e. glue as well). */
static void fetch_addr(struct kr_zonecut *cut, struct kr_cache *cache, const knot_dname_t *ns, uint16_t rrtype, uint32_t timestamp)
// LATER(optim.): excessive data copying
{
uint8_t rank = 0;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)ns, rrtype, KNOT_CLASS_IN);
if (kr_cache_peek_rr(cache, &cached_rr, &rank, NULL, &timestamp) != 0) {
struct kr_cache_p peek = {};
if (kr_cache_peek_exact(cache, ns, rrtype, &peek) != 0) {
return;
}
int32_t new_ttl = kr_cache_ttl(&peek, timestamp);
if (new_ttl < 0) {
return;
}
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, /*const-cast*/(knot_dname_t *)ns, rrtype, KNOT_CLASS_IN);
kr_cache_materialize(&cached_rr.rrs, &peek, new_ttl, cut->pool);
knot_rdata_t *rd = cached_rr.rrs.data;
for (uint16_t i = 0; i < cached_rr.rrs.rr_count; ++i) {
if (knot_rdata_ttl(rd) > timestamp) {
(void) kr_zonecut_add(cut, ns, rd);
}
(void) kr_zonecut_add(cut, ns, rd);
rd = kr_rdataset_next(rd);
}
}
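The new_ttl < 0 checks added in this file assume kr_cache_ttl() yields the remaining lifetime. Based on the kr_cache_p fields (time of inception, TTL at inception) it plausibly behaves like the sketch below; the real implementation is in the collapsed cache diff, so treat this as an assumption:
/* Assumed semantics only: remaining TTL = TTL at inception minus the time
 * elapsed since inception; the result goes negative once the entry expires. */
int32_t kr_cache_ttl(const struct kr_cache_p *peek, uint32_t current_time)
{
	int64_t elapsed = (int64_t)current_time - (int64_t)peek->time;
	return (int32_t)((int64_t)peek->ttl - elapsed);
}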
@@ -292,30 +297,31 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
const knot_dname_t *name, uint32_t timestamp,
uint8_t * restrict rank, uint8_t * restrict flags)
{
uint32_t drift = timestamp;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
int ret = kr_cache_peek_rr(&ctx->cache, &cached_rr, rank, flags, &drift);
struct kr_cache_p peek = {};
int ret = kr_cache_peek_exact(&ctx->cache, name, KNOT_RRTYPE_NS, &peek);
if (ret != 0) {
return ret;
}
int32_t new_ttl = kr_cache_ttl(&peek, timestamp);
if (new_ttl < 0) {
return kr_error(ESTALE);
}
/* Note: we accept *any* rank from the cache. We assume that nothing
* completely untrustworthy could get into the cache, e.g. out-of-bailiwick
* records that weren't validated.
*/
/* Materialize as we'll going to do more cache lookups. */
knot_rrset_t rr_copy;
ret = kr_cache_materialize(&rr_copy, &cached_rr, drift, 0, cut->pool);
/* Materialize the rdataset temporarily, for simplicity. */
knot_rdataset_t ns_rds = {};
ret = kr_cache_materialize(&ns_rds, &peek, new_ttl, cut->pool);
if (ret != 0) {
return ret;
}
/* Insert name servers for this zone cut, addresses will be looked up
* on-demand (either from cache or iteratively) */
for (unsigned i = 0; i < rr_copy.rrs.rr_count; ++i) {
const knot_dname_t *ns_name = knot_ns_name(&rr_copy.rrs, i);
kr_zonecut_add(cut, ns_name, NULL);
for (unsigned i = 0; i < ns_rds.rr_count; ++i) {
const knot_dname_t *ns_name = knot_ns_name(&ns_rds, i);
(void) kr_zonecut_add(cut, ns_name, NULL);
/* Fetch NS reputation and decide whether to prefetch A/AAAA records. */
unsigned *cached = lru_get_try(ctx->cache_rep,
(const char *)ns_name, knot_dname_size(ns_name));
@@ -328,7 +334,7 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
}
}
knot_rrset_clear(&rr_copy, cut->pool);
knot_rdataset_clear(&ns_rds, cut->pool);
return kr_ok();
}
@@ -341,27 +347,33 @@ static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache,
if (!rr) {
return kr_error(ENOENT);
}
uint8_t rank = 0;
uint32_t drift = timestamp;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)owner, type, KNOT_CLASS_IN);
int ret = kr_cache_peek_rr(cache, &cached_rr, &rank, NULL, &drift);
/* peek, check rank and TTL */
struct kr_cache_p peek = {};
int ret = kr_cache_peek_exact(cache, owner, type, &peek);
if (ret != 0) {
return ret;
}
const bool rankOK = kr_rank_test(rank, KR_RANK_SECURE);
if (!rankOK) {
if (!kr_rank_test(peek.rank, KR_RANK_SECURE)) {
return kr_error(ENOENT);
}
int32_t new_ttl = kr_cache_ttl(&peek, timestamp);
if (new_ttl < 0) {
return kr_error(ESTALE);
}
/* materialize a new RRset */
knot_rrset_free(rr, pool);
*rr = mm_alloc(pool, sizeof(knot_rrset_t));
if (*rr == NULL) {
return kr_error(ENOMEM);
}
ret = kr_cache_materialize(*rr, &cached_rr, drift, 0, pool);
owner = knot_dname_copy(/*const-cast*/(knot_dname_t *)owner, pool);
if (!owner) {
mm_free(pool, *rr);
*rr = NULL;
return kr_error(ENOMEM);
}
knot_rrset_init(*rr, /*const-cast*/(knot_dname_t *)owner, type, KNOT_CLASS_IN);
ret = kr_cache_materialize(&(*rr)->rrs, &peek, new_ttl, pool);
if (ret != 0) {
knot_rrset_free(rr, pool);
return ret;
......
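A hedged caller sketch for the rewritten fetch_secure_rrset(): the rrset and its copied owner are allocated from the supplied pool and released with knot_rrset_free(). The parameters beyond those visible in the hunk header, and the DNSKEY example, are assumptions:
/* Illustrative only; fetch_secure_rrset() is static to this file. */
knot_rrset_t *keys = NULL;
int ret = fetch_secure_rrset(&keys, &ctx->cache, zone_name, KNOT_RRTYPE_DNSKEY,
			     pool, timestamp);
if (ret == 0) {
	/* ... use keys->rrs ... */
	knot_rrset_free(&keys, pool);
}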