Commit 50acb952 authored by Marek Vavruša

lib/cache: updated API to insert/retrieve entry rank

parent c455e7a3
@@ -24,6 +24,8 @@
#include <libknot/internal/namedb/namedb_lmdb.h>
#include <libknot/errcode.h>
#include <libknot/descriptor.h>
#include <libknot/dname.h>
#include <libknot/rrtype/rrsig.h>
#include "lib/cache.h"
#include "lib/defines.h"
@@ -284,7 +286,7 @@ int kr_cache_clear(struct kr_cache_txn *txn)
return txn_api(txn)->clear(&txn->t);
}
int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint32_t *timestamp)
int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp)
{
if (!txn || !rr || !timestamp) {
return kr_error(EINVAL);
@@ -335,7 +337,7 @@ int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t dr
return kr_ok();
}
int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint32_t timestamp)
int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp)
{
if (!txn || !rr) {
return kr_error(EINVAL);
@@ -350,6 +352,7 @@ int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint32_
struct kr_cache_entry header = {
.timestamp = timestamp,
.ttl = 0,
.rank = rank,
.count = rr->rrs.rr_count
};
knot_rdata_t *rd = rr->rrs.data;
@@ -364,7 +367,7 @@ int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint32_
return kr_cache_insert(txn, KR_CACHE_RR, rr->owner, rr->type, &header, data);
}
int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint32_t *timestamp)
int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp)
{
if (!txn || !rr || !timestamp) {
return kr_error(EINVAL);
@@ -382,7 +385,7 @@ int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint32_t *ti
return kr_ok();
}
int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t typec, uint32_t timestamp)
int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp)
{
if (!txn || !rr) {
return kr_error(EINVAL);
@@ -397,6 +400,7 @@ int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint
struct kr_cache_entry header = {
.timestamp = timestamp,
.ttl = 0,
.rank = rank,
.count = rr->rrs.rr_count
};
for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
@@ -406,6 +410,7 @@ int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint
}
}
uint16_t covered = knot_rrsig_type_covered(&rr->rrs, 0);
namedb_val_t data = { rr->rrs.data, knot_rdataset_size(&rr->rrs) };
return kr_cache_insert(txn, KR_CACHE_SIG, rr->owner, typec, &header, data);
return kr_cache_insert(txn, KR_CACHE_SIG, rr->owner, covered, &header, data);
}
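A side effect visible in this hunk: kr_cache_insert_rrsig() no longer takes the covered type as a parameter; it derives it from the first RDATA with knot_rrsig_type_covered() and keys the entry under that type. A minimal sketch of a call site under the new signature follows; the helper name and the zero rank are illustrative, not part of this commit.

/* Illustrative only: helper name and rank value are hypothetical.
 * The covered type is no longer passed in; kr_cache_insert_rrsig()
 * reads it from the RRSIG RDATA itself, as the hunk above shows. */
static int stash_one_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rrsig,
                           uint32_t now)
{
	const uint16_t rank = 0;  /* placeholder; a real rank comes from the caller */
	return kr_cache_insert_rrsig(txn, rrsig, rank, now);
}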
@@ -164,10 +164,11 @@ int kr_cache_clear(struct kr_cache_txn *txn);
* @note The 'drift' is the time passed between the cache time of the RRSet and now (in seconds).
* @param txn transaction instance
* @param rr query RRSet (its rdataset may be changed depending on the result)
* @param rank entry rank will be stored in this variable
* @param timestamp current time (will be replaced with drift if successful)
* @return 0 or an errcode
*/
int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint32_t *timestamp);
int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp);
/**
* Clone read-only RRSet and adjust TTLs.
@@ -183,28 +184,30 @@ int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t dr
* Insert RRSet into cache, replacing any existing data.
* @param txn transaction instance
* @param rr inserted RRSet
* @param rank rank of the data
* @param timestamp current time
* @return 0 or an errcode
*/
int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint32_t timestamp);
int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp);
/**
* Peek the cache for the given RRset signature (name, type)
* @note The RRset type must not be RRSIG but instead it must equal the type covered field of the sought RRSIG.
* @param txn transaction instance
* @param rr query RRSET (its rdataset and type may be changed depending on the result)
* @param rank entry rank will be stored in this variable
* @param timestamp current time (will be replaced with drift if successful)
* @return 0 or an errcode
*/
int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint32_t *timestamp);
int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp);
/**
* Insert the selected RRSIG RRSet of the selected type covered into cache, replacing any existing data.
* @note The RRSet must contain RRSIGS with only the specified type covered.
* @param txn transaction instance
* @param rr inserted RRSIG RRSet
* @param typec type covered of the RDATA
* @param rank rank of the data
* @param timestamp current time
* @return 0 or an errcode
*/
int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t typec, uint32_t timestamp);
int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp);
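Taken together, the declarations above mean every peek gains a rank out-parameter and every insert takes a rank value. A minimal sketch of a caller under the new prototypes, assuming only the signatures shown here and the KR_RANK_SECURE flag referenced later in this commit; the wrapper name is illustrative.

/* Hypothetical wrapper; only the kr_cache_* prototypes above and the
 * KR_RANK_SECURE flag used by the rrcache layer below come from this commit. */
static int peek_rr_with_rank(struct kr_cache_txn *txn, knot_rrset_t *rr,
                             uint32_t now, uint16_t *rank)
{
	uint32_t drift = now;               /* in: current time, out: drift */
	int ret = kr_cache_peek_rr(txn, rr, rank, &drift);
	if (ret != 0) {
		return ret;                 /* e.g. kr_error(ENOENT) or kr_error(ESTALE) */
	}
	if (*rank & KR_RANK_SECURE) {
		/* entry was stored with the SECURE flag set */
	}
	return kr_ok();
}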
@@ -41,13 +41,14 @@ static int loot_rr(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_dname_t
{
/* Check if record exists in cache */
int ret = 0;
uint16_t rank = 0;
uint32_t drift = qry->timestamp.tv_sec;
knot_rrset_t cache_rr;
knot_rrset_init(&cache_rr, (knot_dname_t *)name, rrtype, rrclass);
if (fetch_rrsig) {
ret = kr_cache_peek_rrsig(txn, &cache_rr, &drift);
ret = kr_cache_peek_rrsig(txn, &cache_rr, &rank, &drift);
} else {
ret = kr_cache_peek_rr(txn, &cache_rr, &drift);
ret = kr_cache_peek_rr(txn, &cache_rr, &rank, &drift);
}
if (ret != 0) {
return ret;
@@ -138,21 +139,14 @@ struct stash_baton
uint32_t min_ttl;
};
static int commit_rrsig(struct stash_baton *baton, knot_rrset_t *rr)
static int commit_rrsig(struct stash_baton *baton, uint16_t rank, knot_rrset_t *rr)
{
/* If not doing secure resolution, ignore (unvalidated) RRSIGs. */
if (!(baton->qry->flags & QUERY_DNSSEC_WANT)) {
return kr_ok();
}
/* Commit covering RRSIG to a separate cache namespace. */
uint16_t covered = knot_rrsig_type_covered(&rr->rrs, 0);
unsigned drift = baton->timestamp;
knot_rrset_t query_rrsig;
knot_rrset_init(&query_rrsig, rr->owner, covered, rr->rclass);
if (kr_cache_peek_rrsig(baton->txn, &query_rrsig, &drift) == 0) {
return kr_ok();
}
return kr_cache_insert_rrsig(baton->txn, rr, covered, baton->timestamp);
return kr_cache_insert_rrsig(baton->txn, rr, rank, baton->timestamp);
}
static int commit_rr(const char *key, void *val, void *data)
@@ -169,17 +163,19 @@ static int commit_rr(const char *key, void *val, void *data)
}
/* Save RRSIG in a special cache. */
uint16_t rank = KEY_FLAG_RANK(key);
if (baton->qry->flags & QUERY_DNSSEC_WANT)
rank |= KR_RANK_SECURE;
if (baton->qry->flags & QUERY_DNSSEC_INSECURE)
rank |= KR_RANK_INSECURE;
if (KEY_COVERING_RRSIG(key)) {
return commit_rrsig(baton, rr);
return commit_rrsig(baton, rank, rr);
}
/* Check if already cached */
/** @todo This should check if less trusted data is in the cache,
for that the cache would need to trace data trust level.
*/
knot_rrset_t query_rr;
knot_rrset_init(&query_rr, rr->owner, rr->type, rr->rclass);
return kr_cache_insert_rr(baton->txn, rr, baton->timestamp);
return kr_cache_insert_rr(baton->txn, rr, rank, baton->timestamp);
}
static int stash_commit(map_t *stash, struct kr_query *qry, struct kr_cache_txn *txn, struct kr_request *req)
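The rank written into the cache is composed in commit_rr() above: a base value decoded from the stash key, plus DNSSEC flags taken from the query. Isolated as a sketch; the function name is hypothetical, the macros are the ones the hunk uses.

/* Sketch of the rank composition in commit_rr(); rank_for_key() is a
 * hypothetical name, the macros are those referenced in the hunk above. */
static uint16_t rank_for_key(const char *key, const struct kr_query *qry)
{
	uint16_t rank = KEY_FLAG_RANK(key);   /* base rank encoded in the stash key */
	if (qry->flags & QUERY_DNSSEC_WANT) {
		rank |= KR_RANK_SECURE;
	}
	if (qry->flags & QUERY_DNSSEC_INSECURE) {
		rank |= KR_RANK_INSECURE;
	}
	return rank;
}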
@@ -282,9 +282,10 @@ int kr_zonecut_set_sbelt(struct kr_context *ctx, struct kr_zonecut *cut)
/** Fetch address for zone cut. */
static void fetch_addr(struct kr_zonecut *cut, const knot_dname_t *ns, uint16_t rrtype, struct kr_cache_txn *txn, uint32_t timestamp)
{
uint16_t rank = 0;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)ns, rrtype, KNOT_CLASS_IN);
if (kr_cache_peek_rr(txn, &cached_rr, &timestamp) != 0) {
if (kr_cache_peek_rr(txn, &cached_rr, &rank, &timestamp) != 0) {
return;
}
@@ -300,10 +301,11 @@ static void fetch_addr(struct kr_zonecut *cut, const knot_dname_t *ns, uint16_t
/** Fetch best NS for zone cut. */
static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut, const knot_dname_t *name, struct kr_cache_txn *txn, uint32_t timestamp)
{
uint16_t rank = 0;
uint32_t drift = timestamp;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
int ret = kr_cache_peek_rr(txn, &cached_rr, &drift);
int ret = kr_cache_peek_rr(txn, &cached_rr, &rank, &drift);
if (ret != 0) {
return ret;
}
@@ -337,11 +339,11 @@ static int fetch_rrset(knot_rrset_t **rr, const knot_dname_t *owner, uint16_t ty
return kr_error(ENOENT);
}
uint16_t rank = 0;
uint32_t drift = timestamp;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)owner, type, KNOT_CLASS_IN);
int ret = kr_cache_peek_rr(txn, &cached_rr, &drift);
int ret = kr_cache_peek_rr(txn, &cached_rr, &rank, &drift);
if (ret != 0) {
return ret;
}
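The zone-cut fetchers only widen their kr_cache_peek_rr() calls; the rank they receive is not consumed yet. A call site that deliberately ignores it looks roughly like the sketch below; the wrapper name is illustrative.

/* Hypothetical wrapper mirroring fetch_ns() above: the rank out-parameter
 * must be supplied under the new API but is simply discarded here. */
static int peek_ns_ignoring_rank(struct kr_cache_txn *txn, const knot_dname_t *name,
                                 knot_rrset_t *out, uint32_t timestamp)
{
	uint16_t rank = 0;            /* required by the new signature, unused */
	uint32_t drift = timestamp;
	knot_rrset_init(out, (knot_dname_t *)name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
	return kr_cache_peek_rr(txn, out, &rank, &drift);
}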
@@ -266,10 +266,10 @@ static void test_invalid(void **state)
assert_int_not_equal(kr_cache_txn_commit(NULL), 0);
assert_int_not_equal(kr_cache_peek(NULL, KR_CACHE_USER, dname, KNOT_RRTYPE_TSIG, NULL, &timestamp), 0);
assert_int_not_equal(kr_cache_peek(&global_txn, KR_CACHE_USER, NULL, KNOT_RRTYPE_TSIG, &entry, &timestamp), 0);
assert_int_not_equal(kr_cache_peek_rr(NULL, NULL, NULL), 0);
assert_int_not_equal(kr_cache_peek_rr(&global_txn, NULL, NULL), 0);
assert_int_not_equal(kr_cache_insert_rr(&global_txn, NULL, 0), 0);
assert_int_not_equal(kr_cache_insert_rr(NULL, NULL, 0), 0);
assert_int_not_equal(kr_cache_peek_rr(NULL, NULL, NULL, NULL), 0);
assert_int_not_equal(kr_cache_peek_rr(&global_txn, NULL, NULL, NULL), 0);
assert_int_not_equal(kr_cache_insert_rr(&global_txn, NULL, 0, 0), 0);
assert_int_not_equal(kr_cache_insert_rr(NULL, NULL, 0, 0), 0);
assert_int_not_equal(kr_cache_insert(NULL, KR_CACHE_USER, dname,
KNOT_RRTYPE_TSIG, &global_fake_ce, global_namedb_data), 0);
assert_int_not_equal(kr_cache_insert(&global_txn, KR_CACHE_USER, NULL,
@@ -287,7 +287,7 @@ static void test_insert_rr(void **state)
{
test_random_rr(&global_rr, CACHE_TTL);
struct kr_cache_txn *txn = test_txn_write(state);
int ret = kr_cache_insert_rr(txn, &global_rr, CACHE_TIME);
int ret = kr_cache_insert_rr(txn, &global_rr, 0, CACHE_TIME);
if (ret == KNOT_EOK) {
ret = kr_cache_txn_commit(txn);
} else {
@@ -338,8 +338,9 @@ static void test_query(void **state)
struct kr_cache_txn *txn = test_txn_rdonly(state);
for (uint32_t timestamp = CACHE_TIME; timestamp < CACHE_TIME + CACHE_TTL; ++timestamp) {
uint16_t rank = 0;
uint32_t drift = timestamp;
int query_ret = kr_cache_peek_rr(txn, &cache_rr, &drift);
int query_ret = kr_cache_peek_rr(txn, &cache_rr, &rank, &drift);
bool rr_equal = knot_rrset_equal(&global_rr, &cache_rr, KNOT_RRSET_COMPARE_WHOLE);
assert_int_equal(query_ret, KNOT_EOK);
assert_true(rr_equal);
@@ -351,12 +352,13 @@
/* Test cache read (simulate aged entry) */
static void test_query_aged(void **state)
{
uint16_t rank = 0;
uint32_t timestamp = CACHE_TIME + CACHE_TTL + 1;
knot_rrset_t cache_rr;
knot_rrset_init(&cache_rr, global_rr.owner, global_rr.type, global_rr.rclass);
struct kr_cache_txn *txn = test_txn_rdonly(state);
int ret = kr_cache_peek_rr(txn, &cache_rr, &timestamp);
int ret = kr_cache_peek_rr(txn, &cache_rr, &rank, &timestamp);
assert_int_equal(ret, kr_error(ESTALE));
kr_cache_txn_abort(txn);
}
@@ -364,6 +366,7 @@ static void test_query_aged(void **state)
/* Test cache removal */
static void test_remove(void **state)
{
uint16_t rank = 0;
uint32_t timestamp = CACHE_TIME;
knot_rrset_t cache_rr;
knot_rrset_init(&cache_rr, global_rr.owner, global_rr.type, global_rr.rclass);
@@ -371,7 +374,7 @@ static void test_remove(void **state)
struct kr_cache_txn *txn = test_txn_write(state);
int ret = kr_cache_remove(txn, KR_CACHE_RR, cache_rr.owner, cache_rr.type);
assert_int_equal(ret, KNOT_EOK);
ret = kr_cache_peek_rr(txn, &cache_rr, &timestamp);
ret = kr_cache_peek_rr(txn, &cache_rr, &rank, &timestamp);
assert_int_equal(ret, KNOT_ENOENT);
kr_cache_txn_commit(txn);
}
@@ -386,7 +389,7 @@ static void test_fill(void **state)
for (unsigned i = 0; i < CACHE_SIZE; ++i) {
knot_rrset_t rr;
test_random_rr(&rr, CACHE_TTL);
ret = kr_cache_insert_rr(txn, &rr, CACHE_TTL - 1);
ret = kr_cache_insert_rr(txn, &rr, 0, CACHE_TTL - 1);
if (ret != KNOT_EOK) {
break;
}
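For completeness, a round-trip check of the rank itself would look roughly like the sketch below. It is not part of this commit; the 0x42 value is arbitrary, and the helpers (test_random_rr, test_txn_write, CACHE_*) are the ones already used by the existing tests.

/* Not part of this commit: illustrative check that a nonzero rank survives
 * the insert/peek round trip within a single write transaction. */
static void test_insert_rr_rank(void **state)
{
	test_random_rr(&global_rr, CACHE_TTL);
	struct kr_cache_txn *txn = test_txn_write(state);
	assert_int_equal(kr_cache_insert_rr(txn, &global_rr, 0x42, CACHE_TIME), KNOT_EOK);
	uint16_t rank = 0;
	uint32_t drift = CACHE_TIME;
	knot_rrset_t cache_rr;
	knot_rrset_init(&cache_rr, global_rr.owner, global_rr.type, global_rr.rclass);
	assert_int_equal(kr_cache_peek_rr(txn, &cache_rr, &rank, &drift), KNOT_EOK);
	assert_int_equal(rank, 0x42);
	kr_cache_txn_abort(txn);
}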