Commit 9843c55c authored by Marek Vavruša's avatar Marek Vavruša Committed by Marek Vavruša

lib/cache: relaxed API to allow other assets than RR

each asset is tagged by a byte defining its type and importance
this is a groundwork for negative cache and packet cache
abstracted the code in preparation for different backends
parent 0fa3868f
......@@ -28,23 +28,34 @@
#include "lib/cache.h"
#include "lib/defines.h"
/* Key size */
#define KEY_SIZE (sizeof(uint8_t) + KNOT_DNAME_MAXLEN + sizeof(uint16_t))
/** Used cache storage engine (default LMDB) */
const namedb_api_t *(*kr_cache_storage)(void) = namedb_lmdb_api;
#define db_api kr_cache_storage()
/** Generic storage options */
union storage_opts {
struct namedb_lmdb_opts lmdb;
};
namedb_t *kr_cache_open(const char *handle, mm_ctx_t *mm, size_t maxsize)
{
if (handle == NULL || maxsize == 0) {
if (!handle || maxsize == 0) {
return NULL;
}
struct namedb_lmdb_opts opts = NAMEDB_LMDB_OPTS_INITIALIZER;
opts.mapsize = maxsize;
opts.path = handle;
union storage_opts opts;
memset(&opts, 0, sizeof(opts));
if (db_api == namedb_lmdb_api()) {
opts.lmdb.mapsize = maxsize;
opts.lmdb.path = handle;
}
namedb_t *db = NULL;
int ret = db_api->init(&db, mm, &opts);
if (ret != KNOT_EOK) {
if (ret != 0) {
return NULL;
}
......@@ -53,15 +64,15 @@ namedb_t *kr_cache_open(const char *handle, mm_ctx_t *mm, size_t maxsize)
/* Close the cache database; a NULL handle is a harmless no-op. */
void kr_cache_close(namedb_t *cache)
{
	if (cache) {
		db_api->deinit(cache);
	}
}
int kr_cache_txn_begin(namedb_t *cache, namedb_txn_t *txn, unsigned flags)
{
if (cache == NULL || txn == NULL) {
return KNOT_EINVAL;
if (!cache || !txn) {
return kr_error(EINVAL);
}
return db_api->txn_begin(cache, txn, flags);
......@@ -69,12 +80,12 @@ int kr_cache_txn_begin(namedb_t *cache, namedb_txn_t *txn, unsigned flags)
int kr_cache_txn_commit(namedb_txn_t *txn)
{
if (txn == NULL) {
return KNOT_EINVAL;
if (!txn) {
return kr_error(EINVAL);
}
int ret = db_api->txn_commit(txn);
if (ret != KNOT_EOK) {
if (ret != 0) {
kr_cache_txn_abort(txn);
}
return ret;
......@@ -82,22 +93,25 @@ int kr_cache_txn_commit(namedb_txn_t *txn)
/* Abort a transaction; a NULL transaction is a harmless no-op. */
void kr_cache_txn_abort(namedb_txn_t *txn)
{
	if (txn) {
		db_api->txn_abort(txn);
	}
}
/** @internal Composed key as { u8 tag, u8[1-255] name, u16 type }
 * The name is converted to its lookup format (knot_dname_lf), whose first
 * byte is the name length; that length byte is then overwritten by the tag,
 * so the key is tag + lf-encoded name + native-endian type.
 * @param buf output buffer, must hold at least KEY_SIZE bytes
 * @return total key length in bytes
 * NOTE(review): the return value of knot_dname_lf is ignored here — assumes
 * the name is always convertible; confirm callers only pass valid dnames.
 */
static size_t cache_key(uint8_t *buf, uint8_t tag, const knot_dname_t *name, uint16_t type)
{
	knot_dname_lf(buf, name, NULL);
	size_t len = buf[0] + 1;
	memcpy(buf + len, &type, sizeof(type));
	buf[0] = tag;
	return len + sizeof(type);
}
static struct kr_cache_rrset *cache_rr(namedb_txn_t *txn, const knot_dname_t *name, uint16_t type)
static struct kr_cache_entry *cache_entry(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name, uint16_t type)
{
uint8_t keybuf[KNOT_DNAME_MAXLEN + sizeof(uint16_t)];
size_t key_len = cache_key(keybuf, name, type);
uint8_t keybuf[KEY_SIZE];
size_t key_len = cache_key(keybuf, tag, name, type);
/* Look up and return value */
namedb_val_t key = { keybuf, key_len };
......@@ -107,49 +121,121 @@ static struct kr_cache_rrset *cache_rr(namedb_txn_t *txn, const knot_dname_t *na
return NULL;
}
return (struct kr_cache_rrset *)val.data;
return (struct kr_cache_entry *)val.data;
}
int kr_cache_peek(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp)
struct kr_cache_entry *kr_cache_peek(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name,
uint16_t type, uint32_t *timestamp)
{
if (txn == NULL || rr == NULL) {
return KNOT_EINVAL;
if (!txn || !tag || !name) {
return NULL;
}
/* Check if the RRSet is in the cache. */
struct kr_cache_rrset *found_rr = cache_rr(txn, rr->owner, rr->type);
if (found_rr != NULL) {
struct kr_cache_entry *entry = cache_entry(txn, tag, name, type);
if (!entry) {
return NULL;
}
/* No time constraint */
if (!timestamp) {
return entry;
} else if (*timestamp <= entry->timestamp) {
/* John Connor record cached in the future. */
*timestamp = 0;
return entry;
} else {
/* Check if the record is still valid. */
uint32_t drift = *timestamp - entry->timestamp;
if (drift < entry->ttl) {
*timestamp = drift;
return entry;
}
}
/* Assign data and return success. */
rr->rrs.rr_count = found_rr->count;
rr->rrs.data = found_rr->data;
return NULL;
}
/* No time constraint */
if (timestamp == NULL) {
return KNOT_EOK;
}
/** @internal Serialize an entry: fixed-size header followed by the payload. */
static void entry_write(struct kr_cache_entry *dst, struct kr_cache_entry *header, namedb_val_t data)
{
	assert(dst);
	/* Payload goes into the flexible array member right after the header. */
	memcpy(dst->data, data.data, data.len);
	memcpy(dst, header, sizeof(*header));
}
/* John Connor record cached from the future. */
if (*timestamp < found_rr->timestamp) {
*timestamp = 0;
return KNOT_EOK;
}
/**
 * Insert asset into cache, replacing any existing data.
 * @param txn    transaction instance
 * @param tag    asset tag (must be non-zero)
 * @param name   asset name
 * @param type   asset type
 * @param header filled entry header (count, ttl and timestamp)
 * @param data   payload copied after the header
 * @return 0 or an errcode
 */
int kr_cache_insert(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name, uint16_t type,
                    struct kr_cache_entry *header, namedb_val_t data)
{
	if (!txn || !name || !tag || !header) {
		return kr_error(EINVAL);
	}

	/* Insert key */
	uint8_t keybuf[KEY_SIZE];
	size_t key_len = cache_key(keybuf, tag, name, type);
	namedb_val_t key = { keybuf, key_len };
	namedb_val_t entry = { NULL, sizeof(*header) + data.len };

	/* LMDB can do late write and avoid copy: insert with a NULL value
	 * reserves space and hands back a pointer we then write into.
	 * NOTE(review): assumes the LMDB backend treats val.data == NULL as
	 * a reservation request — confirm against the namedb_lmdb driver. */
	if (db_api == namedb_lmdb_api()) {
		int ret = db_api->insert(txn, &key, &entry, 0);
		if (ret != 0) {
			return ret;
		}
		entry_write(entry.data, header, data);
	} else {
		/* Other backends must prepare contiguous data first */
		entry.data = malloc(entry.len);
		if (!entry.data) {
			return kr_error(ENOMEM);
		}
		entry_write(entry.data, header, data);
		int ret = db_api->insert(txn, &key, &entry, 0);
		free(entry.data);
		if (ret != 0) {
			return ret;
		}
	}

	return kr_ok();
}
return KNOT_ENOENT;
/** Delete the asset identified by (tag, name, type) from the cache. */
int kr_cache_remove(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name, uint16_t type)
{
	if (!txn || !tag || !name) {
		return kr_error(EINVAL);
	}
	/* Compose the lookup key and delegate deletion to the backend. */
	uint8_t keybuf[KEY_SIZE];
	namedb_val_t key = { keybuf, cache_key(keybuf, tag, name, type) };
	return db_api->del(txn, &key);
}
/** Drop every entry stored in the cache within the given transaction. */
int kr_cache_clear(namedb_txn_t *txn)
{
	return txn ? db_api->clear(txn) : kr_error(EINVAL);
}
/**
 * Peek the cache for the RRSet (rr->owner, rr->type) under the RR tag.
 * On success the rdataset of @a rr is pointed at the cached data (zero-copy,
 * valid only while the transaction is open) and @a timestamp becomes the drift.
 * @return 0 or an errcode (ENOENT when missing or expired)
 */
int kr_cache_peek_rr(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp)
{
	if (!txn || !rr || !timestamp) {
		return kr_error(EINVAL);
	}

	/* Check if the RRSet is in the cache. */
	struct kr_cache_entry *entry = kr_cache_peek(txn, KR_CACHE_RR, rr->owner, rr->type, timestamp);
	if (entry) {
		rr->rrs.rr_count = entry->count;
		rr->rrs.data = entry->data;
		return kr_ok();
	}

	/* Not found. */
	return kr_error(ENOENT);
}
knot_rrset_t kr_cache_materialize(const knot_rrset_t *src, uint32_t drift, mm_ctx_t *mm)
......@@ -160,14 +246,14 @@ knot_rrset_t kr_cache_materialize(const knot_rrset_t *src, uint32_t drift, mm_ct
knot_rrset_t copy;
knot_rrset_init(&copy, NULL, src->type, src->rclass);
copy.owner = knot_dname_copy(src->owner, mm);
if (copy.owner == NULL) {
if (!copy.owner) {
return copy;
}
for (uint16_t i = 0; i < src->rrs.rr_count; ++i) {
knot_rdata_t *rd = knot_rdataset_at(&src->rrs, i);
if (knot_rdata_ttl(rd) > drift) {
if (knot_rdataset_add(&copy.rrs, rd, mm) != KNOT_EOK) {
if (knot_rdataset_add(&copy.rrs, rd, mm) != 0) {
knot_rrset_clear(&copy, mm);
return copy;
}
......@@ -183,54 +269,30 @@ knot_rrset_t kr_cache_materialize(const knot_rrset_t *src, uint32_t drift, mm_ct
return copy;
}
/**
 * Insert an RRSet into the cache under the RR tag, replacing existing data.
 * The entry TTL is set to the maximum rdata TTL in the set.
 * @param txn       transaction instance
 * @param rr        inserted RRSet
 * @param timestamp current time (stored as the inception time)
 * @return 0 or an errcode
 */
int kr_cache_insert_rr(namedb_txn_t *txn, const knot_rrset_t *rr, uint32_t timestamp)
{
	if (!txn || !rr) {
		return kr_error(EINVAL);
	}

	/* Ignore empty records */
	if (knot_rrset_empty(rr)) {
		return kr_ok();
	}

	/* Prepare header to write */
	struct kr_cache_entry header = {
		.timestamp = timestamp,
		.ttl = 0,
		.count = rr->rrs.rr_count
	};
	/* Entry TTL is the maximum TTL found in the rdataset. */
	for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
		knot_rdata_t *rd = knot_rdataset_at(&rr->rrs, i);
		if (knot_rdata_ttl(rd) > header.ttl) {
			header.ttl = knot_rdata_ttl(rd);
		}
	}

	namedb_val_t data = { rr->rrs.data, knot_rdataset_size(&rr->rrs) };
	return kr_cache_insert(txn, KR_CACHE_RR, rr->owner, rr->type, &header, data);
}
......@@ -19,12 +19,20 @@
#include <libknot/rrset.h>
#include <libknot/internal/namedb/namedb.h>
/** Cache entry tag — first byte of every key, defining the asset type. */
enum kr_cache_tag {
	KR_CACHE_RR = 0x01,   /* Serialized rdataset (see kr_cache_insert_rr) */
	KR_CACHE_PKT = 0x02,  /* Presumably whole-packet cache — not used in this file; verify against packet-cache module */
	KR_CACHE_USER = 0xF0  /* First tag value reserved for user-defined assets */
};
/**
 * Serialized form of the RRSet with inception timestamp and maximum TTL.
 */
struct kr_cache_entry
{
	uint32_t timestamp; /* Time of insertion (absolute, seconds) */
	uint32_t ttl;       /* Maximum TTL of the stored records (seconds) */
	uint16_t count;     /* Number of records in data[] */
	uint8_t data[];     /* Serialized payload (e.g. knot rdataset wire) */
};
......@@ -34,7 +42,7 @@ extern const namedb_api_t *(*kr_cache_storage)(void);
/**
* Open/create persistent cache in given path.
* @param handle Path to existing directory where the DB should be created.
* @param handle Configuration string (e.g. path to existing directory where the DB should be created)
* @param mm Memory context.
* @param maxsize Maximum database size (bytes)
* @return database instance or NULL
......@@ -54,15 +62,14 @@ void kr_cache_close(namedb_t *cache);
* @param cache database instance
* @param txn transaction instance to be initialized (output)
* @param flags transaction flags (see namedb.h in libknot)
* @return KNOT_E*
* @return 0 or an errcode
*/
int kr_cache_txn_begin(namedb_t *cache, namedb_txn_t *txn, unsigned flags);
/**
* Commit existing transaction.
* @param txn transaction instance
* @return KNOT_E*
* @return 0 or an errcode
*/
int kr_cache_txn_commit(namedb_txn_t *txn);
......@@ -73,14 +80,56 @@ int kr_cache_txn_commit(namedb_txn_t *txn);
void kr_cache_txn_abort(namedb_txn_t *txn);
/**
* Peek the cache for given RRSet (name, type, class)
* Peek the cache for asset (name, type, tag)
* @note The 'drift' is the time passed between the cache time of the RRSet and now (in seconds).
* @param txn transaction instance
* @param tag asset tag
* @param name asset name
* @param type asset type
* @param timestamp current time (will be replaced with drift if successful)
* @return cache entry or NULL
*/
struct kr_cache_entry *kr_cache_peek(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name,
uint16_t type, uint32_t *timestamp);
/**
* Insert asset into cache, replacing any existing data.
* @param txn transaction instance
* @param tag asset tag
* @param name asset name
* @param type asset type
* @param header filled entry header (count, ttl and timestamp)
* @return 0 or an errcode
*/
int kr_cache_insert(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name, uint16_t type,
struct kr_cache_entry *header, namedb_val_t data);
/**
* Remove asset from cache.
* @param txn transaction instance
* @param tag asset tag
* @param name asset name
* @param type record type
* @return 0 or an errcode
*/
int kr_cache_remove(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name, uint16_t type);
/**
* Clear all items from the cache.
* @param txn transaction instance
* @return 0 or an errcode
*/
int kr_cache_clear(namedb_txn_t *txn);
/**
* Peek the cache for given RRSet (name, type)
* @note The 'drift' is the time passed between the cache time of the RRSet and now (in seconds).
* @param txn transaction instance
* @param rr query RRSet (its rdataset may be changed depending on the result)
* @param timestamp current time (will be replaced with drift if successful)
* @return KNOT_E*
* @return 0 or an errcode
*/
int kr_cache_peek(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp);
int kr_cache_peek_rr(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp);
/**
* Clone read-only RRSet and adjust TTLs.
......@@ -96,21 +145,6 @@ knot_rrset_t kr_cache_materialize(const knot_rrset_t *src, uint32_t drift, mm_ct
* @param txn transaction instance
* @param rr inserted RRSet
* @param timestamp current time
* @return KNOT_E*
*/
int kr_cache_insert(namedb_txn_t *txn, const knot_rrset_t *rr, uint32_t timestamp);
/**
* Remove RRSet from cache.
* @param txn transaction instance
* @param rr removed RRSet
* @return KNOT_E*
*/
int kr_cache_remove(namedb_txn_t *txn, const knot_rrset_t *rr);
/**
* Clear all items from the cache.
* @param txn transaction instance
* @return KNOT_E*
* @return 0 or an errcode
*/
int kr_cache_clear(namedb_txn_t *txn);
int kr_cache_insert_rr(namedb_txn_t *txn, const knot_rrset_t *rr, uint32_t timestamp);
......@@ -59,7 +59,7 @@ static int read_cache_rr(namedb_txn_t *txn, knot_rrset_t *cache_rr, uint32_t tim
rr_callback_t cb, struct kr_request *req)
{
/* Query cache for requested record */
if (kr_cache_peek(txn, cache_rr, &timestamp) != KNOT_EOK) {
if (kr_cache_peek_rr(txn, cache_rr, &timestamp) != KNOT_EOK) {
return KNOT_STATE_NOOP;
}
......@@ -161,7 +161,7 @@ static int write_cache_rr(const knot_pktsection_t *section, knot_rrset_t *rr, na
/* Check if already cached. */
knot_rrset_t query_rr;
knot_rrset_init(&query_rr, rr->owner, rr->type, rr->rclass);
if (kr_cache_peek(txn, &query_rr, &timestamp) == KNOT_EOK) {
if (kr_cache_peek_rr(txn, &query_rr, &timestamp) == KNOT_EOK) {
return KNOT_EOK;
}
......@@ -171,7 +171,7 @@ static int write_cache_rr(const knot_pktsection_t *section, knot_rrset_t *rr, na
rr->type = KNOT_RRTYPE_CNAME;
while((merge_in_section(rr, section, 0, pool)) == KNOT_EOK) {
/* Cache the merged RRSet */
ret = kr_cache_insert(txn, rr, timestamp);
ret = kr_cache_insert_rr(txn, rr, timestamp);
if (ret != KNOT_EOK) {
return ret;
}
......@@ -185,7 +185,7 @@ static int write_cache_rr(const knot_pktsection_t *section, knot_rrset_t *rr, na
rr->type = orig_rrtype;
ret = merge_in_section(rr, section, 0, pool);
if (ret == KNOT_EOK) {
kr_cache_insert(txn, rr, timestamp);
kr_cache_insert_rr(txn, rr, timestamp);
knot_rdataset_clear(&rr->rrs, pool);
}
......
......@@ -193,7 +193,7 @@ static void fetch_addr(struct kr_zonecut *cut, const knot_dname_t *ns, uint16_t
{
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)ns, rrtype, KNOT_CLASS_IN);
if (kr_cache_peek(txn, &cached_rr, &timestamp) != 0) {
if (kr_cache_peek_rr(txn, &cached_rr, &timestamp) != 0) {
return;
}
......@@ -211,7 +211,7 @@ static int fetch_ns(struct kr_zonecut *cut, const knot_dname_t *name, namedb_txn
uint32_t drift = timestamp;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
int ret = kr_cache_peek(txn, &cached_rr, &drift);
int ret = kr_cache_peek_rr(txn, &cached_rr, &drift);
if (ret != 0) {
return ret;
}
......
......@@ -35,22 +35,10 @@
* Properties.
*/
/** Return boolean true if a record in the RR set is expired. */
static int is_expired(struct kr_cache_rrset *rr, uint32_t drift)
/** Return boolean true if a record is expired. */
static bool is_expired(struct kr_cache_entry *entry, uint32_t drift)
{
/* Initialize set. */
knot_rdataset_t rrs;
rrs.rr_count = rr->count;
rrs.data = rr->data;
for (unsigned i = 0; i < rrs.rr_count; ++i) {
const knot_rdata_t *rd = knot_rdataset_at(&rrs, i);
if (knot_rdata_ttl(rd) <= drift) {
return 1;
}
}
return 0;
return entry->ttl >= drift;
}
/**
......@@ -83,8 +71,8 @@ static char* prune(void *env, struct kr_module *module, const char *args)
break;
}
/* Prune expired records. */
struct kr_cache_rrset *rr = val.data;
if (is_expired(rr, now - rr->timestamp)) {
struct kr_cache_entry *entry = val.data;
if (is_expired(entry, now - entry->timestamp)) {
storage->del(&txn, &key);
pruned += 1;
}
......
......@@ -32,18 +32,19 @@ const char *global_env;
/* Test invalid parameters: every API call must reject NULL/zero arguments. */
static void test_invalid(void **state)
{
	assert_null((void *)kr_cache_open(NULL, NULL, 0));
	assert_null((void *)kr_cache_open(global_env, NULL, 0));
	assert_int_not_equal(kr_cache_txn_begin(NULL, &global_txn, 0), 0);
	assert_int_not_equal(kr_cache_txn_begin(&global_env, NULL, 0), 0);
	assert_int_not_equal(kr_cache_txn_commit(NULL), 0);
	assert_int_not_equal(kr_cache_peek_rr(NULL, NULL, NULL), 0);
	assert_int_not_equal(kr_cache_peek_rr(&global_txn, NULL, NULL), 0);
	assert_int_not_equal(kr_cache_insert_rr(&global_txn, NULL, 0), 0);
	assert_int_not_equal(kr_cache_insert_rr(NULL, NULL, 0), 0);
	assert_int_not_equal(kr_cache_remove(&global_txn, 0, NULL, 0), 0);
	assert_int_not_equal(kr_cache_remove(&global_txn, KR_CACHE_RR, NULL, 0), 0);
	assert_int_not_equal(kr_cache_remove(NULL, 0, NULL, 0), 0);
	assert_int_not_equal(kr_cache_clear(NULL), 0);
}
/* Test cache open */
......@@ -73,7 +74,7 @@ static namedb_txn_t *test_txn_write(void **state)
/* Open a read-only transaction on the shared test cache. */
static namedb_txn_t *test_txn_rdonly(void **state)
{
	assert_non_null(*state);
	assert_int_equal(kr_cache_txn_begin(*state, &global_txn, NAMEDB_RDONLY), 0);
	return &global_txn;
}
......@@ -83,7 +84,7 @@ static void test_insert(void **state)
test_random_rr(&global_rr, CACHE_TTL);
namedb_txn_t *txn = test_txn_write(state);
int ret = kr_cache_insert(txn, &global_rr, CACHE_TIME);
int ret = kr_cache_insert_rr(txn, &global_rr, CACHE_TIME);
if (ret == KNOT_EOK) {
ret = kr_cache_txn_commit(txn);
} else {
......@@ -104,7 +105,7 @@ static void test_query(void **state)
for (uint32_t timestamp = CACHE_TIME; timestamp < CACHE_TIME + CACHE_TTL; ++timestamp) {
uint32_t drift = timestamp;
int query_ret = kr_cache_peek(txn, &cache_rr, &drift);
int query_ret = kr_cache_peek_rr(txn, &cache_rr, &drift);
bool rr_equal = knot_rrset_equal(&global_rr, &cache_rr, KNOT_RRSET_COMPARE_WHOLE);
assert_int_equal(query_ret, KNOT_EOK);
assert_true(rr_equal);
......@@ -121,7 +122,7 @@ static void test_query_aged(void **state)
knot_rrset_init(&cache_rr, global_rr.owner, global_rr.type, global_rr.rclass);
namedb_txn_t *txn = test_txn_rdonly(state);
int ret = kr_cache_peek(txn, &cache_rr, &timestamp);
int ret = kr_cache_peek_rr(txn, &cache_rr, &timestamp);
assert_int_equal(ret, KNOT_ENOENT);
kr_cache_txn_abort(txn);
}
......@@ -134,9 +135,9 @@ static void test_remove(void **state)
knot_rrset_init(&cache_rr, global_rr.owner, global_rr.type, global_rr.rclass);
namedb_txn_t *txn = test_txn_write(state);
int ret = kr_cache_remove(txn, &cache_rr);
int ret = kr_cache_remove(txn, KR_CACHE_RR, cache_rr.owner, cache_rr.type);
assert_int_equal(ret, KNOT_EOK);
ret = kr_cache_peek(txn, &cache_rr, &timestamp);
ret = kr_cache_peek_rr(txn, &cache_rr, &timestamp);
assert_int_equal(ret, KNOT_ENOENT);
kr_cache_txn_commit(txn);
}
......@@ -151,7 +152,7 @@ static void test_fill(void **state)
for (unsigned i = 0; i < CACHE_SIZE; ++i) {
knot_rrset_t rr;
test_random_rr(&rr, CACHE_TTL);
ret = kr_cache_insert(txn, &rr, CACHE_TTL - 1);
ret = kr_cache_insert_rr(txn, &rr, CACHE_TTL - 1);
if (ret != KNOT_EOK) {
break;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment