Commit b00ee5fa authored by Vladimír Čunát

TTL changes: moved in libknot from rdata to rrset

To work with RRSIG TTLs, libknot >= 2.7.1 is needed.
parent 1e83594e
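In libknot 2.7 the TTL argument disappears from the per-rdata calls and becomes a plain field on `knot_rrset_t`. A minimal before/after sketch of the pattern this commit applies throughout (each half compiles only against its respective libknot version; the owner/data/len/ttl/mm parameters are illustrative placeholders, not code from this repository):

```c
#include <libknot/rrset.h>

/* Before (libknot < 2.7): the TTL travels with each rdata. */
static knot_rrset_t *make_a_rr_old(knot_dname_t *owner, const uint8_t *data,
                                   uint16_t len, uint32_t ttl, knot_mm_t *mm)
{
	knot_rrset_t *rr = knot_rrset_new(owner, KNOT_RRTYPE_A, KNOT_CLASS_IN, mm);
	if (rr != NULL)
		knot_rrset_add_rdata(rr, data, len, ttl, mm);
	return rr; /* TTL is read back via knot_rrset_ttl(rr) */
}

/* After (libknot >= 2.7.1): the TTL sits on the rrset itself. */
static knot_rrset_t *make_a_rr_new(knot_dname_t *owner, const uint8_t *data,
                                   uint16_t len, uint32_t ttl, knot_mm_t *mm)
{
	knot_rrset_t *rr = knot_rrset_new(owner, KNOT_RRTYPE_A, KNOT_CLASS_IN,
	                                  ttl, mm);
	if (rr != NULL)
		knot_rrset_add_rdata(rr, data, len, mm);
	return rr; /* TTL is read back as rr->ttl */
}
```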
@@ -357,7 +357,7 @@ static void roothints_add(zs_scanner_t *zs)
}
if (zs->r_type == KNOT_RRTYPE_A || zs->r_type == KNOT_RRTYPE_AAAA) {
knot_rdata_t rdata[RDATA_ARR_MAX];
-knot_rdata_init(rdata, zs->r_data_length, zs->r_data, zs->r_ttl);
+knot_rdata_init(rdata, zs->r_data_length, zs->r_data);
kr_zonecut_add(hints, zs->r_owner, rdata);
}
}
......
@@ -112,7 +112,6 @@ printf "\tchar _stub[];\n};\n"
knot_rdataset_merge
knot_rrset_add_rdata
knot_rrset_init_empty
-knot_rrset_ttl
knot_rrset_txt_dump
knot_rrset_txt_dump_data
knot_rrset_size
......
@@ -361,7 +361,7 @@ ffi.metatype( knot_rrset_t, {
end,
ttl = function(rr)
assert(ffi.istype(knot_rrset_t, rr))
-return tonumber(knot.knot_rrset_ttl(rr))
+return tonumber(rr.ttl)
end,
class = function(rr, val)
assert(ffi.istype(knot_rrset_t, rr))
@@ -419,9 +419,10 @@ ffi.metatype( knot_rrset_t, {
return tonumber(rr.rrs.rr_count)
end,
-- Add binary RDATA to the RR set
-add_rdata = function (rr, rdata, rdlen, ttl)
+add_rdata = function (rr, rdata, rdlen, no_ttl)
assert(ffi.istype(knot_rrset_t, rr))
-local ret = knot.knot_rrset_add_rdata(rr, rdata, tonumber(rdlen), tonumber(ttl or 0), nil)
+assert(no_ttl == nil, 'add_rdata() can not accept TTL anymore')
+local ret = knot.knot_rrset_add_rdata(rr, rdata, tonumber(rdlen), nil)
if ret ~= 0 then return nil, knot_error_t(ret) end
return true
end,
@@ -834,8 +835,8 @@ local function rr2str(rr, style)
-- Construct a single-RR temporary set while minimizing copying.
local ret
do
-local rrs = knot_rrset_t(rr.owner, rr.type, kres.class.IN)
-rrs:add_rdata(rr.rdata, #rr.rdata, rr.ttl)
+local rrs = knot_rrset_t(rr.owner, rr.type, kres.class.IN, rr.ttl)
+rrs:add_rdata(rr.rdata, #rr.rdata)
ret = rrs:txt_dump(style)
end
......
@@ -633,14 +633,14 @@ static int zi_record_store(zs_scanner_t *s)
zone_import_ctx_t *z_import = (zone_import_ctx_t *)s->process.data;
knot_rrset_t *new_rr = knot_rrset_new(s->r_owner, s->r_type, s->r_class,
-&z_import->pool);
+s->r_ttl, &z_import->pool);
if (!new_rr) {
kr_log_error("[zscanner] line %"PRIu64": error creating rrset\n",
s->line_counter);
return -1;
}
int res = knot_rrset_add_rdata(new_rr, s->r_data, s->r_data_length,
-s->r_ttl, &z_import->pool);
+&z_import->pool);
if (res != KNOT_EOK) {
kr_log_error("[zscanner] line %"PRIu64": error adding rdata to rrset\n",
s->line_counter);
......
@@ -514,17 +514,8 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
if (ret) return kr_ok(); /* some aren't really errors */
assert(val_new_entry.data);
/* Compute TTL, just in case they weren't equal. */
-uint32_t ttl = -1;
-const knot_rdataset_t *rdatasets[] = { &rr->rrs, rds_sigs, NULL };
-for (int j = 0; rdatasets[j]; ++j) {
-knot_rdata_t *rd = rdatasets[j]->data;
-assert(rdatasets[j]->rr_count);
-for (uint16_t l = 0; l < rdatasets[j]->rr_count; ++l) {
-ttl = MIN(ttl, knot_rdata_ttl(rd));
-rd = kr_rdataset_next(rd);
-}
-} /* TODO: consider expirations of RRSIGs as well, just in case. */
+const uint32_t ttl = rr->ttl;
+/* FIXME: consider TTLs and expirations of RRSIGs as well, just in case. */
/* Write the entry itself. */
struct entry_h *eh = val_new_entry.data;
......
@@ -134,6 +134,6 @@ int32_t kr_cache_ttl(const struct kr_cache_p *peek, const struct kr_query *qry,
/*TODO: reorder*/
KR_EXPORT
int kr_cache_materialize(knot_rdataset_t *dst, const struct kr_cache_p *ref,
-uint32_t new_ttl, knot_mm_t *pool);
+knot_mm_t *pool);
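Since `kr_cache_materialize()` no longer takes a TTL, callers stamp `new_ttl` onto the destination rrset before materializing. A hypothetical wrapper showing the calling convention that the zonecut hunks below adopt (the wrapper itself is not part of this commit):

```c
static int materialize_with_ttl(knot_rrset_t *rr, knot_dname_t *owner,
                                uint16_t type, uint32_t new_ttl,
                                const struct kr_cache_p *peek, knot_mm_t *pool)
{
	/* The TTL now goes into the rrset header, not the materialize call. */
	knot_rrset_init(rr, owner, type, KNOT_CLASS_IN, new_ttl);
	return kr_cache_materialize(&rr->rrs, peek, pool);
}
```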
@@ -38,8 +38,7 @@ static uint32_t packet_ttl(const knot_pkt_t *pkt, bool is_negative)
if (is_negative) {
/* Use SOA minimum TTL for negative answers. */
if (rr->type == KNOT_RRTYPE_SOA) {
-return MIN(knot_rrset_ttl(rr),
-knot_soa_minimum(&rr->rrs));
+return MIN(rr->ttl, knot_soa_minimum(rr->rrs.rdata));
} else {
continue; /* Use SOA only for negative answers. */
}
@@ -47,13 +46,7 @@ static uint32_t packet_ttl(const knot_pkt_t *pkt, bool is_negative)
if (knot_rrtype_is_metatype(rr->type)) {
continue; /* Skip metatypes. */
}
-/* Find minimum TTL in the record set */
-knot_rdata_t *rd = rr->rrs.data;
-for (uint16_t j = 0; j < rr->rrs.rr_count; ++j) {
-has_ttl = true;
-ttl = MIN(ttl, knot_rdata_ttl(rd));
-rd = kr_rdataset_next(rd);
-}
+ttl = MIN(ttl, rr->ttl);
}
}
/* If no valid TTL present, go with zero (will get clamped to minimum). */
@@ -200,21 +193,15 @@ int answer_from_pkt(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
for (unsigned k = 0; k < sec->count; ++k) {
-const knot_rrset_t *rr = knot_pkt_rr(sec, k);
-knot_rdata_t *rd = rr->rrs.data;
-for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
-/* We need to be careful:
- * due to enforcing minimum TTL on packet,
- * some records may be below that value.
+knot_rrset_t *rrs = // vv FIXME??
+	/*const-cast*/(knot_rrset_t *)knot_pkt_rr(sec, k);
+/* We need to be careful: due to enforcing minimum TTL
+ * on packet, some records may be below that value.
* We keep those records at TTL 0. */
-uint32_t ttl = knot_rdata_ttl(rd);
-if (drift <= ttl) {
-ttl -= drift;
+if (rrs->ttl >= drift) {
+rrs->ttl -= drift;
} else {
-ttl = 0;
-}
-knot_rdata_set_ttl(rd, ttl);
-rd = kr_rdataset_next(rd);
+rrs->ttl = 0;
}
}
}
......
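With the TTL stored once per rrset, the drift adjustment above shrinks from a per-rdata loop to a single saturating subtraction. A hypothetical helper equivalent to the new branch (not code from this commit):

```c
#include <stdint.h>
#include <libknot/rrset.h>

/* Age an rrset by 'drift' seconds, clamping at zero so that records
 * already below the enforced minimum stay at TTL 0. */
static void rrset_apply_drift(knot_rrset_t *rr, uint32_t drift)
{
	rr->ttl = (rr->ttl >= drift) ? (rr->ttl - drift) : 0;
}
```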
@@ -48,8 +48,9 @@ int rdataset_dematerialize(const knot_rdataset_t *rds, uint8_t * restrict data)
* Return the number of bytes consumed or an error code.
*/
static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t * const data,
-const uint8_t *data_bound, uint32_t ttl, knot_mm_t *pool)
+const uint8_t *data_bound, knot_mm_t *pool)
{
+/* FIXME: rdataset_t and cache's rdataset have the same binary format now */
assert(rds && data && data_bound && data_bound > data && !rds->data);
assert(pool); /* not required, but that's our current usage; guard leaks */
const uint8_t *d = data; /* iterates over the cache data */
@@ -74,8 +75,8 @@ static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t *
d += sizeof(len) + len;
rdata_len_sum += len;
}
-/* Each item in knot_rdataset_t needs TTL (4B) + rdlength (2B) + rdata */
-rds->data = mm_alloc(pool, rdata_len_sum + ((size_t)rds->rr_count) * (4 + 2));
+/* Each item in knot_rdataset_t needs rdlength (2B) + rdata */
+rds->data = mm_alloc(pool, rdata_len_sum + (size_t)rds->rr_count * 2);
if (!rds->data) {
return kr_error(ENOMEM);
}
@@ -86,20 +87,20 @@ static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t *
uint16_t len;
memcpy(&len, d, sizeof(len));
d += sizeof(len);
-knot_rdata_init(d_out, len, d, ttl);
+knot_rdata_init(d_out, len, d);
d += len;
//d_out = kr_rdataset_next(d_out);
-d_out += 4 + 2 + len; /* TTL + rdlen + rdata */
+d_out += 2 + len; /* rdlen + rdata */
}
//VERBOSE_MSG(NULL, "materialized from %d B\n", (int)(d - data));
return d - data;
}
int kr_cache_materialize(knot_rdataset_t *dst, const struct kr_cache_p *ref,
-uint32_t new_ttl, knot_mm_t *pool)
+knot_mm_t *pool)
{
struct entry_h *eh = ref->raw_data;
-return rdataset_materialize(dst, eh->data, ref->raw_bound, new_ttl, pool);
+return rdataset_materialize(dst, eh->data, ref->raw_bound, pool);
}
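The allocation math above reflects the cache's serialized rdataset layout: a run of entries, each a 2-byte rdlength followed by that many bytes of rdata, with no per-rdata TTL any longer. A schematic walk over such a buffer, assuming only the per-entry layout stated in the comment (where the entry count lives is outside this hunk, so this sketch simply scans up to a bound):

```c
#include <stdint.h>
#include <string.h>

/* Count serialized rdata entries in [data, bound): each entry is a
 * uint16_t rdlength followed by rdlength bytes of rdata. */
static int count_serialized_rdata(const uint8_t *data, const uint8_t *bound)
{
	int count = 0;
	const uint8_t *d = data;
	while (d + sizeof(uint16_t) <= bound) {
		uint16_t len;
		memcpy(&len, d, sizeof(len)); /* may be unaligned in cache memory */
		d += sizeof(len) + len;
		if (d > bound)
			return -1; /* truncated last entry */
		++count;
	}
	return count;
}
```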
@@ -118,12 +119,12 @@ int entry2answer(struct answer *ans, int id,
}
/* Materialize the base RRset. */
knot_rrset_t *rr = ans->rrsets[id].set.rr
-= knot_rrset_new(owner, type, KNOT_CLASS_IN, ans->mm);
+= knot_rrset_new(owner, type, KNOT_CLASS_IN, new_ttl, ans->mm);
if (!rr) {
assert(!ENOMEM);
return kr_error(ENOMEM);
}
-int ret = rdataset_materialize(&rr->rrs, eh->data, eh_bound, new_ttl, ans->mm);
+int ret = rdataset_materialize(&rr->rrs, eh->data, eh_bound, ans->mm);
if (ret < 0) goto fail;
size_t data_off = ret;
ans->rrsets[id].set.rank = eh->rank;
@@ -132,7 +133,7 @@ int entry2answer(struct answer *ans, int id,
bool want_rrsigs = true; /* LATER(optim.): might be omitted in some cases. */
if (want_rrsigs) {
ret = rdataset_materialize(&ans->rrsets[id].sig_rds, eh->data + data_off,
-eh_bound, new_ttl, ans->mm);
+eh_bound, ans->mm);
if (ret < 0) goto fail;
/* Sanity check: we consumed exactly all data. */
int unused_bytes = eh_bound - (uint8_t *)eh->data - data_off - ret;
......
@@ -91,6 +91,7 @@ int pkt_append(knot_pkt_t *pkt, const struct answer_rrset *rrset, uint8_t rank)
pkt->rr[pkt->rrset_count] = (knot_rrset_t){
.owner = knot_dname_copy(rrset->set.rr->owner, &pkt->mm),
/* ^^ well, another copy isn't really needed */
+.ttl = rrset->set.rr->ttl,
.type = KNOT_RRTYPE_RRSIG,
.rclass = KNOT_CLASS_IN,
.rrs = *rdss[i],
......
@@ -443,7 +443,7 @@ int nsec1_src_synth(struct key *k, struct answer *ans, const knot_dname_t *clenc
assert(nsec_rr);
const uint32_t new_ttl_log =
-kr_verbose_status ? knot_rrset_ttl(nsec_rr) : -1;
+kr_verbose_status ? nsec_rr->ttl : -1;
uint8_t *bm = NULL;
uint16_t bm_size;
knot_nsec_bitmap(&nsec_rr->rrs, &bm, &bm_size);
......
@@ -83,11 +83,11 @@ static int insert_ta(map_t *trust_anchors, const knot_dname_t *name,
bool is_new_key = false;
knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, name);
if (!ta_rr) {
-ta_rr = knot_rrset_new(name, KNOT_RRTYPE_DS, KNOT_CLASS_IN, NULL);
+ta_rr = knot_rrset_new(name, KNOT_RRTYPE_DS, KNOT_CLASS_IN, ttl, NULL);
is_new_key = true;
}
/* Merge-in new key data */
-if (!ta_rr || (rdlen > 0 && knot_rrset_add_rdata(ta_rr, rdata, rdlen, ttl, NULL) != 0)) {
+if (!ta_rr || (rdlen > 0 && knot_rrset_add_rdata(ta_rr, rdata, rdlen, NULL) != 0)) {
knot_rrset_free(&ta_rr, NULL);
return kr_error(ENOMEM);
}
......
@@ -156,7 +156,7 @@ static int invalidate_ns(struct kr_rplan *rplan, struct kr_query *qry)
size_t addr_len = kr_inaddr_len(&qry->ns.addr[0].ip);
/* @warning _NOT_ thread-safe */
static knot_rdata_t rdata_arr[RDATA_ARR_MAX];
-knot_rdata_init(rdata_arr, addr_len, (const uint8_t *)addr, 0);
+knot_rdata_init(rdata_arr, addr_len, (const uint8_t *)addr);
return kr_zonecut_del(&qry->zone_cut, qry->ns.name, rdata_arr);
} else {
return kr_zonecut_del_all(&qry->zone_cut, qry->ns.name);
......
@@ -308,12 +308,12 @@ int kr_pkt_put(knot_pkt_t *pkt, const knot_dname_t *name, uint32_t ttl,
}
/* Create empty RR */
knot_rrset_t rr;
-knot_rrset_init(&rr, knot_dname_copy(name, &pkt->mm), rtype, rclass);
+knot_rrset_init(&rr, knot_dname_copy(name, &pkt->mm), rtype, rclass, ttl);
/* Create RDATA
* @warning _NOT_ thread safe.
*/
static knot_rdata_t rdata_arr[RDATA_ARR_MAX];
-knot_rdata_init(rdata_arr, rdlen, rdata, ttl);
+knot_rdata_init(rdata_arr, rdlen, rdata);
knot_rdataset_add(&rr.rrs, rdata_arr, &pkt->mm);
/* Append RR */
return knot_pkt_put(pkt, 0, &rr, KNOT_PF_FREE);
......
@@ -298,8 +298,9 @@ static void fetch_addr(struct kr_zonecut *cut, struct kr_cache *cache,
}
knot_rrset_t cached_rr;
-knot_rrset_init(&cached_rr, /*const-cast*/(knot_dname_t *)ns, rrtype, KNOT_CLASS_IN);
-if (kr_cache_materialize(&cached_rr.rrs, &peek, new_ttl, cut->pool) < 0) {
+knot_rrset_init(&cached_rr, /*const-cast*/(knot_dname_t *)ns, rrtype,
+	KNOT_CLASS_IN, new_ttl);
+if (kr_cache_materialize(&cached_rr.rrs, &peek, cut->pool) < 0) {
return;
}
knot_rdata_t *rd = cached_rr.rrs.data;
@@ -331,7 +332,7 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
}
/* Materialize the rdataset temporarily, for simplicity. */
knot_rdataset_t ns_rds = { 0, NULL };
-ret = kr_cache_materialize(&ns_rds, &peek, new_ttl, cut->pool);
+ret = kr_cache_materialize(&ns_rds, &peek, cut->pool);
if (ret < 0) {
return ret;
}
@@ -393,8 +394,9 @@ static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache,
*rr = NULL;
return kr_error(ENOMEM);
}
-knot_rrset_init(*rr, /*const-cast*/(knot_dname_t *)owner, type, KNOT_CLASS_IN);
-ret = kr_cache_materialize(&(*rr)->rrs, &peek, new_ttl, pool);
+knot_rrset_init(*rr, /*const-cast*/(knot_dname_t *)owner, type,
+	KNOT_CLASS_IN, new_ttl);
+ret = kr_cache_materialize(&(*rr)->rrs, &peek, pool);
if (ret < 0) {
knot_rrset_free(rr, pool);
return ret;
......
@@ -87,14 +87,14 @@ static int satisfy_reverse(struct kr_zonecut *hints, knot_pkt_t *pkt, struct kr_
}
knot_dname_t *qname = knot_dname_copy(qry->sname, &pkt->mm);
knot_rrset_t rr;
-knot_rrset_init(&rr, qname, KNOT_RRTYPE_PTR, KNOT_CLASS_IN);
+knot_rrset_init(&rr, qname, KNOT_RRTYPE_PTR, KNOT_CLASS_IN, 0);
/* Append address records from hints */
uint8_t *addr = pack_last(*addr_set);
if (addr != NULL) {
size_t len = pack_obj_len(addr);
void *addr_val = pack_obj_val(addr);
-knot_rrset_add_rdata(&rr, addr_val, len, 0, &pkt->mm);
+knot_rrset_add_rdata(&rr, addr_val, len, &pkt->mm);
}
return put_answer(pkt, qry, &rr, use_nodata);
@@ -109,7 +109,7 @@ static int satisfy_forward(struct kr_zonecut *hints, knot_pkt_t *pkt, struct kr_
}
knot_dname_t *qname = knot_dname_copy(qry->sname, &pkt->mm);
knot_rrset_t rr;
-knot_rrset_init(&rr, qname, qry->stype, qry->sclass);
+knot_rrset_init(&rr, qname, qry->stype, qry->sclass, 0);
size_t family_len = sizeof(struct in_addr);
if (rr.type == KNOT_RRTYPE_AAAA) {
family_len = sizeof(struct in6_addr);
@@ -121,7 +121,7 @@ static int satisfy_forward(struct kr_zonecut *hints, knot_pkt_t *pkt, struct kr_
size_t len = pack_obj_len(addr);
void *addr_val = pack_obj_val(addr);
if (len == family_len) {
-knot_rrset_add_rdata(&rr, addr_val, len, 0, &pkt->mm);
+knot_rrset_add_rdata(&rr, addr_val, len, &pkt->mm);
}
addr = pack_obj_next(addr);
}
@@ -190,7 +190,7 @@ static const knot_rdata_t * addr2rdata(const char *addr) {
static knot_rdata_t rdata_arr[RDATA_ARR_MAX];
size_t addr_len = kr_inaddr_len((struct sockaddr *)&ss);
const uint8_t *raw_addr = (const uint8_t *)kr_inaddr((struct sockaddr *)&ss);
-knot_rdata_init(rdata_arr, addr_len, raw_addr, 0);
+knot_rdata_init(rdata_arr, addr_len, raw_addr);
return rdata_arr;
}
@@ -280,7 +280,7 @@ static int add_reverse_pair(struct kr_zonecut *hints, const char *name, const ch
/* Build RDATA */
knot_rdata_t rdata[RDATA_ARR_MAX];
-knot_rdata_init(rdata, knot_dname_size(ptr_name), ptr_name, 0);
+knot_rdata_init(rdata, knot_dname_size(ptr_name), ptr_name);
return kr_zonecut_add(hints, key, rdata);
}
@@ -297,7 +297,7 @@ static int del_pair(struct hints_data *data, const char *name, const char *addr)
return kr_error(EINVAL);
}
knot_rdata_t ptr_rdata[RDATA_ARR_MAX];
-knot_rdata_init(ptr_rdata, knot_dname_size(key), key, 0);
+knot_rdata_init(ptr_rdata, knot_dname_size(key), key);
if (addr) {
/* Remove the pair. */
......
@@ -37,7 +37,7 @@ local function test_rrset_functions()
same(rr_text:gsub('%s+', ' '), 'com. 1 TXT "hello"', 'rrset to text works')
same(kres.dname2str(todname('com.')), 'com.', 'domain name conversion works')
-- test creating rrset
-rr = kres.rrset(todname('com.'), kres.type.A, kres.class.IN)
+rr = kres.rrset(todname('com.'), kres.type.A, kres.class.IN, 66)
ok(ffi.istype(kres.rrset, rr), 'created an empty RR')
same(rr:owner(), '\3com\0', 'created RR has correct owner')
same(rr:class(), kres.class.IN, 'created RR has correct class')
@@ -46,21 +46,21 @@ local function test_rrset_functions()
same(rr.type, kres.type.A, 'created RR has correct type')
-- test adding rdata
same(rr:wire_size(), 0, 'empty RR wire size is zero')
-ok(rr:add_rdata('\1\2\3\4', 4, 66), 'adding RDATA works')
+ok(rr:add_rdata('\1\2\3\4', 4), 'adding RDATA works')
same(rr:wire_size(), 5 + 4 + 4 + 2 + 4, 'RR wire size works after adding RDATA')
-- test conversion to text
local expect = 'com. 66 A 1.2.3.4\n'
same(rr:txt_dump(), expect, 'RR to text works')
-- create a dummy rrsig
-local rrsig = kres.rrset(todname('com.'), kres.type.RRSIG, kres.class.IN)
-rrsig:add_rdata('\0\1', 2, 0)
+local rrsig = kres.rrset(todname('com.'), kres.type.RRSIG, kres.class.IN, 0)
+rrsig:add_rdata('\0\1', 2)
same(rr:rdcount(), 1, 'add_rdata really added RDATA')
-- check rrsig matching
same(rr.type, rrsig:type_covered(), 'rrsig type covered matches covered RR type')
ok(rr:is_covered_by(rrsig), 'rrsig is covering a record')
-- test rrset merging
-local copy = kres.rrset(rr:owner(), rr.type)
-ok(copy:add_rdata('\4\3\2\1', 4, 66), 'adding second RDATA works')
+local copy = kres.rrset(rr:owner(), rr.type, kres.class.IN, 66)
+ok(copy:add_rdata('\4\3\2\1', 4), 'adding second RDATA works')
ok(rr:merge_rdata(copy), 'merge_rdata works')
same(rr:rdcount(), 2, 'RDATA count is correct after merge_rdata')
expect = 'com. 66 A 1.2.3.4\n' ..
@@ -134,8 +134,8 @@ local function test_packet_functions()
local copy = kres.packet(512)
copy:question(todname('hello'), kres.class.IN, kres.type.A)
copy:begin(kres.section.ANSWER)
-local rr = kres.rrset(pkt:qname(), kres.type.A)
-rr:add_rdata('\4\3\2\1', 4, 66)
+local rr = kres.rrset(pkt:qname(), kres.type.A, kres.class.IN, 66)
+rr:add_rdata('\4\3\2\1', 4)
ok(copy:put_rr(rr), 'adding RR sets directly works')
ok(copy:recycle())
......
@@ -44,8 +44,8 @@ local function test_context_cache()
same({s.hit, s.miss, s.insert, s.delete}, {0, 0, 0, 0}, 'context cache stats works')
-- insert a record into cache
local rdata = '\1\2\3\4'
-local rr = kres.rrset('\3com\0', kres.type.A, kres.class.IN)
-rr:add_rdata(rdata, #rdata, 66)
+local rr = kres.rrset('\3com\0', kres.type.A, kres.class.IN, 66)
+rr:add_rdata(rdata, #rdata)
ok(c:insert(rr, nil, 0, 0), 'cache insertion works')
ok(c:sync(), 'cache sync works')
same(s.insert, 1, 'cache insertion increments counters')
......
@@ -112,10 +112,10 @@ static inline void test_random_rr(knot_rrset_t *rr, uint32_t ttl)
/* Create payload */
tmp_buf[0] = num;
test_randstr((char *)(tmp_buf + 1), tmp_buf[0] + 1);
-knot_rdata_init(rdata_buf, num + 1, tmp_buf, ttl);
+knot_rdata_init(rdata_buf, num + 1, tmp_buf);
/* Assign static buffers. */
-knot_rrset_init(rr, owner_buf, KNOT_RRTYPE_TXT, KNOT_CLASS_IN);
+knot_rrset_init(rr, owner_buf, KNOT_RRTYPE_TXT, KNOT_CLASS_IN, ttl);
rr->rrs.rr_count = 1;
rr->rrs.data = rdata_buf;
}
......
@@ -242,7 +242,7 @@ static void test_materialize(void **state)
global_rr.owner = NULL;
knot_rrset_init(&output_rr, NULL, 0, 0);
-kr_cache_materialize(&output_rr, &global_rr, 0, 0, &global_mm);
+kr_cache_materialize(&output_rr, &global_rr, 0, &global_mm);
res_cmp_ok_empty = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_HEADER);
res_cmp_fail_empty = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_WHOLE);
knot_rrset_clear(&output_rr, &global_mm);
@@ -252,14 +252,14 @@ static void test_materialize(void **state)
knot_rrset_init(&output_rr, NULL, 0, 0);
will_return (knot_rdataset_gather, 0);
-kr_cache_materialize(&output_rr, &global_rr, 0, 0, &global_mm);
+kr_cache_materialize(&output_rr, &global_rr, 0, &global_mm);
res_cmp_ok = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_WHOLE);
knot_rrset_clear(&output_rr, &global_mm);
assert_true(res_cmp_ok);
knot_rrset_init(&output_rr, NULL, 0, 0);
will_return (knot_rdataset_gather, KNOT_ENOMEM);
-kr_cache_materialize(&output_rr, &global_rr, 0, 0, &global_mm);
+kr_cache_materialize(&output_rr, &global_rr, 0, &global_mm);
res_cmp_fail = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_WHOLE);
knot_rrset_clear(&output_rr, &global_mm);
assert_false(res_cmp_fail);
......