Commit a370d6ac authored by Vladimír Čunát

knot_rdataset_t field renames

parent aac0d54c
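
This commit mechanically adapts Knot Resolver to upstream libknot renaming the knot_rdataset_t fields: rr_count becomes count and data becomes rdata. The Lua FFI cdef in kres.lua has to move in lockstep with the C sources, since it mirrors the C struct layout. As a minimal sketch of the new access pattern (the helper name is illustrative, not part of this commit), assuming the libknot headers this branch builds against:

#include <libknot/rdataset.h>

/* Sum the rdata lengths of a set using the renamed fields;
 * before this commit the two accesses read rds->rr_count and rds->data. */
static size_t rdataset_payload(const knot_rdataset_t *rds)
{
	size_t total = 0;
	knot_rdata_t *rd = rds->rdata;              /* was rds->data */
	for (uint16_t i = 0; i < rds->count; ++i) { /* was rds->rr_count */
		total += rd->len;
		rd = knot_rdataset_next(rd); /* entries are contiguous in memory */
	}
	return total;
}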
@@ -27,8 +27,8 @@ typedef struct {
 	uint8_t data[];
 } knot_rdata_t;
 typedef struct {
-	uint16_t rr_count;
-	knot_rdata_t *data;
+	uint16_t count;
+	knot_rdata_t *rdata;
 } knot_rdataset_t;
 typedef struct {
 	knot_dname_t *_owner;
......
@@ -330,7 +330,7 @@ end
 -- RR sets created in Lua must have a destructor to release allocated memory
 local function rrset_free(rr)
 	if rr._owner ~= nil then ffi.C.free(rr._owner) end
-	if rr:rdcount() > 0 then ffi.C.free(rr.rrs.data) end
+	if rr:rdcount() > 0 then ffi.C.free(rr.rrs.rdata) end
 end
 -- Metatype for RR set. Beware, the indexing is 0-based (rdata, get, tostring).
@@ -417,7 +417,7 @@ ffi.metatype( knot_rrset_t, {
 		-- Return RDATA count for this RR set
 		rdcount = function(rr)
 			assert(ffi.istype(knot_rrset_t, rr))
-			return tonumber(rr.rrs.rr_count)
+			return tonumber(rr.rrs.count)
 		end,
 		-- Add binary RDATA to the RR set
 		add_rdata = function (rr, rdata, rdlen, no_ttl)
......
@@ -290,7 +290,7 @@ static int zi_put_glue(zone_import_ctx_t *z_import, knot_pkt_t *pkt,
 		       knot_rrset_t *rr)
 {
 	int err = 0;
-	for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
+	for (uint16_t i = 0; i < rr->rrs.count; ++i) {
 		const knot_dname_t *ns_name = knot_ns_name(&rr->rrs, i);
 		err = zi_rrset_find_put(z_import, pkt, ns_name,
 					rr->rclass, KNOT_RRTYPE_A, 0);
......
@@ -447,7 +447,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
 	switch (rr->type) {
 	case KNOT_RRTYPE_NSEC3:
 		/* Skip "suspicious" or opt-out NSEC3 sets. */
-		if (rr->rrs.rr_count != 1) return kr_ok();
+		if (rr->rrs.count != 1) return kr_ok();
 		if (KNOT_NSEC3_FLAG_OPT_OUT & knot_nsec3_flags(&rr->rrs, 0)) {
 			if (has_optout) *has_optout = true;
 			return kr_ok();
@@ -458,7 +458,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
 		/* Skip any NSEC*s that aren't validated. */
 		return kr_ok();
 	}
-	if (!rr_sigs || !rr_sigs->rrs.rr_count || !rr_sigs->rrs.data) {
+	if (!rr_sigs || !rr_sigs->rrs.count || !rr_sigs->rrs.rdata) {
 		assert(!EINVAL);
 		return kr_error(EINVAL);
 	}
@@ -475,7 +475,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
 	}
 	assert(rr->type == KNOT_RRTYPE_NSEC3);
-	const knot_rdata_t * const rdata = rr->rrs.data;
+	const knot_rdata_t * const rdata = rr->rrs.rdata;
 	if (rdata->len <= 4) return kr_error(EILSEQ); /*< data from outside; less trust */
 	const int np_dlen = nsec_p_rdlen(rdata->data);
 	if (np_dlen > rdata->len) return kr_error(EILSEQ);
@@ -557,7 +557,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
 		VERBOSE_MSG(qry, "=> stashed %s%s %s, rank 0%.2o, "
 			"%d B total, incl. %d RRSIGs\n",
 			(wild_labels ? "*." : ""), encl_str, type_str, rank,
-			(int)val_new_entry.len, (rr_sigs ? rr_sigs->rrs.rr_count : 0)
+			(int)val_new_entry.len, (rr_sigs ? rr_sigs->rrs.count : 0)
 			);
 	} }
......
@@ -32,12 +32,12 @@ int rdataset_dematerialize(const knot_rdataset_t *rds, uint8_t * restrict data)
 		assert(data);
 		return kr_error(EINVAL);
 	}
-	const uint16_t rr_count = rds ? rds->rr_count : 0;
+	const uint16_t rr_count = rds ? rds->count : 0;
 	memcpy(data, &rr_count, sizeof(rr_count));
 	data += sizeof(rr_count);
 	if (rr_count) {
 		size_t size = knot_rdataset_size(rds);
-		memcpy(data, rds->data, size);
+		memcpy(data, rds->rdata, size);
 		data += size;
 	}
 	//VERBOSE_MSG(NULL, "dematerialized to %d B\n", (int)(data - data0));
@@ -51,7 +51,7 @@ int rdataset_dematerialize(const knot_rdataset_t *rds, uint8_t * restrict data)
 static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t * const data,
 				const uint8_t *data_bound, knot_mm_t *pool)
 {
-	assert(rds && data && data_bound && data_bound > data && !rds->data
+	assert(rds && data && data_bound && data_bound > data && !rds->rdata
 		/*&& !((size_t)data & 1)*/);
 	assert(pool); /* not required, but that's our current usage; guard leaks */
 	const uint8_t *d = data; /* iterates over the cache data */
@@ -59,15 +59,15 @@ static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t *
 		uint16_t rr_count;
 		memcpy(&rr_count, d, sizeof(rr_count));
 		d += sizeof(rr_count);
-		rds->rr_count = rr_count;
+		rds->count = rr_count;
 		if (!rr_count) { /* avoid mm_alloc(pool, 0); etc. */
 			return d - data;
 		}
 	}
 	/* First sum up the sizes for wire format length. */
 	const knot_rdataset_t rds_tmp = {
-		.rr_count = rds->rr_count,
-		.data = (knot_rdata_t *)d,
+		.count = rds->count,
+		.rdata = (knot_rdata_t *)d,
 	};
 	size_t rds_size = knot_rdataset_size(&rds_tmp); /* TODO: we might overrun here already,
 						but we need to trust cache anyway...*/
@@ -75,11 +75,11 @@ static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t *
 		VERBOSE_MSG(NULL, "materialize: EILSEQ!\n");
 		return kr_error(EILSEQ);
 	}
-	rds->data = mm_alloc(pool, rds_size);
-	if (!rds->data) {
+	rds->rdata = mm_alloc(pool, rds_size);
+	if (!rds->rdata) {
 		return kr_error(ENOMEM);
 	}
-	memcpy(rds->data, d, rds_size);
+	memcpy(rds->rdata, d, rds_size);
 	d += rds_size;
 	//VERBOSE_MSG(NULL, "materialized from %d B\n", (int)(d - data));
 	return d - data;
@@ -98,7 +98,7 @@ int entry2answer(struct answer *ans, int id,
 		 const knot_dname_t *owner, uint16_t type, uint32_t new_ttl)
 {
 	/* We assume it's zeroed. Do basic sanity check. */
-	if (ans->rrsets[id].set.rr || ans->rrsets[id].sig_rds.data
+	if (ans->rrsets[id].set.rr || ans->rrsets[id].sig_rds.rdata
 	    || (type == KNOT_RRTYPE_NSEC && ans->nsec_p.raw)
 	    || (type == KNOT_RRTYPE_NSEC3 && !ans->nsec_p.raw)
 	   )
......
@@ -296,9 +296,9 @@ static inline int rdataset_dematerialize_size(const knot_rdataset_t *rds)
 static inline int rdataset_dematerialized_size(const uint8_t *data)
 {
 	knot_rdataset_t rds;
-	memcpy(&rds.rr_count, data, sizeof(rds.rr_count));
-	rds.data = (knot_rdata_t *)(data + sizeof(rds.rr_count));
-	return sizeof(rds.rr_count) + knot_rdataset_size(&rds);
+	memcpy(&rds.count, data, sizeof(rds.count));
+	rds.rdata = (knot_rdata_t *)(data + sizeof(rds.count));
+	return sizeof(rds.count) + knot_rdataset_size(&rds);
 }
 /** Serialize an rdataset. */
......
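The dematerialize/materialize pair above fixes the cache layout of one rdataset: a 2-byte count copied via memcpy (the cache pointer may be unaligned), followed by the rdatas exactly as libknot keeps them in memory. A hedged reader for that layout, assuming libknot's knot_rdata_t packing of a 2-byte length plus data padded to an even size (the function name is illustrative, not from this commit):

#include <stdint.h>
#include <string.h>

/* Walk one serialized rdataset: [count:u16]([len:u16][data:len][pad])*count.
 * rdataset_dematerialized_size() above computes the same thing, but obtains
 * the rdata part from knot_rdataset_size() instead of summing by hand. */
static size_t serialized_rds_size(const uint8_t *data)
{
	uint16_t count;
	memcpy(&count, data, sizeof(count)); /* possibly unaligned, hence memcpy */
	const uint8_t *d = data + sizeof(count);
	for (uint16_t i = 0; i < count; ++i) {
		uint16_t len;
		memcpy(&len, d, sizeof(len));
		d += sizeof(len) + len + (len & 1); /* rdatas padded to even size */
	}
	return (size_t)(d - data);
}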
@@ -71,13 +71,13 @@ static int pkt_alloc_space(knot_pkt_t *pkt, int count)
 int pkt_append(knot_pkt_t *pkt, const struct answer_rrset *rrset, uint8_t rank)
 {
 	/* allocate space, to be sure */
-	int rrset_cnt = (rrset->set.rr->rrs.rr_count > 0) + (rrset->sig_rds.rr_count > 0);
+	int rrset_cnt = (rrset->set.rr->rrs.count > 0) + (rrset->sig_rds.count > 0);
 	int ret = pkt_alloc_space(pkt, rrset_cnt);
 	if (ret) return kr_error(ret);
 	/* write both sets */
 	const knot_rdataset_t *rdss[2] = { &rrset->set.rr->rrs, &rrset->sig_rds };
 	for (int i = 0; i < rrset_cnt; ++i) {
-		assert(rdss[i]->rr_count);
+		assert(rdss[i]->count);
 		/* allocate rank */
 		uint8_t *rr_rank = mm_alloc(&pkt->mm, sizeof(*rr_rank));
 		if (!rr_rank) return kr_error(ENOMEM);
......
@@ -141,7 +141,7 @@ int kr_rrset_validate(kr_rrset_validation_ctx_t *vctx, const knot_rrset_t *cover
 		return kr_error(EINVAL);
 	}
-	for (unsigned i = 0; i < vctx->keys->rrs.rr_count; ++i) {
+	for (unsigned i = 0; i < vctx->keys->rrs.count; ++i) {
 		int ret = kr_rrset_validate_with_key(vctx, covered, i, NULL);
 		if (ret == 0) {
 			return ret;
@@ -181,7 +181,7 @@ static int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
 	if (key == NULL) {
 		const knot_rdata_t *krr = knot_rdataset_at(&keys->rrs, key_pos);
 		int ret = kr_dnssec_key_from_rdata(&created_key, keys->owner,
-						   krr->data, krr->len);
+						   krr->rdata, krr->len);
 		if (ret != 0) {
 			vctx->result = ret;
 			return vctx->result;
@@ -204,7 +204,7 @@ static int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
 		if ((covered->rclass != rrsig->rclass) || !knot_dname_is_equal(covered->owner, rrsig->owner)) {
 			continue;
 		}
-		for (uint16_t j = 0; j < rrsig->rrs.rr_count; ++j) {
+		for (uint16_t j = 0; j < rrsig->rrs.count; ++j) {
 			int val_flgs = 0;
 			int trim_labels = 0;
 			if (knot_rrsig_type_covered(&rrsig->rrs, j) != covered->type) {
@@ -259,7 +259,7 @@ static int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
 static bool kr_ds_algo_support(const knot_rrset_t *ta)
 {
-	for (uint16_t i = 0; i < ta->rrs.rr_count; ++i) {
+	for (uint16_t i = 0; i < ta->rrs.count; ++i) {
 		if (dnssec_algorithm_digest_support(knot_ds_digest_type(&ta->rrs, i))
 		    && dnssec_algorithm_key_support(knot_ds_alg(&ta->rrs, i))) {
 			return true;
@@ -273,7 +273,7 @@ int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rrset_t *ta)
 	const knot_pkt_t *pkt = vctx->pkt;
 	const knot_rrset_t *keys = vctx->keys;
-	const bool ok = pkt && keys && ta && ta->rrs.rr_count && ta->rrs.data
+	const bool ok = pkt && keys && ta && ta->rrs.count && ta->rrs.rdata
 		&& ta->type == KNOT_RRTYPE_DS;
 	if (!ok) {
 		assert(false);
@@ -290,16 +290,16 @@ int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rrset_t *ta)
 	 * The supplied DS record has been authenticated.
 	 * It has been validated or is part of a configured trust anchor.
 	 */
-	for (uint16_t i = 0; i < keys->rrs.rr_count; ++i) {
+	for (uint16_t i = 0; i < keys->rrs.count; ++i) {
 		/* RFC4035 5.3.1, bullet 8 */ /* ZSK */
 		/* LATER(optim.): more efficient way to iterate than _at() */
 		const knot_rdata_t *krr = knot_rdataset_at(&keys->rrs, i);
-		if (!kr_dnssec_key_zsk(krr->data) || kr_dnssec_key_revoked(krr->data)) {
+		if (!kr_dnssec_key_zsk(krr->rdata) || kr_dnssec_key_revoked(krr->rdata)) {
 			continue;
 		}
 		struct dseckey *key = NULL;
-		if (kr_dnssec_key_from_rdata(&key, keys->owner, krr->data, krr->len) != 0) {
+		if (kr_dnssec_key_from_rdata(&key, keys->owner, krr->rdata, krr->len) != 0) {
 			continue;
 		}
 		if (kr_authenticate_referral(ta, (dnssec_key_t *) key) != 0) {
......
@@ -240,7 +240,7 @@ static int coverign_rrsig_labels(const knot_rrset_t *nsec, const knot_pktsection
 			continue;
 		}
-		for (uint16_t j = 0; j < rrset->rrs.rr_count; ++j) {
+		for (uint16_t j = 0; j < rrset->rrs.count; ++j) {
 			if (knot_rrsig_type_covered(&rrset->rrs, j) != KNOT_RRTYPE_NSEC) {
 				continue;
 			}
......
@@ -64,11 +64,11 @@ int kr_authenticate_referral(const knot_rrset_t *ref, const dnssec_key_t *key)
 	/* Try all possible DS records */
 	int ret = 0;
-	knot_rdata_t *rd = ref->rrs.data;
-	for (uint16_t i = 0; i < ref->rrs.rr_count; ++i) {
+	knot_rdata_t *rd = ref->rrs.rdata;
+	for (uint16_t i = 0; i < ref->rrs.count; ++i) {
 		dnssec_binary_t ds_rdata = {
 			.size = rd->len,
-			.data = rd->data
+			.data = rd->rdata
 		};
 		ret = authenticate_ds(key, &ds_rdata, knot_ds_digest_type(&ref->rrs, i));
 		if (ret == 0) { /* Found a good DS */
@@ -206,7 +206,7 @@ static int sign_ctx_add_records(dnssec_sign_ctx_t *ctx, const knot_rrset_t *cove
 	 * for each RR in covered.
 	 */
 	uint8_t *beginp = wire_buffer;
-	for (uint16_t i = 0; i < covered->rrs.rr_count; ++i) {
+	for (uint16_t i = 0; i < covered->rrs.count; ++i) {
 		/* RR(i) = name | type | class | OrigTTL | RDATA length | RDATA */
 		for (int j = 0; j < trim_labels; ++j) {
 			assert(beginp[0]);
......
@@ -157,7 +157,7 @@ static bool is_valid_addr(const uint8_t *addr, size_t len)
 static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query, int *glue_cnt)
 {
 	if (rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA) {
-		const knot_rdata_t *rdata = rr->rrs.data;
+		const knot_rdata_t *rdata = rr->rrs.rdata;
 		char name_str[KR_DNAME_STR_MAXLEN];
 		char addr_str[INET6_ADDRSTRLEN];
 		WITH_VERBOSE(query) {
@@ -286,7 +286,7 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr,
 	}
 	/* Fetch glue for each NS */
-	for (unsigned i = 0; i < rr->rrs.rr_count; ++i) {
+	for (unsigned i = 0; i < rr->rrs.count; ++i) {
 		const knot_dname_t *ns_name = knot_ns_name(&rr->rrs, i);
 		/* Glue is mandatory for NS below zone */
 		if (knot_dname_in(rr->owner, ns_name) && !has_glue(pkt, ns_name)) {
......
@@ -828,7 +828,7 @@ static void check_wildcard(kr_layer_t *ctx)
 			int owner_labels = knot_dname_labels(rrsigs->owner, NULL);
-			for (int k = 0; k < rrsigs->rrs.rr_count; ++k) {
+			for (int k = 0; k < rrsigs->rrs.count; ++k) {
 				if (knot_rrsig_labels(&rrsigs->rrs, k) != owner_labels) {
 					qry->flags.DNSSEC_WEXPAND = true;
 				}
......
@@ -411,7 +411,7 @@ static int edns_erase_and_reserve(knot_pkt_t *pkt)
 	}
 	size_t len = knot_rrset_size(pkt->opt_rr);
-	int16_t rr_removed = pkt->opt_rr->rrs.rr_count;
+	int16_t rr_removed = pkt->opt_rr->rrs.count;
 	/* Decrease rrset counters. */
 	pkt->rrset_count -= 1;
 	pkt->sections[pkt->current].count -= 1;
......
@@ -365,7 +365,7 @@ char *kr_module_call(struct kr_context *ctx, const char *module, const char *pro
 /** Return the (covered) type of an nonempty RRset. */
 static inline uint16_t kr_rrset_type_maysig(const knot_rrset_t *rr)
 {
-	assert(rr && rr->rrs.rr_count && rr->rrs.data);
+	assert(rr && rr->rrs.count && rr->rrs.rdata);
 	uint16_t type = rr->type;
 	if (type == KNOT_RRTYPE_RRSIG)
 		type = knot_rrsig_type_covered(&rr->rrs, 0);
......
@@ -299,8 +299,8 @@ static void fetch_addr(struct kr_zonecut *cut, struct kr_cache *cache,
 	if (kr_cache_materialize(&cached_rr.rrs, &peek, cut->pool) < 0) {
 		return;
 	}
-	knot_rdata_t *rd = cached_rr.rrs.data;
-	for (uint16_t i = 0; i < cached_rr.rrs.rr_count; ++i) {
+	knot_rdata_t *rd = cached_rr.rrs.rdata;
+	for (uint16_t i = 0; i < cached_rr.rrs.count; ++i) {
 		(void) kr_zonecut_add(cut, ns, rd);
 		rd = knot_rdataset_next(rd);
 	}
@@ -335,7 +335,7 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
 	/* Insert name servers for this zone cut, addresses will be looked up
 	 * on-demand (either from cache or iteratively) */
-	for (unsigned i = 0; i < ns_rds.rr_count; ++i) {
+	for (unsigned i = 0; i < ns_rds.count; ++i) {
 		const knot_dname_t *ns_name = knot_ns_name(&ns_rds, i);
 		(void) kr_zonecut_add(cut, ns_name, NULL);
 		/* Fetch NS reputation and decide whether to prefetch A/AAAA records. */
......
@@ -23,7 +23,7 @@ local function check_time_callback(pkt, req)
 	for i = 1, #section do
 		local rr = section[i]
 		if rr.type == kres.type.RRSIG then
-			for k = 0, rr.rrs.rr_count - 1 do
+			for k = 0, rr.rrs.count - 1 do
 				seen_rrsigs = seen_rrsigs + 1
 				inception = ffi.C.kr_rrsig_sig_inception(rr.rrs, k)
 				expiration = ffi.C.kr_rrsig_sig_expiration(rr.rrs, k)
......
@@ -51,7 +51,7 @@ local function address_callback(pkt, req)
 	for i = 1, #section do
 		local rr = section[i]
 		if rr.type == kres.type.A or rr.type == kres.type.AAAA then
-			for k = 0, rr.rrs.rr_count-1 do
+			for k = 0, rr.rrs.count-1 do
 				table.insert(internal.nsset[rr:owner()], rr:rdata(k))
 			end
 		end
@@ -89,8 +89,8 @@ local function priming_callback(pkt, req)
 		local rr = section[i]
 		if rr.type == kres.type.NS then
 			internal.min_ttl = math.min(internal.min_ttl, rr:ttl())
-			internal.to_resolve = internal.to_resolve + 2 * rr.rrs.rr_count
-			for k = 0, rr.rrs.rr_count-1 do
+			internal.to_resolve = internal.to_resolve + 2 * rr.rrs.count
+			for k = 0, rr.rrs.count-1 do
 				local nsname_text = rr:tostring(k)
 				local nsname_wire = rr:rdata(k) -- FIXME: something is wrong
 				internal.nsset[nsname_wire] = {}
......
@@ -86,7 +86,7 @@ for _, qtype in ipairs(qtypes) do
 	local section = pkt:rrsets(kres.section.ANSWER)
 	for i = 1, #section do
 		local rr = section[i]
-		for k = 1, rr.rrs.rr_count do
+		for k = 1, rr.rrs.count do
 			local rdata = rr:tostring(k - 1)
 			local owner = kres.dname2str(rr:owner())
 			if qverbose then
......
@@ -117,7 +117,7 @@ static inline void test_random_rr(knot_rrset_t *rr, uint32_t ttl)
 	/* Assign static buffers. */
 	knot_rrset_init(rr, owner_buf, KNOT_RRTYPE_TXT, KNOT_CLASS_IN, ttl);
-	rr->rrs.rr_count = 1;
-	rr->rrs.data = rdata;
+	rr->rrs.count = 1;
+	rr->rrs.rdata = rdata;
 }
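
The test helper above relies on knot_rdataset_t carrying only the count and a pointer, so a single-RR set can point straight at a static buffer. A sketch of that construction under the same assumptions (helper name and buffer contract are illustrative, not part of this commit):

#include <libknot/rdataset.h>
#include <string.h>

/* Build a one-RR rdataset in place.  `buf` must hold at least
 * sizeof(uint16_t) + len bytes and be 2-byte aligned, because it is
 * reinterpreted as a knot_rdata_t (uint16_t length + data bytes). */
static void rdataset_from_buf(knot_rdataset_t *rrs, uint8_t *buf,
                              const uint8_t *bytes, uint16_t len)
{
	memcpy(buf, &len, sizeof(len));        /* knot_rdata_t.len */
	memcpy(buf + sizeof(len), bytes, len); /* knot_rdata_t.data */
	rrs->count = 1;                        /* was rr_count */
	rrs->rdata = (knot_rdata_t *)buf;      /* was data */
}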