Commit 435c50b6 authored by Grigorii Demidov, committed by Ondřej Surý

layer/validate: refactoring

parent 7daf9ecc
......@@ -62,6 +62,20 @@ typedef struct {
size_t len;
size_t cap;
} rr_array_t;
struct ranked_rr_array_entry {
uint8_t rank;
uint32_t qry_uid;
_Bool cached;
_Bool yielded;
_Bool to_wire;
knot_rrset_t *rr;
};
typedef struct ranked_rr_array_entry ranked_rr_array_entry_t;
typedef struct {
ranked_rr_array_entry_t **at;
size_t len;
size_t cap;
} ranked_rr_array_t;
struct kr_zonecut {
knot_dname_t *name;
knot_rrset_t *key;
......@@ -80,6 +94,7 @@ struct kr_rplan {
kr_qarray_t resolved;
struct kr_request *request;
knot_mm_t *pool;
uint32_t next_uid;
};
struct kr_request {
struct kr_context *ctx;
......@@ -98,11 +113,15 @@ struct kr_request {
} upstream;
uint32_t options;
int state;
rr_array_t authority;
ranked_rr_array_t answ_selected;
ranked_rr_array_t auth_selected;
rr_array_t additional;
_Bool answ_validated;
_Bool auth_validated;
struct kr_rplan rplan;
int has_tls;
knot_mm_t pool;
uint32_t next_uid;
};
struct knot_rrset {
knot_dname_t *_owner;
......
......@@ -34,9 +34,9 @@ typedef void (*map_free_f)(void *baton, void *ptr);
map_t
# libkres
rr_array_t
#struct ranked_rr_array_entry # TODO: prepared for vld-refactoring
#ranked_rr_array_entry_t
#ranked_rr_array_t
struct ranked_rr_array_entry
ranked_rr_array_entry_t
ranked_rr_array_t
struct kr_zonecut
kr_qarray_t
struct kr_rplan
......
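The new ranked_rr_array_entry attaches per-query bookkeeping (rank, owning query uid, and the cached/yielded/to_wire flags) to every selected RRset. A minimal sketch, not part of this commit, of reading those fields; it assumes only the declarations exported above:

/* Minimal sketch (not in the patch): count the records a finished request
 * selected for the wire, using the fields declared above. */
static size_t count_to_wire(const ranked_rr_array_t *arr)
{
	size_t n = 0;
	for (size_t i = 0; i < arr->len; ++i) {
		const ranked_rr_array_entry_t *entry = arr->at[i];
		if (entry->to_wire && !entry->yielded) {
			++n;
		}
	}
	return n;
}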
......@@ -83,7 +83,7 @@ static int validate_rrsig_rr(int *flags, int cov_labels,
/* bullet 2 */
const knot_dname_t *signer_name = knot_rrsig_signer_name(&rrsigs->rrs, sig_pos);
if (!signer_name || !knot_dname_is_equal(signer_name, zone_name)) {
return kr_error(EINVAL);
return kr_error(EAGAIN);
}
/* bullet 4 */
{
......@@ -174,10 +174,9 @@ int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
--covered_labels;
}
const knot_pktsection_t *sec = knot_pkt_section(pkt, section_id);
for (unsigned i = 0; i < sec->count; ++i) {
for (uint16_t i = 0; i < vctx->rrs->len; ++i) {
/* Consider every RRSIG that matches owner and covers the class/type. */
const knot_rrset_t *rrsig = knot_pkt_rr(sec, i);
const knot_rrset_t *rrsig = vctx->rrs->at[i]->rr;
if (rrsig->type != KNOT_RRTYPE_RRSIG) {
continue;
}
......@@ -190,9 +189,14 @@ int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
if (knot_rrsig_type_covered(&rrsig->rrs, j) != covered->type) {
continue;
}
if (validate_rrsig_rr(&val_flgs, covered_labels, rrsig, j,
int ret = validate_rrsig_rr(&val_flgs, covered_labels, rrsig, j,
keys, key_pos, keytag,
zone_name, timestamp) != 0) {
zone_name, timestamp);
if (ret == kr_error(EAGAIN)) {
kr_dnssec_key_free(&created_key);
vctx->result = ret;
return ret;
} else if (ret != 0) {
continue;
}
if (val_flgs & FLG_WILDCARD_EXPANSION) {
......@@ -202,6 +206,7 @@ int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
}
}
if (kr_check_signature(rrsig, j, (dnssec_key_t *) key, covered, trim_labels) != 0) {
kr_dnssec_key_free(&created_key);
continue;
}
if (val_flgs & FLG_WILDCARD_EXPANSION) {
......
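With this change a signer-name mismatch makes kr_rrset_validate_with_key() abort with kr_error(EAGAIN) stored in vctx->result instead of silently skipping the RRSIG. A hedged sketch of how a caller might separate that case from a hard failure; the helper name is illustrative, not from the patch:

/* Illustrative only: distinguishing the new EAGAIN result from a real
 * validation failure, based on the kr_rrset_validation_ctx fields above. */
static int interpret_result(const kr_rrset_validation_ctx_t *vctx)
{
	if (vctx->result == kr_error(EAGAIN)) {
		/* The RRSIG signer name lies outside the expected zone; the
		 * RRset is not necessarily bogus, keys for a different zone
		 * cut may be needed before judging it. */
		return kr_error(EAGAIN);
	}
	return vctx->result;	/* 0 on success, other errors mean failed validation */
}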
......@@ -17,6 +17,7 @@
#pragma once
#include "lib/defines.h"
#include "lib/utils.h"
#include <libknot/packet/pkt.h>
/**
......@@ -46,12 +47,14 @@ struct dseckey;
/** DNSSEC validation context. */
struct kr_rrset_validation_ctx {
const knot_pkt_t *pkt; /*!< Packet to be validated. */
ranked_rr_array_t *rrs; /*!< List of preselected RRs to be validated. */
knot_section_t section_id; /*!< Section to work with. */
const knot_rrset_t *keys; /*!< DNSKEY RRSet. */
const knot_dname_t *zone_name; /*!< Name of the zone containing the RRSIG RRSet. */
uint32_t timestamp; /*!< Validation time. */
bool has_nsec3; /*!< Whether to use NSEC3 validation. */
uint32_t flags; /*!< Output - Flags. */
uint32_t err_cnt; /*!< Output - Number of validation failures. */
int result; /*!< Output - 0 or error code. */
};
......
......@@ -697,7 +697,7 @@ int kr_nsec3_no_data(const knot_pkt_t *pkt, knot_section_t section_id,
* Denial of existence cannot be proven.
* Set error code to proceed as insecure.
*/
ret = kr_error(DNSSEC_NOT_FOUND);
ret = kr_error(DNSSEC_OUT_OF_RANGE);
}
return ret;
......@@ -721,6 +721,7 @@ int kr_nsec3_ref_to_unsigned(const knot_pkt_t *pkt)
if (ns->type != KNOT_RRTYPE_NS) {
continue;
}
bool nsec3_found = false;
flags = 0;
for (unsigned j = 0; j < sec->count; ++j) {
const knot_rrset_t *nsec3 = knot_pkt_rr(sec, j);
......@@ -730,6 +731,7 @@ int kr_nsec3_ref_to_unsigned(const knot_pkt_t *pkt)
if (nsec3->type != KNOT_RRTYPE_NSEC3) {
continue;
}
nsec3_found = true;
/* nsec3 found, check if owner name matches
* the delegation name
*/
......@@ -757,6 +759,9 @@ int kr_nsec3_ref_to_unsigned(const knot_pkt_t *pkt)
return kr_ok();
}
}
if (!nsec3_found) {
return kr_error(DNSSEC_NOT_FOUND);
}
if (flags & FLG_NAME_MATCHED) {
/* nsec3 which owner matches
* the delegation name was found,
......@@ -777,7 +782,7 @@ int kr_nsec3_ref_to_unsigned(const knot_pkt_t *pkt)
}
if (has_optout(covering_next_nsec3)) {
return kr_error(DNSSEC_NOT_FOUND);
return kr_error(DNSSEC_OUT_OF_RANGE);
} else {
return kr_error(EINVAL);
}
......
......@@ -48,8 +48,10 @@ int kr_nsec3_wildcard_answer_response_check(const knot_pkt_t *pkt, knot_section_
* @param sname Queried domain name.
* @param stype Queried type.
* @return 0 or error code:
* DNSSEC_NOT_FOUND - denial of existence can't be proven
* due to opt-out, otherwise - bogus.
* DNSSEC_NOT_FOUND - neither ds nor nsec records
were found.
* DNSSEC_OUT_OF_RANGE - denial of existence can't be proven
* due to opt-out, otherwise - bogus.
*/
int kr_nsec3_no_data(const knot_pkt_t *pkt, knot_section_t section_id,
const knot_dname_t *sname, uint16_t stype);
......@@ -59,9 +61,9 @@ int kr_nsec3_no_data(const knot_pkt_t *pkt, knot_section_t section_id,
* @note No RRSIGs are validated.
* @param pkt Packet structure to be processed.
* @return 0 or error code:
* DNSSEC_NOT_FOUND - denial of existence can't be proven
* due to opt-out.
* EEXIST - ds record was found.
* EINVAL - bogus.
* DNSSEC_OUT_OF_RANGE - denial of existence can't be proven
* due to opt-out.
* EEXIST - ds record was found.
* EINVAL - bogus.
*/
int kr_nsec3_ref_to_unsigned(const knot_pkt_t *pkt);
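A hedged sketch of how a caller could act on the return codes documented above; the QUERY_DNSSEC_* flags appear elsewhere in this commit, while the helper itself is hypothetical:

/* Hypothetical caller (not part of this commit): mapping the documented
 * return codes of kr_nsec3_ref_to_unsigned() to a resolution decision. */
static int check_referral(struct kr_query *qry, const knot_pkt_t *pkt)
{
	int ret = kr_nsec3_ref_to_unsigned(pkt);
	if (ret == 0 || ret == kr_error(DNSSEC_OUT_OF_RANGE)) {
		/* Unsigned referral proven, or opt-out prevents the proof:
		 * continue without DNSSEC for this subtree. */
		qry->flags |= QUERY_DNSSEC_INSECURE;
		qry->flags &= ~QUERY_DNSSEC_WANT;
		return kr_ok();
	}
	if (ret == kr_error(EEXIST)) {
		return kr_ok();	/* a DS record is present, stay in secure mode */
	}
	/* DNSSEC_NOT_FOUND (no NSEC3 present) or EINVAL (bogus):
	 * just propagate the error in this sketch. */
	return ret;
}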
......@@ -254,6 +254,10 @@ static void stash_glue(map_t *stash, knot_pkt_t *pkt, const knot_dname_t *ns_nam
/* @internal DS is special and is present only parent-side */
static void stash_ds(struct kr_request *req, knot_pkt_t *pkt, map_t *stash, knot_mm_t *pool)
{
ranked_rr_array_t *arr = &req->auth_selected;
if (!arr->len) {
return;
}
struct kr_query *qry = req->current_query;
uint8_t rank = KR_RANK_AUTH;
if (knot_wire_get_cd(req->answer->wire)) {
......@@ -261,17 +265,29 @@ static void stash_ds(struct kr_request *req, knot_pkt_t *pkt, map_t *stash, knot
* save it to the BAD cache */
rank = KR_RANK_BAD;
}
const knot_pktsection_t *authority = knot_pkt_section(pkt, KNOT_AUTHORITY);
for (unsigned i = 0; i < authority->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(authority, i);
/* uncached entries are located at the end */
for (ssize_t i = arr->len - 1; i >= 0; --i) {
ranked_rr_array_entry_t *entry = arr->at[i];
const knot_rrset_t *rr = entry->rr;
if (entry->qry_uid != qry->uid) {
continue;
}
if (entry->cached) {
continue;
}
if (rr->type == KNOT_RRTYPE_DS || rr->type == KNOT_RRTYPE_RRSIG) {
kr_rrmap_add(stash, rr, rank, pool);
entry->cached = true;
}
}
}
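stash_ds(), together with stash_authority() and stash_answer() below, now uses one common walk: iterate the ranked array from the tail (uncached entries are appended there), skip entries that belong to another query or are already cached, and mark what was stashed. A hypothetical helper, not in this commit, capturing that pattern:

/* Hypothetical refactoring sketch (not in the patch): the walk shared by the
 * stash_* functions in this file.  The callback decides what to keep. */
static void for_each_uncached(struct kr_request *req, ranked_rr_array_t *arr,
                              bool (*stash_one)(struct kr_request *req,
                                                ranked_rr_array_entry_t *entry))
{
	const struct kr_query *qry = req->current_query;
	for (ssize_t i = arr->len - 1; i >= 0; --i) {
		ranked_rr_array_entry_t *entry = arr->at[i];
		if (entry->qry_uid != qry->uid || entry->cached) {
			continue;	/* other query, or already stashed */
		}
		if (stash_one(req, entry)) {
			entry->cached = true;
		}
	}
}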
static int stash_authority(struct kr_request *req, knot_pkt_t *pkt, map_t *stash, knot_mm_t *pool)
{
ranked_rr_array_t *arr = &req->auth_selected;
if (!arr->len) {
return kr_ok();
}
struct kr_query *qry = req->current_query;
uint8_t rank = KR_RANK_NONAUTH;
if (knot_wire_get_cd(req->answer->wire)) {
......@@ -279,11 +295,14 @@ static int stash_authority(struct kr_request *req, knot_pkt_t *pkt, map_t *stash
* save authority to the BAD cache */
rank = KR_RANK_BAD;
}
const knot_pktsection_t *authority = knot_pkt_section(pkt, KNOT_AUTHORITY);
for (unsigned i = 0; i < authority->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(authority, i);
/* Cache in-bailiwick data only */
if (!knot_dname_in(qry->zone_cut.name, rr->owner)) {
/* uncached entries are located at the end */
for (ssize_t i = arr->len - 1; i >= 0; --i) {
ranked_rr_array_entry_t *entry = arr->at[i];
const knot_rrset_t *rr = entry->rr;
if (entry->qry_uid != qry->uid) {
continue;
}
if (entry->cached) {
continue;
}
/* Look up glue records for NS */
......@@ -293,19 +312,18 @@ static int stash_authority(struct kr_request *req, knot_pkt_t *pkt, map_t *stash
stash_glue(stash, pkt, ns_name, pool);
}
}
/* Stash record */
kr_rrmap_add(stash, rr, rank, pool);
entry->cached = true;
}
return kr_ok();
}
static int stash_answer(struct kr_request *req, knot_pkt_t *pkt, map_t *stash, knot_mm_t *pool)
{
ranked_rr_array_t *arr = &req->answ_selected;
struct kr_query *qry = req->current_query;
/* Work with QNAME, as minimised name data is cacheable. */
const knot_dname_t *cname_begin = knot_pkt_qname(pkt);
if (!cname_begin) {
cname_begin = qry->sname;
if (!arr->len) {
return kr_ok();
}
uint8_t rank = KR_RANK_AUTH;
if (knot_wire_get_cd(req->answer->wire)) {
......@@ -313,44 +331,19 @@ static int stash_answer(struct kr_request *req, knot_pkt_t *pkt, map_t *stash, k
* Save answer to the BAD cache. */
rank = KR_RANK_BAD;
}
/* Stash direct answers (equal to current QNAME/CNAME),
* accept out-of-order RRSIGS. */
const knot_pktsection_t *answer = knot_pkt_section(pkt, KNOT_ANSWER);
const knot_dname_t *cname = NULL;
const knot_dname_t *next_cname = cname_begin;
unsigned cname_chain_len = 0;
do {
cname = next_cname;
next_cname = NULL;
for (unsigned i = 0; i < answer->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(answer, i);
if (!knot_dname_is_equal(rr->owner, cname)) {
continue;
}
kr_rrmap_add(stash, rr, rank, pool);
/* Follow CNAME chain in current cut (if SECURE). */
if ((qry->flags & QUERY_DNSSEC_WANT) && rr->type == KNOT_RRTYPE_CNAME) {
cname_chain_len += 1;
next_cname = knot_cname_name(&rr->rrs);
if (next_cname && !knot_dname_in(qry->zone_cut.name, next_cname)) {
next_cname = NULL;
}
/* Check if the same CNAME was already resolved */
if (next_cname) {
char key[KR_RRKEY_LEN];
int ret = kr_rrkey(key, next_cname, rr->type, rank);
if (ret != 0 || map_get(stash, key)) {
VERBOSE_MSG(qry, "<= cname chain loop\n");
next_cname = NULL;
}
}
if (cname_chain_len > answer->count || cname_chain_len > KR_CNAME_CHAIN_LIMIT) {
VERBOSE_MSG(qry, "<= too long cname chain\n");
next_cname = NULL;
}
}
/* uncached entries are located at the end */
for (ssize_t i = arr->len - 1; i >= 0; --i) {
ranked_rr_array_entry_t *entry = arr->at[i];
if (entry->qry_uid != qry->uid) {
continue;
}
} while (next_cname);
if (entry->cached) {
continue;
}
const knot_rrset_t *rr = entry->rr;
kr_rrmap_add(stash, rr, rank, pool);
entry->cached = true;
}
return kr_ok();
}
......
......@@ -37,6 +37,16 @@
#define VERBOSE_MSG(qry, fmt...) QRVERBOSE((qry), "resl", fmt)
static void set_yield(ranked_rr_array_t *array, const uint32_t qry_uid, const bool yielded)
{
for (unsigned i = 0; i < array->len; ++i) {
ranked_rr_array_entry_t *entry = array->at[i];
if (entry->qry_uid == qry_uid) {
entry->yielded = yielded;
}
}
}
/**
* @internal Defer execution of current query.
* The current layer state and input will be pushed to a stack and resumed on next iteration.
......@@ -53,6 +63,8 @@ static int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt)
pickle->pkt = pkt_copy;
pickle->next = qry->deferred;
qry->deferred = pickle;
set_yield(&req->answ_selected, qry->uid, true);
set_yield(&req->auth_selected, qry->uid, true);
return kr_ok();
}
return kr_error(ENOMEM);
......@@ -340,6 +352,23 @@ static void write_extra_records(rr_array_t *arr, knot_pkt_t *answer)
}
}
static void write_extra_ranked_records(ranked_rr_array_t *arr, knot_pkt_t *answer)
{
for (size_t i = 0; i < arr->len; ++i) {
ranked_rr_array_entry_t * entry = arr->at[i];
if (!entry->to_wire) {
continue;
}
knot_rrset_t *rr = entry->rr;
if (!knot_pkt_has_dnssec(answer)) {
if (rr->type != knot_pkt_qtype(answer) && knot_rrtype_is_dnssec(rr->type)) {
continue;
}
}
knot_pkt_put(answer, 0, rr, 0);
}
}
/** @internal Add an EDNS padding RR into the answer if requested and required. */
static int answer_padding(struct kr_request *request)
{
......@@ -401,11 +430,20 @@ static int answer_finalize(struct kr_request *request, int state)
}
}
if (request->answ_selected.len > 0) {
assert(answer->current <= KNOT_ANSWER);
/* Write answer records. */
if (answer->current < KNOT_ANSWER) {
knot_pkt_begin(answer, KNOT_ANSWER);
}
write_extra_ranked_records(&request->answ_selected, answer);
}
/* Write authority records. */
if (answer->current < KNOT_AUTHORITY) {
knot_pkt_begin(answer, KNOT_AUTHORITY);
}
write_extra_records(&request->authority, answer);
write_extra_ranked_records(&request->auth_selected, answer);
/* Write additional records. */
knot_pkt_begin(answer, KNOT_ADDITIONAL);
write_extra_records(&request->additional, answer);
......@@ -472,8 +510,11 @@ int kr_resolve_begin(struct kr_request *request, struct kr_context *ctx, knot_pk
request->options = ctx->options;
request->state = KR_STATE_CONSUME;
request->current_query = NULL;
array_init(request->authority);
array_init(request->additional);
array_init(request->answ_selected);
array_init(request->auth_selected);
request->answ_validated = false;
request->auth_validated = false;
/* Expect first query */
kr_rplan_init(&request->rplan, request, &request->pool);
......@@ -806,6 +847,15 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
return KR_STATE_FAIL;
}
}
/* qry->zone_cut.name can change, check it again
* to prevent unnecessary DS & DNSKEY queries */
if (!(qry->flags & QUERY_DNSSEC_INSECURE) &&
!kr_ta_covers(negative_anchors, qry->zone_cut.name) &&
kr_ta_covers(trust_anchors, qry->zone_cut.name)) {
qry->flags |= QUERY_DNSSEC_WANT;
} else {
qry->flags &= ~QUERY_DNSSEC_WANT;
}
/* Update minimized QNAME if zone cut changed */
if (qry->zone_cut.name[0] != '\0' && !(qry->flags & QUERY_NO_MINIMIZE)) {
if (kr_make_query(qry, packet) != 0) {
......@@ -840,6 +890,8 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
VERBOSE_MSG(qry, "=> resuming yielded answer\n");
struct kr_layer_pickle *pickle = qry->deferred;
request->state = KR_STATE_YIELD;
set_yield(&request->answ_selected, qry->uid, false);
set_yield(&request->auth_selected, qry->uid, false);
RESUME_LAYERS(layer_id(request, pickle->api), request, qry, consume, pickle->pkt);
qry->deferred = pickle->next;
} else {
......
......@@ -72,6 +72,16 @@
* @endcode
*/
/** Validation rank */
typedef enum kr_validation_rank {
KR_VLDRANK_INITIAL = 0, /* Entry was just added; not validated yet. */
KR_VLDRANK_INSECURE = 1, /* Entry is DNSSEC insecure (e.g. RRSIG does not exist). */
KR_VLDRANK_BAD = 2, /* Matching RRSIG found, but validation fails. */
KR_VLDRANK_MISMATCH = 3, /* RRSIG signer name does not match the zone name. */
KR_VLDRANK_UNKNOWN = 4, /* Validation status is unknown. */
KR_VLDRANK_SECURE = 5 /* Entry is DNSSEC valid (e.g. RRSIG exists). */
} kr_validation_rank_t;
/** @cond internal Array of modules. */
typedef array_t(struct kr_module *) module_array_t;
/* @endcond */
......@@ -114,27 +124,31 @@ struct kr_context
* @note All data for this request must be allocated from the given pool.
*/
struct kr_request {
struct kr_context *ctx;
knot_pkt_t *answer;
struct kr_query *current_query; /**< Current evaluated query. */
struct {
const knot_rrset_t *key;
const struct sockaddr *addr;
const struct sockaddr *dst_addr;
const knot_pkt_t *packet;
const knot_rrset_t *opt;
} qsource;
struct {
unsigned rtt; /**< Current upstream RTT */
const struct sockaddr *addr; /**< Current upstream address */
} upstream; /**< Upstream information, valid only in consume() phase */
uint32_t options;
int state;
rr_array_t authority;
rr_array_t additional;
struct kr_rplan rplan;
int has_tls;
knot_mm_t pool;
struct kr_context *ctx;
knot_pkt_t *answer;
struct kr_query *current_query; /**< Current evaluated query. */
struct {
const knot_rrset_t *key;
const struct sockaddr *addr;
const struct sockaddr *dst_addr;
const knot_pkt_t *packet;
const knot_rrset_t *opt;
} qsource;
struct {
unsigned rtt; /**< Current upstream RTT */
const struct sockaddr *addr; /**< Current upstream address */
} upstream; /**< Upstream information, valid only in consume() phase */
uint32_t options;
int state;
ranked_rr_array_t answ_selected;
ranked_rr_array_t auth_selected;
rr_array_t additional;
bool answ_validated;
bool auth_validated;
struct kr_rplan rplan;
int has_tls;
knot_mm_t pool;
uint32_t next_uid;
};
/**
......
......@@ -40,7 +40,7 @@ const knot_lookup_t *kr_query_flag_names(void)
return query_flag_names;
}
static struct kr_query *query_create(knot_mm_t *pool, const knot_dname_t *name)
static struct kr_query *query_create(knot_mm_t *pool, const knot_dname_t *name, uint32_t uid)
{
struct kr_query *qry = mm_alloc(pool, sizeof(struct kr_query));
if (qry == NULL) {
......@@ -57,6 +57,7 @@ static struct kr_query *query_create(knot_mm_t *pool, const knot_dname_t *name)
}
knot_dname_to_lower(qry->sname);
qry->uid = uid;
return qry;
}
......@@ -79,6 +80,7 @@ int kr_rplan_init(struct kr_rplan *rplan, struct kr_request *request, knot_mm_t
rplan->request = request;
array_init(rplan->pending);
array_init(rplan->resolved);
rplan->next_uid = 0;
return KNOT_EOK;
}
......@@ -121,10 +123,11 @@ static struct kr_query *kr_rplan_push_query(struct kr_rplan *rplan,
return NULL;
}
struct kr_query *qry = query_create(rplan->pool, name);
struct kr_query *qry = query_create(rplan->pool, name, rplan->next_uid);
if (qry == NULL) {
return NULL;
}
rplan->next_uid += 1;
/* Class and type must be set outside this function. */
qry->flags = rplan->request->options;
qry->parent = parent;
......
......@@ -81,6 +81,7 @@ struct kr_query {
struct kr_zonecut zone_cut;
struct kr_nsrep ns;
struct kr_layer_pickle *deferred;
uint32_t uid;
};
/** @cond internal Array of queries. */
......@@ -99,6 +100,7 @@ struct kr_rplan {
kr_qarray_t resolved; /**< List of resolved queries. */
struct kr_request *request; /**< Parent resolution request. */
knot_mm_t *pool; /**< Temporary memory pool. */
uint32_t next_uid;
};
/**
......
......@@ -25,6 +25,7 @@
#include <libknot/descriptor.h>
#include <libknot/dname.h>
#include <libknot/rrtype/rrsig.h>
#include <libknot/rrset-dump.h>
#include "lib/defines.h"
#include "lib/utils.h"
......@@ -411,6 +412,69 @@ int kr_rrarray_add(rr_array_t *array, const knot_rrset_t *rr, knot_mm_t *pool)
return kr_ok();
}
int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr,
uint8_t rank, bool to_wire, uint32_t qry_uid, knot_mm_t *pool)
{
/* rr always carries a single record per rrset here;
 * check whether another rrset with the same
 * rclass/type/owner combination already exists within the current query
 * and merge into it if so. */
for (ssize_t i = array->len - 1; i >= 0; --i) {
ranked_rr_array_entry_t *stashed = array->at[i];
if (stashed->yielded) {
break;
}
if (stashed->qry_uid != qry_uid) {
break;
}
if (stashed->rr->rclass == rr->rclass &&
stashed->rr->type == rr->type &&
knot_dname_is_equal(stashed->rr->owner, rr->owner)) {
assert(stashed->rank == rank &&
stashed->cached == false &&
stashed->to_wire == to_wire);
/* Merge */
return knot_rdataset_merge(&stashed->rr->rrs, &rr->rrs, pool);
}
}
/* No stashed rrset found, add */
int ret = array_reserve_mm(*array, array->len + 1, kr_memreserve, pool);
if (ret != 0) {
return kr_error(ENOMEM);
}
ranked_rr_array_entry_t *entry = mm_alloc(pool, sizeof(ranked_rr_array_entry_t));
if (!entry) {
return kr_error(ENOMEM);
}
knot_rrset_t *copy = knot_rrset_copy(rr, pool);
if (!copy) {
return kr_error(ENOMEM);
}
entry->qry_uid = qry_uid;
entry->rr = copy;
entry->rank = rank;
entry->cached = false;
entry->yielded = false;
entry->to_wire = to_wire;
array_push(*array, entry);
return kr_ok();
}
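The intended calling pattern, as a sketch: each record from a consumed packet is added under the current query uid, and repeated calls with the same owner/class/type within that query merge into a single stashed RRset. The wrapper name below is made up for illustration.

/* Sketch only: feeding an answer record into the ranked array.  KR_RANK_AUTH
 * and the structure fields come from this commit; the helper is hypothetical. */
static int select_answer_rr(struct kr_request *req, const struct kr_query *qry,
                            const knot_rrset_t *rr, bool to_wire)
{
	return kr_ranked_rrarray_add(&req->answ_selected, rr, KR_RANK_AUTH,
	                             to_wire, qry->uid, &req->pool);
}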
int kr_ranked_rrarray_set_wire(ranked_rr_array_t *array, bool to_wire, uint32_t qry_uid)
{
for (size_t i = 0; i < array->len; ++i) {
ranked_rr_array_entry_t *entry = array->at[i];
if (entry->qry_uid == qry_uid) {
entry->to_wire = to_wire;
}
}
return kr_ok();
}
static char *callprop(struct kr_module *module, const char *prop, const char *input, void *env)
{
if (!module || !prop) {
......@@ -438,3 +502,61 @@ char *kr_module_call(struct kr_context *ctx, const char *module, const char *pro
}
return NULL;
}
void kr_rrset_print(const knot_rrset_t *rr)
{
char rrtext[KNOT_DNAME_MAXLEN * 2] = {0};
knot_rrset_txt_dump(rr, rrtext, sizeof(rrtext), &KNOT_DUMP_STYLE_DEFAULT);
printf("%s", rrtext);
}
void kr_pkt_print(knot_pkt_t *pkt)
{
char snames[3][11] = {"ANSWER","AUTHORITY","ADDITIONAL"};
char rrtype[32];
char qname[KNOT_DNAME_MAXLEN];
uint8_t pkt_rcode = knot_wire_get_rcode(pkt->wire);
const knot_lookup_t *rcode = NULL;
rcode = knot_lookup_by_id(knot_rcode_names, pkt_rcode);
printf("RCODE: %s FLAGS: ", rcode != NULL ? rcode->name : "unknown");
if (knot_wire_get_aa(pkt->wire))
printf("AA ");
if (knot_wire_get_rd(pkt->wire))
printf("RD ");
if (knot_wire_get_tc(pkt->wire))
printf("TC ");
if (knot_wire_get_qr(pkt->wire))
printf("QR ");
if (knot_wire_get_cd(pkt->wire))
printf("CD ");
if (knot_wire_get_ad(pkt->wire))
printf("AD ");
if (knot_wire_get_ra(pkt->wire))
printf("RA ");
printf("\n");
knot_dname_to_str(qname, knot_pkt_qname(pkt), KNOT_DNAME_MAXLEN);
knot_rrtype_to_string(knot_pkt_qtype(pkt), rrtype, sizeof(rrtype));
printf("QUESTION\n%s\t\t%s\n", qname, rrtype);
for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
printf("%s\n", snames[i - KNOT_ANSWER]);
for (unsigned k = 0; k < sec->count; ++k) {
const knot_rrset_t *rr = knot_pkt_rr(sec, k);
kr_rrset_print(rr);
}
}
}
void kr_dname_print(const knot_dname_t *name, const char *prefix, const char *postfix)
{
char str[KNOT_DNAME_MAXLEN];
knot_dname_to_str(str, name, KNOT_DNAME_MAXLEN);
printf("%s%s%s", prefix, str, postfix);
}
void kr_rrtype_print(const uint16_t rrtype, const char *prefix, const char *postfix)
{
char str[32];
knot_rrtype_to_string(rrtype, str, 32);
printf("%s%s%s", prefix, str, postfix);
}
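The printers above are plain debugging aids; a brief illustrative use from a layer callback (the callback itself is hypothetical):

/* Illustrative only: dumping a consumed packet with the new helpers. */
static int debug_consume(kr_layer_t *ctx, knot_pkt_t *pkt)
{
	kr_pkt_print(pkt);	/* header flags, question and all sections */
	const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
	for (unsigned i = 0; i < an->count; ++i) {
		kr_rrset_print(knot_pkt_rr(an, i));
	}
	return ctx->state;
}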
......@@ -91,6 +91,16 @@ static inline long time_diff(struct timeval *begin, struct timeval *end) {
/** @cond internal Array types */
struct kr_context;
typedef array_t(knot_rrset_t *) rr_array_t;
struct ranked_rr_array_entry {
uint8_t rank;
uint32_t qry_uid;
bool cached;
bool yielded;
bool to_wire;
knot_rrset_t *rr;
};
typedef struct ranked_rr_array_entry ranked_rr_array_entry_t;
typedef array_t(ranked_rr_array_entry_t *) ranked_rr_array_t;
/* @endcond */
/** @internal RDATA array maximum size. */
......@@ -175,6 +185,17 @@ int kr_rrmap_add(map_t *stash, const knot_rrset_t *rr, uint8_t rank, knot_mm_t *
/** @internal Add RRSet copy to RR array. */
int kr_rrarray_add(rr_array_t *array, const knot_rrset_t *rr, knot_mm_t *pool);
/** @internal Add RRSet copy to ranked RR array. */
int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr,
uint8_t rank, bool to_wire, uint32_t qry_uid, knot_mm_t *pool);
int kr_ranked_rrarray_set_wire(ranked_rr_array_t *array, bool to_wire, uint32_t qry_uid);
void kr_rrset_print(const knot_rrset_t *rr);
void kr_pkt_print