Commit fc55eb69 authored by Marek Vavruša

lib: layers can now push additional queries freely

current processed query is always in `request->current_query`
parent 002543f8
......@@ -175,6 +175,7 @@ struct kr_rplan {
struct kr_request {
struct kr_context *ctx;
knot_pkt_t *answer;
struct kr_query *current_query;
struct {
const knot_rrset_t *key;
const struct sockaddr *addr;
......@@ -223,7 +224,6 @@ const knot_rrset_t *knot_pkt_rr(const knot_pktsection_t *section, uint16_t i);
/* Resolution request */
struct kr_rplan *kr_resolve_plan(struct kr_request *request);
/* Resolution plan */
struct kr_query *kr_rplan_current(struct kr_rplan *rplan);
/* Query */
/* Utils */
unsigned kr_rand_uint(unsigned max);
......@@ -322,7 +322,7 @@ ffi.metatype( kr_request_t, {
__index = {
current = function(req)
assert(req)
return C.kr_rplan_current(C.kr_resolve_plan(req))
return req.current_query
end,
},
})
......
......@@ -31,7 +31,7 @@ These we call as *driver*. The driver is not meant to know *"how"* the query is
.. image:: ../doc/resolution.png
:align: center
On the other side are *layers*. They are responsible for dissecting the packets and informing the driver about the results. For example, a produce layer can generate a sub-request, a consume layer can satisfy an outstanding query or simply log something, but they should **never** alter resolution plan directly, as it would change "current query" for next-in-line layers (appending to the resolution plan is fine). They also must not block, and may not be paused.
On the other side are *layers*. They are responsible for dissecting the packets and informing the driver about the results. For example, a produce layer can generate a sub-request, a consume layer can satisfy an outstanding query or simply log something. They also must not block, and may not be paused.
.. tip:: Layers are executed asynchronously by the driver. If you need some asset beforehand, you can signal the driver by returning a state or setting flags on the current query. For example, setting the flag ``QUERY_AWAIT_CUT`` forces the driver to fetch zone cut information before the packet is consumed; setting the ``QUERY_RESOLVED`` flag makes it pop a query after the current set of layers is finished; returning the ``FAIL`` state makes it fail the current query. The important thing is, these actions happen **after** the current set of layers is done.
......@@ -57,7 +57,7 @@ This structure contains pointers to resolution context, resolution plan and also
int consume(knot_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *request = ctx->data;
struct kr_query *query = kr_rplan_current(request->rplan);
struct kr_query *query = request->current_query;
}
This is only passive processing of the incoming answer. If you want to change the course of resolution, say satisfy a query from a local cache before the library issues a query to the nameserver, you can use states (see the :ref:`Static hints <mod-hints>` for example).
......@@ -69,7 +69,7 @@ This is only passive processing of the incoming answer. If you want to change th
int produce(knot_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *request = ctx->data;
struct kr_query *cur = kr_rplan_current(request->rplan);
struct kr_query *cur = request->current_query;
/* Query can be satisfied locally. */
if (can_satisfy(cur)) {
......
......@@ -28,7 +28,7 @@
#include "lib/module.h"
#include "lib/dnssec/ta.h"
#define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(&req->rplan), "iter", fmt)
#define DEBUG_MSG(fmt...) QRDEBUG(req->current_query, "iter", fmt)
/* Iterator often walks through packet section, this is an abstraction. */
typedef int (*rr_callback_t)(const knot_rrset_t *, unsigned, struct kr_request *);
......@@ -158,16 +158,14 @@ static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
return KNOT_STATE_CONSUME;
}
static int update_parent(const knot_rrset_t *rr, struct kr_request *req)
static int update_parent(const knot_rrset_t *rr, struct kr_query *qry)
{
struct kr_query *qry = kr_rplan_current(&req->rplan);
return update_nsaddr(rr, qry->parent);
}
static int update_answer(const knot_rrset_t *rr, unsigned hint, struct kr_request *req)
static int update_answer(const knot_rrset_t *rr, unsigned hint, knot_pkt_t *answer)
{
/* Scrub DNSSEC records when not requested. */
knot_pkt_t *answer = req->answer;
if (!knot_pkt_has_dnssec(answer)) {
if (rr->type != knot_pkt_qtype(answer) && knot_rrtype_is_dnssec(rr->type)) {
return KNOT_STATE_DONE; /* Scrub */
......@@ -214,7 +212,7 @@ static int has_glue(knot_pkt_t *pkt, const knot_dname_t *ns)
static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_request *req)
{
struct kr_query *qry = kr_rplan_current(&req->rplan);
struct kr_query *qry = req->current_query;
struct kr_zonecut *cut = &qry->zone_cut;
int state = KNOT_STATE_CONSUME;
......@@ -278,7 +276,7 @@ static const knot_dname_t *signature_authority(knot_pkt_t *pkt)
static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
int result = KNOT_STATE_CONSUME;
struct kr_query *qry = kr_rplan_current(&req->rplan);
struct kr_query *qry = req->current_query;
const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);
#ifdef STRICT_MODE
......@@ -360,7 +358,7 @@ static void finalize_answer(knot_pkt_t *pkt, struct kr_query *qry, struct kr_req
for (unsigned i = 0; i < ns->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(ns, i);
if (knot_dname_in(cut->name, rr->owner)) {
update_answer(rr, 0, req);
update_answer(rr, 0, answer);
}
}
}
......@@ -368,7 +366,7 @@ static void finalize_answer(knot_pkt_t *pkt, struct kr_query *qry, struct kr_req
static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
{
struct kr_query *query = kr_rplan_current(&req->rplan);
struct kr_query *query = req->current_query;
/* Response for minimized QNAME.
* NODATA => may be empty non-terminal, retry (found zone cut)
......@@ -407,7 +405,7 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
if(knot_dname_is_equal(cname, knot_pkt_qname(req->answer))) {
hint = KNOT_COMPR_HINT_QNAME;
}
int state = is_final ? update_answer(rr, hint, req) : update_parent(rr, req);
int state = is_final ? update_answer(rr, hint, req->answer) : update_parent(rr, query);
if (state == KNOT_STATE_FAIL) {
return state;
}
......@@ -431,8 +429,6 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
if (!next) {
return KNOT_STATE_FAIL;
}
rem_node(&query->node); /* *MUST* keep current query at tail */
insert_node(&query->node, &next->node);
next->flags |= QUERY_AWAIT_CUT;
/* Want DNSSEC if it's posible to secure this name (e.g. is covered by any TA) */
if (kr_ta_covers(&req->ctx->trust_anchors, cname) &&
......@@ -488,7 +484,7 @@ static int prepare_query(knot_layer_t *ctx, knot_pkt_t *pkt)
{
assert(pkt && ctx);
struct kr_request *req = ctx->data;
struct kr_query *query = kr_rplan_current(&req->rplan);
struct kr_query *query = req->current_query;
if (!query || ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
return ctx->state;
}
......@@ -525,7 +521,7 @@ static int resolve(knot_layer_t *ctx, knot_pkt_t *pkt)
{
assert(pkt && ctx);
struct kr_request *req = ctx->data;
struct kr_query *query = kr_rplan_current(&req->rplan);
struct kr_query *query = req->current_query;
if (!query || (query->flags & QUERY_RESOLVED)) {
return ctx->state;
}
......
......@@ -22,7 +22,7 @@
#include "lib/cache.h"
#include "lib/module.h"
#define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(rplan), " pc ", fmt)
#define DEBUG_MSG(qry, fmt...) QRDEBUG((qry), " pc ", fmt)
#define DEFAULT_MAXTTL (15 * 60)
#define DEFAULT_NOTTL (5) /* Short-time "no data" retention to avoid bursts */
......@@ -96,8 +96,7 @@ static int loot_cache(struct kr_cache_txn *txn, knot_pkt_t *pkt, struct kr_query
static int peek(knot_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_rplan *rplan = &req->rplan;
struct kr_query *qry = kr_rplan_current(rplan);
struct kr_query *qry = req->current_query;
if (ctx->state & (KNOT_STATE_FAIL|KNOT_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
return ctx->state; /* Already resolved/failed */
}
......@@ -119,7 +118,7 @@ static int peek(knot_layer_t *ctx, knot_pkt_t *pkt)
int ret = loot_cache(&txn, pkt, qry);
kr_cache_txn_abort(&txn);
if (ret == 0) {
DEBUG_MSG("=> satisfied from cache\n");
DEBUG_MSG(qry, "=> satisfied from cache\n");
qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
......@@ -162,8 +161,7 @@ static uint32_t packet_ttl(knot_pkt_t *pkt)
static int stash(knot_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_rplan *rplan = &req->rplan;
struct kr_query *qry = kr_rplan_current(rplan);
struct kr_query *qry = req->current_query;
/* Cache only answers that make query resolved (i.e. authoritative)
* that didn't fail during processing and are negative. */
if (qry->flags & QUERY_CACHED || ctx->state & KNOT_STATE_FAIL) {
......@@ -211,7 +209,7 @@ static int stash(knot_layer_t *ctx, knot_pkt_t *pkt)
if (ret != 0) {
kr_cache_txn_abort(&txn);
} else {
DEBUG_MSG("=> answer cached for TTL=%u\n", ttl);
DEBUG_MSG(qry, "=> answer cached for TTL=%u\n", ttl);
kr_cache_txn_commit(&txn);
}
return ctx->state;
......
......@@ -27,7 +27,7 @@
#include "lib/module.h"
#include "lib/utils.h"
#define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(rplan), " rc ", fmt)
#define DEBUG_MSG(qry, fmt...) QRDEBUG((qry), " rc ", fmt)
#define DEFAULT_MINTTL (5) /* Short-time "no data" retention to avoid bursts */
/** Record is expiring if it has less than 1% TTL (or less than 5s) */
......@@ -105,8 +105,7 @@ static int loot_cache(struct kr_cache *cache, knot_pkt_t *pkt, struct kr_query *
static int peek(knot_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_rplan *rplan = &req->rplan;
struct kr_query *qry = kr_rplan_current(rplan);
struct kr_query *qry = req->current_query;
if (ctx->state & (KNOT_STATE_FAIL|KNOT_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
return ctx->state; /* Already resolved/failed */
}
......@@ -133,7 +132,7 @@ static int peek(knot_layer_t *ctx, knot_pkt_t *pkt)
}
}
if (ret == 0) {
DEBUG_MSG("=> satisfied from cache\n");
DEBUG_MSG(qry, "=> satisfied from cache\n");
qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
......@@ -277,8 +276,7 @@ static int stash_answer(struct kr_query *qry, knot_pkt_t *pkt, map_t *stash, mm_
static int stash(knot_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_rplan *rplan = &req->rplan;
struct kr_query *qry = kr_rplan_current(rplan);
struct kr_query *qry = req->current_query;
if (!qry || ctx->state & KNOT_STATE_FAIL) {
return ctx->state;
}
......@@ -296,19 +294,19 @@ static int stash(knot_layer_t *ctx, knot_pkt_t *pkt)
map_t stash = map_make();
stash.malloc = (map_alloc_f) mm_alloc;
stash.free = (map_free_f) mm_free;
stash.baton = rplan->pool;
stash.baton = &req->pool;
int ret = 0;
bool is_auth = knot_wire_get_aa(pkt->wire);
if (is_auth) {
ret = stash_answer(qry, pkt, &stash, rplan->pool);
ret = stash_answer(qry, pkt, &stash, &req->pool);
}
/* Cache authority only if chasing referral/cname chain */
if (!is_auth || qry != HEAD(rplan->pending)) {
ret = stash_authority(qry, pkt, &stash, rplan->pool);
if (!is_auth || qry != TAIL(req->rplan.pending)) {
ret = stash_authority(qry, pkt, &stash, &req->pool);
}
/* Cache DS records in referrals */
if (!is_auth && knot_pkt_has_dnssec(pkt)) {
stash_ds(qry, pkt, &stash, rplan->pool);
stash_ds(qry, pkt, &stash, &req->pool);
}
/* Cache stashed records */
if (ret == 0 && stash.root != NULL) {
......
......@@ -276,7 +276,7 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
{
int ret = 0;
struct kr_request *req = ctx->data;
struct kr_query *qry = kr_rplan_current(&req->rplan);
struct kr_query *qry = req->current_query;
/* Ignore faulty or unprocessed responses. */
if (ctx->state & (KNOT_STATE_FAIL|KNOT_STATE_CONSUME)) {
return ctx->state;
......
......@@ -26,10 +26,11 @@
#include "lib/layer/iterate.h"
#include "lib/dnssec/ta.h"
#define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(rplan), "resl", fmt)
#define DEBUG_MSG(qry, fmt...) QRDEBUG((qry), "resl", fmt)
/** @internal Macro for iterating module layers. */
#define ITERATE_LAYERS(req, func, ...) \
#define ITERATE_LAYERS(req, qry, func, ...) \
(req)->current_query = (qry); \
for (unsigned i = 0; i < (req)->ctx->modules->len; ++i) { \
struct kr_module *mod = (req)->ctx->modules->at[i]; \
if (mod->layer ) { \
......@@ -38,7 +39,8 @@
(req)->state = layer.api->func(&layer, ##__VA_ARGS__); \
} \
} \
}
} /* Invalidate current query. */ \
(req)->current_query = NULL
/* Randomize QNAME letter case.
* This adds 32 bits of randomness at maximum, but that's more than an average domain name length.
......@@ -124,13 +126,13 @@ static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param)
if (!next_type || kr_rplan_satisfies(qry->parent, qry->ns.name, KNOT_CLASS_IN, next_type)) {
/* Fall back to SBELT if root server query fails. */
if (!next_type && qry->zone_cut.name[0] == '\0') {
DEBUG_MSG("=> fallback to root hints\n");
DEBUG_MSG(qry, "=> fallback to root hints\n");
kr_zonecut_set_sbelt(ctx, &qry->zone_cut);
qry->flags |= QUERY_NO_THROTTLE; /* Pick even bad SBELT servers */
return kr_error(EAGAIN);
}
/* No IPv4 nor IPv6, flag server as unuseable. */
DEBUG_MSG("=> unresolvable NS address, bailing out\n");
DEBUG_MSG(qry, "=> unresolvable NS address, bailing out\n");
qry->ns.reputation |= KR_NS_NOIP4 | KR_NS_NOIP6;
kr_nsrep_update_rep(&qry->ns, qry->ns.reputation, ctx->cache_rep);
invalidate_ns(rplan, qry);
......@@ -249,6 +251,7 @@ int kr_resolve_begin(struct kr_request *request, struct kr_context *ctx, knot_pk
request->answer = answer;
request->options = ctx->options;
request->state = KNOT_STATE_CONSUME;
request->current_query = NULL;
/* Expect first query */
kr_rplan_init(&request->rplan, request, &request->pool);
......@@ -284,7 +287,7 @@ static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
knot_wire_set_rcode(answer->wire, KNOT_RCODE_NOERROR);
/* Expect answer, pop if satisfied immediately */
ITERATE_LAYERS(request, begin, request);
ITERATE_LAYERS(request, qry, begin, request);
if (request->state == KNOT_STATE_DONE) {
kr_rplan_pop(rplan, qry);
}
......@@ -295,7 +298,6 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
{
struct kr_rplan *rplan = &request->rplan;
struct kr_context *ctx = request->ctx;
struct kr_query *qry = kr_rplan_current(rplan);
/* Empty resolution plan, push packet as the new query */
if (packet && kr_rplan_empty(rplan)) {
......@@ -306,11 +308,12 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
}
/* Different processing for network error */
struct kr_query *qry = TAIL(rplan->pending);
bool tried_tcp = (qry->flags & QUERY_TCP);
if (!packet || packet->size == 0) {
/* Network error, retry over TCP. */
if (!tried_tcp) {
DEBUG_MSG("=> NS unreachable, retrying over TCP\n");
DEBUG_MSG(qry, "=> NS unreachable, retrying over TCP\n");
qry->flags |= QUERY_TCP;
return KNOT_STATE_PRODUCE;
}
......@@ -321,7 +324,7 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
if (qname_raw && qry->secret != 0) {
randomized_qname_case(qname_raw, qry->secret);
}
ITERATE_LAYERS(request, consume, packet);
ITERATE_LAYERS(request, qry, consume, packet);
}
/* Resolution failed, invalidate current NS. */
......@@ -347,7 +350,7 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
qry->flags &= ~(QUERY_CACHED|QUERY_TCP);
}
ITERATE_LAYERS(request, reset);
ITERATE_LAYERS(request, qry, reset);
/* Do not finish with bogus answer. */
if (qry->flags & QUERY_DNSSEC_BOGUS) {
......@@ -403,10 +406,10 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
return KNOT_STATE_FAIL;
}
if (qry->sname[0] != '\0') {
DEBUG_MSG("=> root priming query\n");
DEBUG_MSG(qry, "=> root priming query\n");
zone_cut_subreq(rplan, qry, qry->zone_cut.name, KNOT_RRTYPE_NS);
} else {
DEBUG_MSG("=> using root hints\n");
DEBUG_MSG(qry, "=> using root hints\n");
}
qry->flags &= ~QUERY_AWAIT_CUT;
return KNOT_STATE_DONE;
......@@ -424,7 +427,7 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
}
/* Disable DNSSEC if it enters NTA. */
if (kr_ta_get(negative_anchors, qry->zone_cut.name)){
DEBUG_MSG(">< negative TA, going insecure\n");
DEBUG_MSG(qry, ">< negative TA, going insecure\n");
qry->flags &= ~QUERY_DNSSEC_WANT;
}
/* Enable DNSSEC if enters a new island of trust. */
......@@ -435,7 +438,7 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
WITH_DEBUG {
char qname_str[KNOT_DNAME_MAXLEN];
knot_dname_to_str(qname_str, qry->zone_cut.name, sizeof(qname_str));
DEBUG_MSG(">< TA: '%s'\n", qname_str);
DEBUG_MSG(qry, ">< TA: '%s'\n", qname_str);
}
}
if (want_secured && !qry->zone_cut.trust_anchor) {
......@@ -473,21 +476,21 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *type, knot_pkt_t *packet)
{
struct kr_rplan *rplan = &request->rplan;
struct kr_query *qry = kr_rplan_current(rplan);
unsigned ns_election_iter = 0;
/* No query left for resolution */
if (kr_rplan_empty(rplan)) {
return KNOT_STATE_FAIL;
}
/* Resolve current query and produce dependent or finish */
ITERATE_LAYERS(request, produce, packet);
struct kr_query *qry = TAIL(rplan->pending);
ITERATE_LAYERS(request, qry, produce, packet);
if (request->state != KNOT_STATE_FAIL && knot_wire_get_qr(packet->wire)) {
/* Produced an answer, consume it. */
qry->secret = 0;
request->state = KNOT_STATE_CONSUME;
ITERATE_LAYERS(request, consume, packet);
ITERATE_LAYERS(request, qry, consume, packet);
}
switch(request->state) {
case KNOT_STATE_FAIL: return request->state;
......@@ -497,13 +500,13 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
if (qry->flags & QUERY_RESOLVED) {
kr_rplan_pop(rplan, qry);
}
ITERATE_LAYERS(request, reset);
ITERATE_LAYERS(request, qry, reset);
return kr_rplan_empty(rplan) ? KNOT_STATE_DONE : KNOT_STATE_PRODUCE;
}
/* This query has RD=0 or is ANY, stop here. */
if (qry->stype == KNOT_RRTYPE_ANY || !knot_wire_get_rd(request->answer->wire)) {
DEBUG_MSG("=> qtype is ANY or RD=0, bail out\n");
DEBUG_MSG(qry, "=> qtype is ANY or RD=0, bail out\n");
return KNOT_STATE_FAIL;
}
......@@ -521,7 +524,7 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
* elect best address only, otherwise elect a completely new NS.
*/
if(++ns_election_iter >= KR_ITER_LIMIT) {
DEBUG_MSG("=> couldn't converge NS selection, bail out\n");
DEBUG_MSG(qry, "=> couldn't converge NS selection, bail out\n");
return KNOT_STATE_FAIL;
}
if (qry->flags & (QUERY_AWAIT_IPV4|QUERY_AWAIT_IPV6)) {
......@@ -529,14 +532,14 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
} else if (!qry->ns.name || !(qry->flags & QUERY_TCP)) { /* Keep address when TCP retransmit. */
/* Root DNSKEY must be fetched from the hints to avoid chicken and egg problem. */
if (qry->sname[0] == '\0' && qry->stype == KNOT_RRTYPE_DNSKEY) {
DEBUG_MSG("=> priming root DNSKEY\n");
DEBUG_MSG(qry, "=> priming root DNSKEY\n");
kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
qry->flags |= QUERY_NO_THROTTLE; /* Pick even bad SBELT servers */
}
kr_nsrep_elect(qry, request->ctx);
if (qry->ns.score > KR_NS_MAX_SCORE) {
DEBUG_MSG("=> no valid NS left\n");
ITERATE_LAYERS(request, reset);
DEBUG_MSG(qry, "=> no valid NS left\n");
ITERATE_LAYERS(request, qry, reset);
kr_rplan_pop(rplan, qry);
return KNOT_STATE_PRODUCE;
}
......@@ -549,7 +552,7 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
qry->flags &= ~(QUERY_AWAIT_IPV6|QUERY_AWAIT_IPV4|QUERY_TCP);
goto ns_election; /* Must try different NS */
}
ITERATE_LAYERS(request, reset);
ITERATE_LAYERS(request, qry, reset);
return KNOT_STATE_PRODUCE;
}
......@@ -570,7 +573,7 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
break;
}
inet_ntop(addr->sa_family, kr_nsrep_inaddr(qry->ns.addr[i]), ns_str, sizeof(ns_str));
DEBUG_MSG("%s: '%s' score: %u zone cut: '%s' m12n: '%s' type: '%s'\n",
DEBUG_MSG(qry, "%s: '%s' score: %u zone cut: '%s' m12n: '%s' type: '%s'\n",
i == 0 ? "=> querying" : " optional",
ns_str, qry->ns.score, zonecut_str, qname_str, type_str);
}
......@@ -599,8 +602,8 @@ int kr_resolve_finish(struct kr_request *request, int state)
}
}
ITERATE_LAYERS(request, finish);
DEBUG_MSG("finished: %d, queries: %zu, mempool: %zu B\n",
ITERATE_LAYERS(request, NULL, finish);
DEBUG_MSG(NULL, "finished: %d, queries: %zu, mempool: %zu B\n",
state, list_size(&rplan->resolved), (size_t) mp_total_size(request->pool.ctx));
return KNOT_STATE_DONE;
}
......
......@@ -108,6 +108,7 @@ struct kr_context
struct kr_request {
struct kr_context *ctx;
knot_pkt_t *answer;
struct kr_query *current_query; /**< Current evaluated query. */
struct {
const knot_rrset_t *key;
const struct sockaddr *addr;
......
......@@ -144,15 +144,6 @@ int kr_rplan_pop(struct kr_rplan *rplan, struct kr_query *qry)
return KNOT_EOK;
}
/* Return the query currently being resolved (tail of the pending list),
 * or NULL when the resolution plan has no pending queries. */
struct kr_query *kr_rplan_current(struct kr_rplan *rplan)
{
	return kr_rplan_empty(rplan) ? NULL : TAIL(rplan->pending);
}
bool kr_rplan_satisfies(struct kr_query *closure, const knot_dname_t *name, uint16_t cls, uint16_t type)
{
while (closure != NULL) {
......
......@@ -129,13 +129,6 @@ struct kr_query *kr_rplan_push(struct kr_rplan *rplan, struct kr_query *parent,
*/
int kr_rplan_pop(struct kr_rplan *rplan, struct kr_query *qry);
/**
* Currently resolved query (at the top).
* @param rplan plan instance
* @return query instance or NULL if empty
*/
struct kr_query *kr_rplan_current(struct kr_rplan *rplan);
/**
* Return true if resolution chain satisfies given query.
*/
......
......@@ -90,8 +90,8 @@ static int answer_query(knot_pkt_t *pkt, pack_t *addr_set, struct kr_query *qry)
static int query(knot_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *param = ctx->data;
struct kr_query *qry = kr_rplan_current(&param->rplan);
struct kr_request *req = ctx->data;
struct kr_query *qry = req->current_query;
if (!qry || ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
return ctx->state;
}
......
......@@ -26,7 +26,6 @@ static void test_rplan_params(void **state)
assert_null((void *)kr_rplan_push(NULL, NULL, NULL, 0, 0));
assert_int_equal(kr_rplan_pop(NULL, NULL), KNOT_EINVAL);
assert_true(kr_rplan_empty(NULL) == true);
assert_null((void *)kr_rplan_current(NULL));
kr_rplan_deinit(NULL);
/* NULL mandatory parameters */
......@@ -36,7 +35,6 @@ static void test_rplan_params(void **state)
assert_null((void *)kr_rplan_push(&rplan, NULL, NULL, 0, 0));
assert_int_equal(kr_rplan_pop(&rplan, NULL), KNOT_EINVAL);
assert_true(kr_rplan_empty(&rplan) == true);
assert_null((void *)kr_rplan_current(&rplan));
kr_rplan_deinit(&rplan);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment