Verified Commit a0cb9296 authored by Grigorii Demidov's avatar Grigorii Demidov Committed by Vladimír Čunát

prepare for adding EDNS modules

- answer_finalize: new layer
- kr_request: keep ::qsource.packet beyond the begin phase
- kr_request: add ::daemon_context
parent e28d0b27
......@@ -37,6 +37,7 @@ enum {
SLOT_consume,
SLOT_produce,
SLOT_checkout,
SLOT_answer_finalize,
SLOT_count
};
#define SLOT_size sizeof(int)
......@@ -129,6 +130,7 @@ static int l_ffi_deinit(struct kr_module *module)
LAYER_UNREGISTER(L, api, consume);
LAYER_UNREGISTER(L, api, produce);
LAYER_UNREGISTER(L, api, checkout);
LAYER_UNREGISTER(L, api, answer_finalize);
LAYER_UNREGISTER(L, api, reset);
free(api);
}
......@@ -205,6 +207,13 @@ static int l_ffi_layer_checkout(kr_layer_t *ctx, knot_pkt_t *pkt, struct sockadd
lua_pushboolean(L, type == SOCK_STREAM);
return l_ffi_call(L, 5);
}
/** Lua trampoline for the new `answer_finalize` layer callback.
 * Mirrors the other l_ffi_layer_* wrappers (e.g. l_ffi_layer_checkout):
 * LAYER_FFI_CALL presumably fetches the module's Lua handler and pushes
 * the initial argument(s) — TODO confirm against the macro definition,
 * which is outside this hunk. */
static int l_ffi_layer_answer_finalize(kr_layer_t *ctx)
{
	LAYER_FFI_CALL(ctx, answer_finalize);
	/* Pass the request as a light userdata; the Lua side receives it
	 * as a kr_request pointer. */
	lua_pushlightuserdata(L, ctx->req);
	/* 2 = argument count handed to the Lua handler (state + request). */
	return l_ffi_call(L, 2);
}
#undef LAYER_FFI_CALL
/** @internal Conditionally register layer trampoline
......@@ -234,6 +243,7 @@ static kr_layer_api_t *l_ffi_layer_create(lua_State *L, struct kr_module *module
LAYER_REGISTER(L, api, consume);
LAYER_REGISTER(L, api, produce);
LAYER_REGISTER(L, api, checkout);
LAYER_REGISTER(L, api, answer_finalize);
LAYER_REGISTER(L, api, reset);
/* Begin is always set, as it initializes layer baton. */
api->begin = l_ffi_layer_begin;
......
......@@ -168,11 +168,9 @@ struct kr_request {
knot_pkt_t *answer;
struct kr_query *current_query;
struct {
const knot_rrset_t *key;
const struct sockaddr *addr;
const struct sockaddr *dst_addr;
const knot_pkt_t *packet;
const knot_rrset_t *opt;
_Bool tcp;
size_t size;
} qsource;
......@@ -196,6 +194,7 @@ struct kr_request {
int vars_ref;
knot_mm_t pool;
unsigned int uid;
void *daemon_context;
};
enum kr_rank {KR_RANK_INITIAL, KR_RANK_OMIT, KR_RANK_TRY, KR_RANK_INDET = 4, KR_RANK_BOGUS, KR_RANK_MISMATCH, KR_RANK_MISSING, KR_RANK_INSECURE, KR_RANK_AUTH = 16, KR_RANK_SECURE = 32};
struct kr_cache {
......
......@@ -294,6 +294,7 @@ static struct request_ctx *request_create(struct worker_ctx *worker,
req->pool = pool;
req->vars_ref = LUA_NOREF;
req->uid = uid;
req->daemon_context = worker;
/* Remember query source addr */
if (!addr || (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)) {
......@@ -347,25 +348,28 @@ static int request_start(struct request_ctx *ctx, knot_pkt_t *query)
KNOT_WIRE_MIN_PKTSIZE);
}
req->qsource.size = query->size;
if (knot_pkt_has_tsig(query)) {
req->qsource.size += query->tsig_wire.len;
}
req->answer = knot_pkt_new(NULL, answer_max, &req->pool);
if (!req->answer) {
knot_pkt_t *answer = knot_pkt_new(NULL, answer_max, &req->pool);
if (!answer) { /* Failed to allocate answer */
return kr_error(ENOMEM);
}
/* Remember query source TSIG key */
if (query->tsig_rr) {
req->qsource.key = knot_rrset_copy(query->tsig_rr, &req->pool);
knot_pkt_t *pkt = knot_pkt_new(NULL, req->qsource.size, &req->pool);
if (!pkt) {
return kr_error(ENOMEM);
}
/* Remember query source EDNS data */
if (query->opt_rr) {
req->qsource.opt = knot_rrset_copy(query->opt_rr, &req->pool);
if (knot_pkt_copy(pkt, query) != 0) {
return kr_error(ENOMEM);
}
req->qsource.packet = pkt;
/* Start resolution */
struct worker_ctx *worker = ctx->worker;
struct engine *engine = worker->engine;
kr_resolve_begin(req, &engine->resolver, req->answer);
kr_resolve_begin(req, &engine->resolver, answer);
worker->stats.queries += 1;
/* Throttle outbound queries only when high pressure */
if (worker->stats.concurrent < QUERY_RATE_THRESHOLD) {
......@@ -1063,10 +1067,11 @@ static int qr_task_finalize(struct qr_task *task, int state)
return 0;
}
struct request_ctx *ctx = task->ctx;
struct session *source_session = ctx->source.session;
kr_resolve_finish(&ctx->req, state);
task->finished = true;
if (ctx->source.session == NULL) {
if (source_session == NULL) {
(void) qr_task_on_send(task, NULL, kr_error(EIO));
return state == KR_STATE_DONE ? 0 : kr_error(EIO);
}
......@@ -1075,7 +1080,6 @@ static int qr_task_finalize(struct qr_task *task, int state)
qr_task_ref(task);
/* Send back answer */
struct session *source_session = ctx->source.session;
assert(!session_flags(source_session)->closing);
assert(ctx->source.addr.ip.sa_family != AF_UNSPEC);
int res = qr_task_send(task, source_session,
......
......@@ -78,7 +78,7 @@ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
const bool risky_vldr = is_negative && qf->FORWARD && qf->CNAME;
/* ^^ CNAME'ed NXDOMAIN answer in forwarding mode can contain
* unvalidated records; original commit: d6e22f476. */
if (knot_wire_get_cd(req->answer->wire) || qf->STUB || risky_vldr) {
if (knot_wire_get_cd(req->qsource.packet->wire) || qf->STUB || risky_vldr) {
kr_rank_set(&rank, KR_RANK_OMIT);
} else {
if (qf->DNSSEC_BOGUS) {
......
......@@ -91,7 +91,7 @@ static uint8_t get_lowest_rank(const struct kr_request *req, const struct kr_que
{
/* TODO: move rank handling into the iterator (DNSSEC_* flags)? */
const bool allow_unverified =
knot_wire_get_cd(req->answer->wire) || qry->flags.STUB;
knot_wire_get_cd(req->qsource.packet->wire) || qry->flags.STUB;
/* in stub mode we don't trust RRs anyway ^^ */
if (qry->flags.NONAUTH) {
return KR_RANK_INITIAL;
......
......@@ -84,6 +84,10 @@ struct kr_layer_api {
* don't affect the resolution or rest of the processing. */
int (*checkout)(kr_layer_t *ctx, knot_pkt_t *packet, struct sockaddr *dst, int type);
/** Finalises the answer.
* Last chance to affect what will get into the answer, including EDNS.*/
int (*answer_finalize)(kr_layer_t *ctx);
/** The module can store anything in here. */
void *data;
};
......
......@@ -916,7 +916,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
}
/* Pass-through if CD bit is set. */
if (knot_wire_get_cd(req->answer->wire)) {
if (knot_wire_get_cd(req->qsource.packet->wire)) {
check_wildcard(ctx);
wildcard_adjust_to_wire(req, qry);
rank_records(ctx, KR_RANK_OMIT, NULL);
......
......@@ -75,7 +75,11 @@ static void set_yield(ranked_rr_array_t *array, const uint32_t qry_uid, const bo
static int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->req;
knot_pkt_t *pkt_copy = knot_pkt_new(NULL, pkt->size, &req->pool);
size_t pkt_size = pkt->size;
if (knot_pkt_has_tsig(pkt)) {
pkt_size += pkt->tsig_wire.len;
}
knot_pkt_t *pkt_copy = knot_pkt_new(NULL, pkt_size, &req->pool);
struct kr_layer_pickle *pickle = mm_alloc(&req->pool, sizeof(*pickle));
if (pickle && pkt_copy && knot_pkt_copy(pkt_copy, pkt) == 0) {
struct kr_query *qry = req->current_query;
......@@ -95,6 +99,7 @@ static int reset_yield(kr_layer_t *ctx) { return kr_ok(); }
/* No-op stand-ins for layer callbacks while a query is yielded
 * (names suggest they back the RESUME/yield machinery — the real
 * replay work appears to live in consume_yield above; confirm against
 * the full file). Each simply reports success. */
static int finish_yield(kr_layer_t *ctx) { return kr_ok(); }
static int produce_yield(kr_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
static int checkout_yield(kr_layer_t *ctx, knot_pkt_t *packet, struct sockaddr *dst, int type) { return kr_ok(); }
static int answer_finalize_yield(kr_layer_t *ctx) { return kr_ok(); }
/** @internal Macro for iterating module layers. */
#define RESUME_LAYERS(from, r, qry, func, ...) \
......@@ -381,15 +386,17 @@ static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param)
return ret;
}
static int edns_put(knot_pkt_t *pkt)
static int edns_put(knot_pkt_t *pkt, bool reclaim)
{
if (!pkt->opt_rr) {
return kr_ok();
}
/* Reclaim reserved size. */
int ret = knot_pkt_reclaim(pkt, knot_edns_wire_size(pkt->opt_rr));
if (ret != 0) {
return ret;
if (reclaim) {
/* Reclaim reserved size. */
int ret = knot_pkt_reclaim(pkt, knot_edns_wire_size(pkt->opt_rr));
if (ret != 0) {
return ret;
}
}
/* Write to packet. */
assert(pkt->current == KNOT_ADDITIONAL);
......@@ -446,16 +453,17 @@ static int edns_create(knot_pkt_t *pkt, knot_pkt_t *template, struct kr_request
return knot_pkt_reserve(pkt, wire_size);
}
static int answer_prepare(knot_pkt_t *answer, knot_pkt_t *query, struct kr_request *req)
static int answer_prepare(struct kr_request *req, knot_pkt_t *query)
{
knot_pkt_t *answer = req->answer;
if (knot_pkt_init_response(answer, query) != 0) {
return kr_error(ENOMEM); /* Failed to initialize answer */
}
/* Handle EDNS in the query */
if (knot_pkt_has_edns(query)) {
int ret = edns_create(answer, query, req);
if (ret != 0){
return ret;
answer->opt_rr = knot_rrset_copy(req->ctx->opt_rr, &answer->mm);
if (answer->opt_rr == NULL){
return kr_error(ENOMEM);
}
/* Set DO bit if set (DNSSEC requested). */
if (knot_pkt_has_dnssec(query)) {
......@@ -570,7 +578,7 @@ static int answer_fail(struct kr_request *request)
/* OPT in SERVFAIL response is still useful for cookies/additional info. */
knot_pkt_begin(answer, KNOT_ADDITIONAL);
answer_padding(request); /* Ignore failed padding in SERVFAIL answer. */
ret = edns_put(answer);
ret = edns_put(answer, false);
}
return ret;
}
......@@ -633,7 +641,6 @@ static int answer_finalize(struct kr_request *request, int state)
return answer_fail(request);
}
/* Write EDNS information */
int ret = 0;
if (answer->opt_rr) {
if (request->has_tls) {
if (answer_padding(request) != kr_ok()) {
......@@ -641,7 +648,11 @@ static int answer_finalize(struct kr_request *request, int state)
}
}
knot_pkt_begin(answer, KNOT_ADDITIONAL);
ret = edns_put(answer);
int ret = knot_pkt_put(answer, KNOT_COMPR_HINT_NONE,
answer->opt_rr, KNOT_PF_FREE);
if (ret != KNOT_EOK) {
return answer_fail(request);
}
}
if (!last) secure = false; /*< should be no-op, mostly documentation */
......@@ -676,7 +687,7 @@ static int answer_finalize(struct kr_request *request, int state)
knot_wire_clear_ad(answer->wire);
}
return ret;
return kr_ok();
}
static int query_finalize(struct kr_request *request, struct kr_query *qry, knot_pkt_t *pkt)
......@@ -693,10 +704,10 @@ static int query_finalize(struct kr_request *request, struct kr_query *qry, knot
/* Stub resolution (ask for +rd and +do) */
if (qry->flags.STUB) {
knot_wire_set_rd(pkt->wire);
if (knot_pkt_has_dnssec(request->answer)) {
if (knot_pkt_has_dnssec(request->qsource.packet)) {
knot_edns_set_do(pkt->opt_rr);
}
if (knot_wire_get_cd(request->answer->wire)) {
if (knot_wire_get_cd(request->qsource.packet->wire)) {
knot_wire_set_cd(pkt->wire);
}
/* Full resolution (ask for +cd and +do) */
......@@ -708,7 +719,7 @@ static int query_finalize(struct kr_request *request, struct kr_query *qry, knot
knot_edns_set_do(pkt->opt_rr);
knot_wire_set_cd(pkt->wire);
}
ret = edns_put(pkt);
ret = edns_put(pkt, true);
}
}
return ret;
......@@ -743,7 +754,6 @@ static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
const knot_dname_t *qname = knot_pkt_qname(packet);
uint16_t qclass = knot_pkt_qclass(packet);
uint16_t qtype = knot_pkt_qtype(packet);
bool cd_is_set = knot_wire_get_cd(packet->wire);
struct kr_query *qry = NULL;
struct kr_context *ctx = request->ctx;
struct kr_cookie_ctx *cookie_ctx = ctx ? &ctx->cookie_ctx : NULL;
......@@ -778,16 +788,15 @@ static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
knot_wire_set_ra(answer->wire);
knot_wire_set_rcode(answer->wire, KNOT_RCODE_NOERROR);
if (cd_is_set) {
assert(request->qsource.packet);
if (knot_wire_get_cd(request->qsource.packet->wire)) {
knot_wire_set_cd(answer->wire);
} else if (qry->flags.DNSSEC_WANT) {
knot_wire_set_ad(answer->wire);
}
/* Expect answer, pop if satisfied immediately */
request->qsource.packet = packet;
ITERATE_LAYERS(request, qry, begin);
request->qsource.packet = NULL;
if ((request->state & KR_STATE_DONE) != 0) {
kr_rplan_pop(rplan, qry);
} else if (qname == NULL) {
......@@ -903,7 +912,7 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
/* Empty resolution plan, push packet as the new query */
if (packet && kr_rplan_empty(rplan)) {
if (answer_prepare(request->answer, packet, request) != 0) {
if (answer_prepare(request, packet) != 0) {
return KR_STATE_FAIL;
}
return resolve_query(request, packet);
......@@ -1140,9 +1149,9 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query
/* Enable DNSSEC if enters a new island of trust. */
bool want_secured = (qry->flags.DNSSEC_WANT) &&
!knot_wire_get_cd(request->answer->wire);
!knot_wire_get_cd(request->qsource.packet->wire);
if (!(qry->flags.DNSSEC_WANT) &&
!knot_wire_get_cd(request->answer->wire) &&
!knot_wire_get_cd(request->qsource.packet->wire) &&
kr_ta_get(trust_anchors, wanted_name)) {
qry->flags.DNSSEC_WANT = true;
want_secured = true;
......@@ -1213,9 +1222,9 @@ static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
/* Enable DNSSEC if entering a new (or different) island of trust,
* and update the TA RRset if required. */
bool want_secured = (qry->flags.DNSSEC_WANT) &&
!knot_wire_get_cd(request->answer->wire);
!knot_wire_get_cd(request->qsource.packet->wire);
knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, qry->zone_cut.name);
if (!knot_wire_get_cd(request->answer->wire) && ta_rr) {
if (!knot_wire_get_cd(request->qsource.packet->wire) && ta_rr) {
qry->flags.DNSSEC_WANT = true;
want_secured = true;
......@@ -1403,7 +1412,8 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
/* This query has RD=0 or is ANY, stop here. */
if (qry->stype == KNOT_RRTYPE_ANY || !knot_wire_get_rd(request->answer->wire)) {
if (qry->stype == KNOT_RRTYPE_ANY ||
!knot_wire_get_rd(request->qsource.packet->wire)) {
VERBOSE_MSG(qry, "=> qtype is ANY or RD=0, bail out\n");
return KR_STATE_FAIL;
}
......@@ -1596,14 +1606,21 @@ int kr_resolve_finish(struct kr_request *request, int state)
#ifndef NOVERBOSELOG
struct kr_rplan *rplan = &request->rplan;
#endif
/* Finalize answer */
if (answer_finalize(request, state) != 0) {
/* Finalize answer and construct wire-buffer. */
ITERATE_LAYERS(request, NULL, answer_finalize);
if (request->state == KR_STATE_FAIL) {
state = KR_STATE_FAIL;
} else if (answer_finalize(request, state) != 0) {
state = KR_STATE_FAIL;
}
/* Error during procesing, internal failure */
/* Error during processing, internal failure */
if (state != KR_STATE_DONE) {
knot_pkt_t *answer = request->answer;
if (knot_wire_get_rcode(answer->wire) == KNOT_RCODE_NOERROR) {
knot_wire_clear_ad(answer->wire);
knot_wire_clear_aa(answer->wire);
knot_wire_set_rcode(answer->wire, KNOT_RCODE_SERVFAIL);
}
}
......
......@@ -189,13 +189,11 @@ struct kr_request {
knot_pkt_t *answer;
struct kr_query *current_query; /**< Current evaluated query. */
struct {
const knot_rrset_t *key;
/** Address that originated the request. NULL for internal origin. */
const struct sockaddr *addr;
/** Address that accepted the request. NULL for internal origin. */
const struct sockaddr *dst_addr;
const knot_pkt_t *packet;
const knot_rrset_t *opt;
bool tcp; /**< true if the request is on tcp; only meaningful if (dst_addr) */
size_t size; /**< query packet size */
} qsource;
......@@ -228,6 +226,7 @@ struct kr_request {
int vars_ref; /**< Reference to per-request variable table. LUA_NOREF if not set. */
knot_mm_t pool;
unsigned int uid; /** for logging purposes only */
void *daemon_context; /** pointer to worker from daemon. Can be used in modules. */
};
/** Initializer for an array of *_selected. */
......
......@@ -120,7 +120,8 @@ doesn't provide any layer to capture events. The Lua module can however provide
There is currently an additional "feature" in comparison to C layer functions:
the ``consume``, ``produce`` and ``checkout`` functions do not get called at all
if ``state == kres.FAIL`` (note that ``finish`` does get called nevertheless).
if ``state == kres.FAIL``;
note that ``answer_finalize`` and ``finish`` get called nevertheless.
Since the modules are like any other Lua modules, you can interact with them through the CLI and and any interface.
......
......@@ -31,7 +31,7 @@ end
-- @function Find view for given request
local function evaluate(_, req)
local client_key = req.qsource.key
local client_key = req.qsource.packet.tsig_rr
local match_cb = (client_key ~= nil) and view.key[client_key:owner()] or nil
-- Search subnets otherwise
if match_cb == nil then
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment