Commit f41f3cab authored by Marek Vavruša

lib/validate: enabled YIELD, fixed validator (partially)

validator can now yield, but it doesn't plan the sub-requests directly,
that is still a job of the driver
parent 4e0a6d2e
......@@ -13,7 +13,7 @@ MODULEDIR := $(LIBDIR)/kdns_modules
# Tools
CC ?= cc
BUILD_LDFLAGS += $(LDFLAGS)
BUILD_CFLAGS := $(CFLAGS) -std=c99 -D_GNU_SOURCE -fPIC -Wall -I$(abspath .) -I$(abspath lib/generic) -I$(abspath contrib)
BUILD_CFLAGS := $(CFLAGS) -std=c99 -D_GNU_SOURCE -fPIC -Wtype-limits -Wall -I$(abspath .) -I$(abspath lib/generic) -I$(abspath contrib)
BUILD_CFLAGS += -DPACKAGE_VERSION="\"$(MAJOR).$(MINOR)\"" -DPREFIX="\"$(PREFIX)\"" -DMODULEDIR="\"$(MODULEDIR)\""
RM := rm -f
LN := ln -s
......
......@@ -23,17 +23,55 @@ For developers
The resolution process starts with the functions in :ref:`resolve.c <lib_api_rplan>`, they are responsible for:
* reacting to state machine state (i.e. calling consume layers if we have an answer ready)
* interacting with the user (i.e. asking caller for I/O, accepting queries)
* fetching assets needed by layers (i.e. zone cut, next best NS address or trust anchor)
* interacting with the library user (i.e. asking caller for I/O, accepting queries)
* fetching assets needed by layers (i.e. zone cut)
These we call the *driver*. The driver is not meant to know *"how"* the query is resolved, but rather *"when"* to execute *"what"*. Typically here you can modify or reorder the resolution plan, or request input from the caller.
This is the *driver*. The driver is not meant to know *"how"* the query resolves, but rather *"when"* to execute *"what"*.
.. image:: ../doc/resolution.png
:align: center
On the other side are *layers*. They are responsible for dissecting the packets and informing the driver about the results. For example, a produce layer can generate a sub-request, a consume layer can satisfy an outstanding query or simply log something. They also must not block, and may not be paused.
On the other side are *layers*. They are responsible for dissecting the packets and informing the driver about the results. For example, a *produce* layer generates query, a *consume* layer validates answer.
.. tip:: Layers are executed asynchronously by the driver. If you need some asset beforehand, you can signalize the driver using returning state or current query flags. For example, setting a flag ``QUERY_AWAIT_CUT`` forces driver to fetch zone cut information before the packet is consumed; setting a ``QUERY_RESOLVED`` flag makes it pop a query after the current set of layers is finished; returning ``FAIL`` state makes it fail current query. The important thing is, these actions happen **after** current set of layers is done.
.. tip:: Layers are executed asynchronously by the driver. If you need some asset beforehand, you can signal the driver by returning a state or by setting flags on the current query. For example, setting the ``QUERY_AWAIT_CUT`` flag forces the driver to fetch zone cut information before the packet is consumed; setting the ``QUERY_RESOLVED`` flag makes it pop the query after the current set of layers is finished; returning the ``FAIL`` state makes it fail the current query.
Layers can also change course of resolution, for example by appending additional queries.
.. code-block:: lua
consume = function (state, req, answer)
answer = kres.pkt_t(answer)
if answer:qtype() == kres.type.NS then
req = kres.request_t(req)
local qry = req:push(answer:qname(), kres.type.SOA, kres.class.IN)
qry.flags = kres.query.AWAIT_CUT
end
return state
end
This **doesn't** block the currently processed query; the newly created sub-request will start as soon as the driver finishes processing the current one. In some cases you might need to issue a sub-request and process it **before** continuing with the current one, e.g. the validator may need a DNSKEY before it can validate signatures. In this case, layers can yield and resume afterwards.
.. code-block:: lua
consume = function (state, req, answer)
answer = kres.pkt_t(answer)
if state == kres.YIELD then
print('continuing yielded layer')
return kres.DONE
else
if answer:qtype() == kres.type.NS then
req = kres.request_t(req)
local qry = req:push(answer:qname(), kres.type.SOA, kres.class.IN)
qry.flags = kres.query.AWAIT_CUT
print('planned SOA query, yielding')
return kres.YIELD
end
return state
end
end
The ``YIELD`` state is a bit special. When a layer returns it, it interrupts the current walk through the layers. When the layer receives it,
it means that it yielded before and is now being resumed. This is useful in situations where you need a sub-request to determine whether the current answer is valid or not.
Writing layers
==============
......@@ -62,8 +100,6 @@ This structure contains pointers to resolution context, resolution plan and also
This is only passive processing of the incoming answer. If you want to change the course of resolution, say satisfy a query from a local cache before the library issues a query to the nameserver, you can use states (see the :ref:`Static hints <mod-hints>` for example).
.. warning:: Never replace or push new queries onto the resolution plan, this is a job of the resolution driver. Single pass through layers expects *current query* to be constant. You can however signalize driver with requests using query flags, like ``QUERY_RESOLVED`` to mark it as resolved.
.. code-block:: c
int produce(knot_layer_t *ctx, knot_pkt_t *pkt)
......
......@@ -18,7 +18,6 @@
#include <libknot/descriptor.h>
#include <libknot/rrtype/rdname.h>
#include <libknot/rrtype/rrsig.h>
#include "lib/layer/iterate.h"
#include "lib/resolve.h"
......@@ -255,24 +254,6 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_request
return state;
}
/* Return the signer name of the first RRSIG found in the packet,
 * or NULL when the packet carries no signatures (or when the query
 * itself asked for RRSIGs, in which case no signer can be derived). */
static const knot_dname_t *signature_authority(knot_pkt_t *pkt)
{
	/* Can't find signer for RRSIGs, bail out. */
	if (knot_pkt_qtype(pkt) == KNOT_RRTYPE_RRSIG) {
		return NULL;
	}
	/* Scan ANSWER first, then AUTHORITY, and stop at the first RRSIG. */
	for (knot_section_t section_id = KNOT_ANSWER; section_id <= KNOT_AUTHORITY; ++section_id) {
		const knot_pktsection_t *section = knot_pkt_section(pkt, section_id);
		for (unsigned idx = 0; idx < section->count; ++idx) {
			const knot_rrset_t *rr = knot_pkt_rr(section, idx);
			if (rr->type != KNOT_RRTYPE_RRSIG) {
				continue;
			}
			/* Signer name lives in the RRSIG RDATA; the first record suffices. */
			return knot_rrsig_signer_name(&rr->rrs, 0);
		}
	}
	return NULL;
}
static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
int result = KNOT_STATE_CONSUME;
......@@ -311,34 +292,8 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
}
}
/* Track difference between current TA and signer name.
* This indicates that the NS is auth for both parent-child, and we must update DS/DNSKEY to validate it.
* @todo: This has to be checked here before we put the data into packet, there is no "DEFER" or "PAUSE" action yet.
*/
const bool track_pc_change = (!(qry->flags & QUERY_CACHED) && (qry->flags & QUERY_DNSSEC_WANT));
const knot_dname_t *ta_name = qry->zone_cut.trust_anchor ? qry->zone_cut.trust_anchor->owner : NULL;
const knot_dname_t *signer = signature_authority(pkt);
if (track_pc_change && ta_name && signer && !knot_dname_is_equal(ta_name, signer)) {
DEBUG_MSG(">< cut changed, needs revalidation\n");
if (knot_dname_is_sub(signer, qry->zone_cut.name)) {
qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
} else if (!knot_dname_is_equal(signer, qry->zone_cut.name)) {
/* Key signer is above the current cut, so we can't validate it. This happens when
a server is authoritative for both grandparent, parent and child zone.
Ascend to parent cut, and refetch authority for signer. */
if (qry->zone_cut.parent) {
memcpy(&qry->zone_cut, qry->zone_cut.parent, sizeof(qry->zone_cut));
} else {
qry->flags |= QUERY_AWAIT_CUT;
}
qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
} /* else zone cut matches, but DS/DNSKEY doesn't => refetch. */
result = KNOT_STATE_YIELD;
}
/* CONSUME => Unhelpful referral.
* DONE => Zone cut updated.
* YIELD => Bail out. */
* DONE => Zone cut updated. */
return result;
}
......
......@@ -272,6 +272,24 @@ static int update_delegation(struct kr_request *req, struct kr_query *qry, knot_
return ret;
}
/** @internal Find the signer name of the first RRSIG in the packet
 *  (ANSWER section first, then AUTHORITY), or NULL if no RRSIG is present.
 *  NOTE(review): callers appear to use this to detect a signer that differs
 *  from the current trust anchor — confirm against the validate() usage. */
static const knot_dname_t *signature_authority(knot_pkt_t *pkt)
{
	/* Can't find signer for RRSIGs, bail out. */
	if (knot_pkt_qtype(pkt) == KNOT_RRTYPE_RRSIG) {
		return NULL;
	}
	/* Walk ANSWER and AUTHORITY sections in order. */
	for (knot_section_t i = KNOT_ANSWER; i <= KNOT_AUTHORITY; ++i) {
		const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
		for (unsigned k = 0; k < sec->count; ++k) {
			const knot_rrset_t *rr = knot_pkt_rr(sec, k);
			if (rr->type == KNOT_RRTYPE_RRSIG) {
				/* Signer name is part of the RRSIG RDATA;
				 * the first record is representative. */
				return knot_rrsig_signer_name(&rr->rrs, 0);
			}
		}
	}
	return NULL;
}
static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
{
int ret = 0;
......@@ -294,6 +312,34 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
return KNOT_STATE_FAIL;
}
/* Track difference between current TA and signer name.
* This indicates that the NS is auth for both parent-child, and we must update DS/DNSKEY to validate it.
* @todo: This has to be checked here before we put the data into packet, there is no "DEFER" or "PAUSE" action yet.
*/
const bool track_pc_change = (!(qry->flags & QUERY_CACHED) && (qry->flags & QUERY_DNSSEC_WANT));
const knot_dname_t *ta_name = qry->zone_cut.trust_anchor ? qry->zone_cut.trust_anchor->owner : NULL;
const knot_dname_t *signer = signature_authority(pkt);
if (track_pc_change && ta_name && signer && !knot_dname_is_equal(ta_name, signer)) {
if (ctx->state == KNOT_STATE_YIELD) { /* Already yielded for revalidation. */
return KNOT_STATE_FAIL;
}
DEBUG_MSG(qry, ">< cut changed, needs revalidation\n");
if (knot_dname_is_sub(signer, qry->zone_cut.name)) {
qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
} else if (!knot_dname_is_equal(signer, qry->zone_cut.name)) {
/* Key signer is above the current cut, so we can't validate it. This happens when
a server is authoritative for both grandparent, parent and child zone.
Ascend to parent cut, and refetch authority for signer. */
if (qry->zone_cut.parent) {
memcpy(&qry->zone_cut, qry->zone_cut.parent, sizeof(qry->zone_cut));
} else {
qry->flags |= QUERY_AWAIT_CUT;
}
qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
} /* else zone cut matches, but DS/DNSKEY doesn't => refetch. */
return KNOT_STATE_YIELD;
}
/* Check if this is a DNSKEY answer, check trust chain and store. */
uint8_t pkt_rcode = knot_wire_get_rcode(pkt->wire);
uint16_t qtype = knot_pkt_qtype(pkt);
......@@ -368,7 +414,7 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
}
}
DEBUG_MSG(qry, "<= answer valid, OK\n");
return ctx->state;
return KNOT_STATE_DONE;
}
/** Module implementation. */
const knot_layer_api_t *validate_layer(struct kr_module *module)
......
......@@ -387,7 +387,9 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
}
/* Pop query if resolved. */
if (qry->flags & QUERY_RESOLVED) {
if (request->state == KNOT_STATE_YIELD) {
return KNOT_STATE_PRODUCE; /* Requery */
} else if (qry->flags & QUERY_RESOLVED) {
kr_rplan_pop(rplan, qry);
} else if (!tried_tcp && (qry->flags & QUERY_TCP)) {
return KNOT_STATE_PRODUCE; /* Requery over TCP */
......@@ -425,51 +427,13 @@ static struct kr_query *zone_cut_subreq(struct kr_rplan *rplan, struct kr_query
return next;
}
/** @internal Check current zone cut status and credibility, spawn subrequests if needed. */
static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot_pkt_t *packet)
/* @todo: Validator refactoring, keep this in driver for now. */
static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
{
struct kr_rplan *rplan = &request->rplan;
map_t *trust_anchors = &request->ctx->trust_anchors;
map_t *negative_anchors = &request->ctx->negative_anchors;
/* The query wasn't resolved from cache,
* now it's the time to look up closest zone cut from cache. */
if (qry->flags & QUERY_AWAIT_CUT) {
/* Want DNSSEC if it's possible to secure this name (e.g. is covered by any TA) */
if (!kr_ta_covers(negative_anchors, qry->zone_cut.name) &&
kr_ta_covers(trust_anchors, qry->zone_cut.name)) {
qry->flags |= QUERY_DNSSEC_WANT;
} else {
qry->flags &= ~QUERY_DNSSEC_WANT;
}
int ret = ns_fetch_cut(qry, request, (qry->flags & QUERY_DNSSEC_WANT));
if (ret != 0) {
/* No cached cut found, start from SBELT and issue priming query. */
if (ret == kr_error(ENOENT)) {
ret = kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
if (ret != 0) {
return KNOT_STATE_FAIL;
}
if (qry->sname[0] != '\0') {
DEBUG_MSG(qry, "=> root priming query\n");
zone_cut_subreq(rplan, qry, qry->zone_cut.name, KNOT_RRTYPE_NS);
} else {
DEBUG_MSG(qry, "=> using root hints\n");
}
qry->flags &= ~QUERY_AWAIT_CUT;
return KNOT_STATE_DONE;
} else {
return KNOT_STATE_FAIL;
}
}
/* Update minimized QNAME if zone cut changed */
if (qry->zone_cut.name[0] != '\0' && !(qry->flags & QUERY_NO_MINIMIZE)) {
if (kr_make_query(qry, packet) != 0) {
return KNOT_STATE_FAIL;
}
}
qry->flags &= ~QUERY_AWAIT_CUT;
}
/* Disable DNSSEC if it enters NTA. */
if (kr_ta_get(negative_anchors, qry->zone_cut.name)){
DEBUG_MSG(qry, ">< negative TA, going insecure\n");
......@@ -515,7 +479,57 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
return KNOT_STATE_DONE;
}
return KNOT_STATE_PRODUCE;
return KNOT_STATE_PRODUCE;
}
/** @internal Check current zone cut status and credibility, spawn subrequests if needed.
 *  Returns a KNOT_STATE_* value: FAIL on unrecoverable errors, DONE when a
 *  priming sub-request was planned, otherwise whatever trust_chain_check()
 *  decides for the query. */
static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot_pkt_t *packet)
{
	struct kr_rplan *rplan = &request->rplan;
	map_t *trust_anchors = &request->ctx->trust_anchors;
	map_t *negative_anchors = &request->ctx->negative_anchors;
	/* The query wasn't resolved from cache,
	 * now it's the time to look up closest zone cut from cache. */
	if (qry->flags & QUERY_AWAIT_CUT) {
		/* Want DNSSEC if it's possible to secure this name (e.g. is covered by any TA).
		 * A negative anchor takes precedence over a positive one. */
		if (!kr_ta_covers(negative_anchors, qry->zone_cut.name) &&
		kr_ta_covers(trust_anchors, qry->zone_cut.name)) {
			qry->flags |= QUERY_DNSSEC_WANT;
		} else {
			qry->flags &= ~QUERY_DNSSEC_WANT;
		}
		/* Fetch the closest cached zone cut, honouring the DNSSEC preference. */
		int ret = ns_fetch_cut(qry, request, (qry->flags & QUERY_DNSSEC_WANT));
		if (ret != 0) {
			/* No cached cut found, start from SBELT and issue priming query. */
			if (ret == kr_error(ENOENT)) {
				ret = kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
				if (ret != 0) {
					return KNOT_STATE_FAIL;
				}
				if (qry->sname[0] != '\0') {
					/* Non-root query: plan an NS priming sub-request for the root. */
					DEBUG_MSG(qry, "=> root priming query\n");
					zone_cut_subreq(rplan, qry, qry->zone_cut.name, KNOT_RRTYPE_NS);
				} else {
					DEBUG_MSG(qry, "=> using root hints\n");
				}
				qry->flags &= ~QUERY_AWAIT_CUT;
				return KNOT_STATE_DONE;
			} else {
				/* Any other lookup error is fatal for this query. */
				return KNOT_STATE_FAIL;
			}
		}
		/* Update minimized QNAME if zone cut changed */
		if (qry->zone_cut.name[0] != '\0' && !(qry->flags & QUERY_NO_MINIMIZE)) {
			if (kr_make_query(qry, packet) != 0) {
				return KNOT_STATE_FAIL;
			}
		}
		qry->flags &= ~QUERY_AWAIT_CUT;
	}
	/* Check trust chain */
	return trust_chain_check(request, qry);
}
int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *type, knot_pkt_t *packet)
......@@ -529,7 +543,14 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
}
/* If we have deferred answers, resume them. */
struct kr_query *qry = TAIL(rplan->pending);
if (false && qry->deferred != NULL) { /** DISABLED until validator is fixed */
if (qry->deferred != NULL) {
/* @todo: Refactoring validator, check trust chain before resuming. */
switch(trust_chain_check(request, qry)) {
case KNOT_STATE_FAIL: return KNOT_STATE_FAIL;
case KNOT_STATE_DONE: return KNOT_STATE_PRODUCE;
default: break;
}
DEBUG_MSG(qry, "=> resuming yielded answer\n");
struct kr_layer_pickle *pickle = qry->deferred;
request->state = KNOT_STATE_YIELD;
RESUME_LAYERS(layer_id(request, pickle->api), request, qry, consume, pickle->pkt);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment