Commit b7213a6f authored by Vladimír Čunát's avatar Vladimír Čunát

qflags: WIP refactor - hand-replace complex cases

parent 57d48522
......@@ -387,7 +387,7 @@ static int qr_task_start(struct qr_task *task, knot_pkt_t *query)
worker->stats.queries += 1;
/* Throttle outbound queries only when high pressure */
if (worker->stats.concurrent < QUERY_RATE_THRESHOLD) {
task->req.options |= QUERY_NO_THROTTLE;
task->req.options.NO_THROTTLE = true;
}
return 0;
}
......
......@@ -33,7 +33,7 @@ This is the *driver*. The driver is not meant to know *"how"* the query resolves
On the other side are *layers*. They are responsible for dissecting the packets and informing the driver about the results. For example, a *produce* layer generates query, a *consume* layer validates answer.
.. tip:: Layers are executed asynchronously by the driver. If you need some asset beforehand, you can signalize the driver using returning state or current query flags. For example, setting a flag ``QUERY_AWAIT_CUT`` forces driver to fetch zone cut information before the packet is consumed; setting a ``QUERY_RESOLVED`` flag makes it pop a query after the current set of layers is finished; returning ``FAIL`` state makes it fail current query.
.. tip:: Layers are executed asynchronously by the driver. If you need some asset beforehand, you can signal the driver by returning a state or by setting flags on the current query. For example, setting the flag ``AWAIT_CUT`` forces the driver to fetch zone cut information before the packet is consumed; setting the ``RESOLVED`` flag makes it pop the query after the current set of layers is finished; returning the ``FAIL`` state makes it fail the current query.
Layers can also change course of resolution, for example by appending additional queries.
......
......@@ -54,7 +54,7 @@ static const knot_dname_t *minimized_qname(struct kr_query *query, uint16_t *qty
{
/* Minimization disabled. */
const knot_dname_t *qname = query->sname;
if (qname[0] == '\0' || query->flags & (QUERY_NO_MINIMIZE|QUERY_STUB)) {
if (qname[0] == '\0' || query->flags.NO_MINIMIZE || query->flags.STUB) {
return qname;
}
......@@ -893,7 +893,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
}
}
if (query->flags & (QUERY_RESOLVED|QUERY_BADCOOKIE_AGAIN)) {
if (query->flags.RESOLVED || query->flags.BADCOOKIE_AGAIN) {
return ctx->state;
}
......@@ -939,7 +939,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
break; /* OK */
case KNOT_RCODE_REFUSED:
case KNOT_RCODE_SERVFAIL: {
if (query->flags & (QUERY_STUB | QUERY_FORWARD)) {
if (query->flags.STUB || query->flags.FORWARD) {
/* Pass through in stub mode */
break;
}
......
......@@ -17,9 +17,9 @@
/** @file pktcache.c
*
* This builtin module caches whole packets from/for negative answers
* or answers where wildcard expansion has occurred (QUERY_DNSSEC_WEXPAND).
* or answers where wildcard expansion has occurred (.DNSSEC_WEXPAND).
*
* Note: it also persists some QUERY_DNSSEC_* flags.
* Note: it also persists some DNSSEC_* flags.
* The ranks are stored in *(uint8_t *)rrset->additional (all are the same for one packet).
*/
......@@ -160,7 +160,8 @@ static int pktcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
uint8_t flags = 0;
int ret = loot_pktcache(req->ctx, pkt, req, &flags);
if (ret == 0) {
qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
qry->flags.CACHED = true;
qry->flags.NO_MINIMIZE = true;
if (flags & KR_CACHE_FLAG_WCARD_PROOF) {
qry->flags.DNSSEC_WEXPAND = true;
}
......@@ -231,8 +232,7 @@ static int pktcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
const bool is_eligible = (knot_rrtype_is_metatype(qtype) || qtype == KNOT_RRTYPE_RRSIG);
bool is_negative = kr_response_classify(pkt) & (PKT_NODATA|PKT_NXDOMAIN);
bool wcard_expansion = (qry->flags.DNSSEC_WEXPAND);
if (is_negative && ((qry->flags & (QUERY_FORWARD | QUERY_CNAME)) ==
(QUERY_FORWARD | QUERY_CNAME))) {
if (is_negative && qry->flags.FORWARD && qry->flags.CNAME) {
/* Don't cache CNAME'ed NXDOMAIN answer in forwarding mode
since it can contain records
which have not been validated by the validator */
......
......@@ -20,7 +20,7 @@
*
* Produce phase: if an RRset answering the query exists, the packet is filled
* by it, including the corresponding RRSIGs (subject to some conditions).
* Such a packet is recognizable: pkt->size == PKT_SIZE_NOWIRE, and QUERY_CACHED
* Such a packet is recognizable: pkt->size == PKT_SIZE_NOWIRE, and flags.CACHED
* is set in the query. The ranks are stored in *(uint8_t *)rrset->additional.
*
* TODO
......@@ -151,9 +151,9 @@ static int loot_rrcache(struct kr_request *req, knot_pkt_t *pkt,
struct kr_query *qry, uint16_t rrtype)
{
const bool allow_unverified = knot_wire_get_cd(req->answer->wire)
|| qry->flags & QUERY_STUB;
|| qry->flags.STUB;
/* Lookup direct match first; only consider authoritative records.
* TODO: move rank handling into the iterator (QUERY_DNSSEC_* flags)? */
* TODO: move rank handling into the iterator (DNSSEC_* flags)? */
uint8_t rank = 0;
uint8_t flags = 0;
uint8_t lowest_rank = KR_RANK_INITIAL | KR_RANK_AUTH;
......@@ -254,7 +254,8 @@ static int rrcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
}
if (ret == 0) {
VERBOSE_MSG(qry, "=> satisfied from cache\n");
qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
qry->flags.CACHED = true;
qry->flags.NO_MINIMIZE = true;
pkt->parsed = pkt->size = PKT_SIZE_NOWIRE;
knot_wire_set_qr(pkt->wire);
knot_wire_set_aa(pkt->wire);
......
......@@ -277,8 +277,7 @@ static void mark_insecure_parents(const struct kr_query *qry)
* NS can be located in an unsigned zone, but will still return
* valid DNSSEC records for the initial query. */
struct kr_query *parent = qry->parent;
const uint32_t cut_flags = (QUERY_AWAIT_IPV4 | QUERY_AWAIT_IPV6);
while (parent && ((parent->flags & cut_flags) == 0)) {
while (parent && !parent->flags.AWAIT_IPV4 && !parent->flags.AWAIT_IPV6) {
parent->flags.DNSSEC_WANT = false;
parent->flags.DNSSEC_INSECURE = true;
if (parent->stype != KNOT_RRTYPE_DS &&
......@@ -304,9 +303,9 @@ static int update_parent_keys(struct kr_request *req, uint16_t answer_type)
break;
case KNOT_RRTYPE_DS:
VERBOSE_MSG(qry, "<= parent: updating DS\n");
if (qry->flags & (QUERY_DNSSEC_INSECURE)) { /* DS non-existence proven. */
if (qry->flags.DNSSEC_INSECURE) { /* DS non-existence proven. */
mark_insecure_parents(qry);
} else if ((qry->flags & (QUERY_DNSSEC_NODS | QUERY_FORWARD)) == QUERY_DNSSEC_NODS) {
} else if (qry->flags.DNSSEC_NODS && !qry->flags.FORWARD) {
if (qry->flags.DNSSEC_OPTOUT) {
mark_insecure_parents(qry);
} else {
......@@ -316,8 +315,7 @@ static int update_parent_keys(struct kr_request *req, uint16_t answer_type)
mark_insecure_parents(qry);
}
}
} else if ((qry->flags & (QUERY_DNSSEC_NODS | QUERY_FORWARD)) ==
(QUERY_DNSSEC_NODS | QUERY_FORWARD)) {
} else if (qry->flags.DNSSEC_NODS && qry->flags.FORWARD) {
int ret = kr_dnssec_matches_name_and_type(&req->auth_selected, qry->uid,
qry->sname, KNOT_RRTYPE_NS);
if (ret == kr_ok()) {
......@@ -404,7 +402,7 @@ static int update_delegation(struct kr_request *req, struct kr_query *qry, knot_
qry->flags.DNSSEC_NODS = true;
}
return ret;
} else if (qry->flags & QUERY_FORWARD && qry->parent) {
} else if (qry->flags.FORWARD && qry->parent) {
struct kr_query *parent = qry->parent;
parent->zone_cut.name = knot_dname_copy(qry->sname, parent->zone_cut.pool);
}
......@@ -813,8 +811,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
}
if (!(qry->flags.DNSSEC_WANT)) {
const uint32_t test_flags = (QUERY_CACHED | QUERY_DNSSEC_INSECURE);
const bool is_insec = ((qry->flags & test_flags) == test_flags);
const bool is_insec = qry->flags.CACHED && qry->flags.DNSSEC_INSECURE;
if ((qry->flags.DNSSEC_INSECURE)) {
rank_records(ctx, KR_RANK_INSECURE);
}
......@@ -876,8 +873,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
if (ret != KR_STATE_DONE) {
return ret;
}
if ((qry->flags & (QUERY_FORWARD | QUERY_DNSSEC_INSECURE)) ==
(QUERY_FORWARD | QUERY_DNSSEC_INSECURE)) {
if (qry->flags.FORWARD && qry->flags.DNSSEC_INSECURE) {
return KR_STATE_DONE;
}
}
......@@ -903,8 +899,8 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
}
/* Validate non-existence proof if not positive answer. */
if (!(qry->flags.CACHED) && pkt_rcode == KNOT_RCODE_NXDOMAIN &&
((qry->flags & (QUERY_FORWARD | QUERY_CNAME)) != (QUERY_FORWARD | QUERY_CNAME))) {
if (!qry->flags.CACHED && pkt_rcode == KNOT_RCODE_NXDOMAIN &&
(!qry->flags.FORWARD || !qry->flags.CNAME)) {
/* @todo If knot_pkt_qname(pkt) is used instead of qry->sname then the tests crash. */
if (!has_nsec3) {
ret = kr_nsec_name_error_response_check(pkt, KNOT_AUTHORITY, qry->sname);
......@@ -928,8 +924,8 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
/* @todo WTH, this needs API that just tries to find a proof and the caller
* doesn't have to worry about NSEC/NSEC3
* @todo rework this */
if (!(qry->flags.CACHED) && (pkt_rcode == KNOT_RCODE_NOERROR) &&
((qry->flags & (QUERY_FORWARD | QUERY_CNAME)) != (QUERY_FORWARD | QUERY_CNAME))) {
if (!qry->flags.CACHED && (pkt_rcode == KNOT_RCODE_NOERROR) &&
(!qry->flags.FORWARD || !qry->flags.CNAME)) {
bool no_data = (an->count == 0 && knot_wire_get_aa(pkt->wire));
if (no_data) {
/* @todo
......@@ -1006,7 +1002,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
return KR_STATE_FAIL;
} else if (pkt_rcode == KNOT_RCODE_NOERROR &&
referral &&
(((qry->flags & (QUERY_DNSSEC_WANT | QUERY_DNSSEC_INSECURE)) == QUERY_DNSSEC_INSECURE) ||
((!qry->flags.DNSSEC_WANT && qry->flags.DNSSEC_INSECURE) ||
(qry->flags.DNSSEC_NODS))) {
/* referral with proven DS non-existence */
qtype = KNOT_RRTYPE_DS;
......@@ -1018,7 +1014,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
}
}
if (qry->flags & QUERY_FORWARD && qry->parent) {
if (qry->flags.FORWARD && qry->parent) {
if (pkt_rcode == KNOT_RCODE_NXDOMAIN) {
qry->parent->forward_flags.NO_MINIMIZE = true;
}
......
......@@ -200,11 +200,13 @@ static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name
{
/* It can occur that the parent query already has a
 * provably insecure zone cut which is not in the cache yet. */
const uint32_t insec_flags = QUERY_DNSSEC_INSECURE | QUERY_DNSSEC_NODS;
const uint32_t cut_flags = QUERY_AWAIT_IPV4 | QUERY_AWAIT_IPV6;
const bool is_insecured = ((qry->parent != NULL) &&
(qry->parent->flags & cut_flags) == 0 &&
(qry->parent->flags & insec_flags) != 0);
struct kr_qflags pflags;
if (qry->parent) {
pflags = qry->parent->flags;
}
const bool is_insecured = qry->parent != NULL
&& !(pflags.AWAIT_IPV4 || pflags.AWAIT_IPV6)
&& (pflags.DNSSEC_INSECURE || pflags.DNSSEC_NODS);
/* Want DNSSEC if it's possible to secure this name
* (e.g. is covered by any TA) */
......@@ -642,8 +644,8 @@ static int answer_finalize(struct kr_request *request, int state)
* as those would also be PKT_NOERROR. */
|| (answ_all_cnames && knot_pkt_qtype(answer) != KNOT_RRTYPE_CNAME))
{
secure = secure && (last->flags.DNSSEC_WANT)
&& !(last->flags & (QUERY_DNSSEC_BOGUS | QUERY_DNSSEC_INSECURE));
secure = secure && last->flags.DNSSEC_WANT
&& !last->flags.DNSSEC_BOGUS && !last->flags.DNSSEC_INSECURE;
}
}
/* Clear AD if not secure. ATM answer has AD=1 if requested secured answer. */
......@@ -841,7 +843,8 @@ static void update_nslist_score(struct kr_request *request, struct kr_query *qry
/* Do not complete NS address resolution on soft-fail. */
const int rcode = packet ? knot_wire_get_rcode(packet->wire) : 0;
if (rcode != KNOT_RCODE_SERVFAIL && rcode != KNOT_RCODE_REFUSED) {
qry->flags &= ~(QUERY_AWAIT_IPV6|QUERY_AWAIT_IPV4);
qry->flags.AWAIT_IPV6 = false;
qry->flags.AWAIT_IPV4 = false;
} else { /* Penalize SERVFAILs. */
kr_nsrep_update_rtt(&qry->ns, src, KR_NS_PENALTY, ctx->cache_rtt, KR_NS_ADD);
}
......@@ -917,7 +920,8 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
} else if (!tried_tcp && (qry->flags.TCP)) {
return KR_STATE_PRODUCE; /* Requery over TCP */
} else { /* Clear query flags for next attempt */
qry->flags &= ~(QUERY_CACHED|QUERY_TCP);
qry->flags.CACHED = false;
qry->flags.TCP = false;
}
ITERATE_LAYERS(request, qry, reset);
......@@ -1197,7 +1201,8 @@ static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
if (!next) {
return KR_STATE_FAIL;
}
next->flags |= (QUERY_AWAIT_CUT | QUERY_DNSSEC_WANT);
next->flags.AWAIT_CUT = true;
next->flags.DNSSEC_WANT = true;
return KR_STATE_DONE;
}
/* Try to fetch missing DNSKEY (either missing or above current cut).
......@@ -1345,7 +1350,7 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
case KR_STATE_CONSUME: break;
case KR_STATE_DONE:
default: /* Current query is done */
if (qry->flags & QUERY_RESOLVED && request->state != KR_STATE_YIELD) {
if (qry->flags.RESOLVED && request->state != KR_STATE_YIELD) {
kr_rplan_pop(rplan, qry);
}
ITERATE_LAYERS(request, qry, reset);
......@@ -1379,10 +1384,11 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
return KR_STATE_FAIL;
}
const bool retry = (qry->flags & (QUERY_TCP|QUERY_BADCOOKIE_AGAIN));
if (qry->flags & (QUERY_AWAIT_IPV4|QUERY_AWAIT_IPV6)) {
const struct kr_qflags qflg = qry->flags;
const bool retry = qflg.TCP || qflg.BADCOOKIE_AGAIN;
if (qflg.AWAIT_IPV4 || qflg.AWAIT_IPV6) {
kr_nsrep_elect_addr(qry, request->ctx);
} else if (qry->flags & (QUERY_FORWARD|QUERY_STUB)) {
} else if (qflg.FORWARD || qflg.STUB) {
kr_nsrep_sort(&qry->ns, request->ctx->cache_rtt);
} else if (!qry->ns.name || !retry) { /* Keep NS when requerying/stub/badcookie. */
/* Root DNSKEY must be fetched from the hints to avoid chicken and egg problem. */
......@@ -1403,7 +1409,9 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
if (qry->ns.addr[0].ip.sa_family == AF_UNSPEC) {
int ret = ns_resolve_addr(qry, request);
if (ret != 0) {
qry->flags &= ~(QUERY_AWAIT_IPV6|QUERY_AWAIT_IPV4|QUERY_TCP);
qry->flags.AWAIT_IPV6 = false;
qry->flags.AWAIT_IPV4 = false;
qry->flags.TCP = false;
qry->ns.name = NULL;
goto ns_election; /* Must try different NS */
}
......@@ -1412,7 +1420,7 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
}
/* Randomize query case (if not in safemode or turned off) */
qry->secret = (qry->flags & (QUERY_SAFEMODE | QUERY_NO_0X20))
qry->secret = (qry->flags.SAFEMODE || qry->flags.NO_0X20)
? 0 : kr_rand_uint(UINT32_MAX);
knot_dname_t *qname_raw = (knot_dname_t *)knot_pkt_qname(packet);
randomized_qname_case(qname_raw, qry->secret);
......
......@@ -135,12 +135,12 @@ static struct kr_query *kr_rplan_push_query(struct kr_rplan *rplan,
qry->ns.addr[0].ip.sa_family = AF_UNSPEC;
gettimeofday(&qry->timestamp, NULL);
kr_zonecut_init(&qry->zone_cut, (const uint8_t *)"", rplan->pool);
qry->reorder = qry->flags & QUERY_REORDER_RR
qry->reorder = qry->flags.REORDER_RR
? knot_wire_get_id(rplan->request->answer->wire)
: 0;
/* When forwarding, keep the nameserver addresses. */
if (parent && (parent->flags & qry->flags.FORWARD)) {
if (parent && parent->flags.FORWARD && qry->flags.FORWARD) {
ret = kr_nsrep_copy_set(&qry->ns, &parent->ns);
if (ret) {
query_free(rplan->pool, qry);
......
......@@ -153,7 +153,8 @@ static int query(kr_layer_t *ctx, knot_pkt_t *pkt)
VERBOSE_MSG(qry, "<= answered from hints\n");
qry->flags.DNSSEC_WANT = false; /* Never authenticated */
qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
qry->flags.CACHED = true;
qry->flags.NO_MINIMIZE = true;
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
return KR_STATE_DONE;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment