Commit 57d48522 authored by Vladimír Čunát

qflags: WIP refactor - regex replacements

sed -e 's/flags |= QUERY_\([A-Z0-9_]*\);/flags.\1 = true;/g' \
        -e 's/flags &= ~QUERY_\([A-Z0-9_]*\);/flags.\1 = false;/g' \
        -e 's/\(flags\|options\|opts\) & QUERY_\([A-Z0-9_]*\)\()\| ||\)/\1.\2\3/g' \
        -i $(git grep -l QUERY_)
parent 81d0d6c4
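
For orientation, a minimal sketch of the flag representation this refactor moves towards, assuming the new type is a plain struct of bools: the old QUERY_* bitmask constants become boolean members, so "flags & QUERY_X" turns into "flags.X" and "flags |= QUERY_X" into "flags.X = true". The struct name kr_qflags and the member list below are illustrative assumptions, not the definition introduced by this commit.

    /* Hypothetical sketch of the new flag type; the real definition
     * lives in the resolver's headers and may differ. */
    #include <stdbool.h>

    struct kr_qflags {
        bool RESOLVED;      /* query has been answered */
        bool CACHED;        /* answer came from the cache */
        bool STUB;          /* stub (non-iterative) mode */
        bool FORWARD;       /* forwarding mode */
        bool NO_MINIMIZE;   /* QNAME minimisation disabled */
        /* ... one bool per former QUERY_* bit ... */
    };

    /* Before:  qry->flags |= QUERY_RESOLVED;
     *          if (qry->flags & QUERY_CACHED) { ... }
     * After:   qry->flags.RESOLVED = true;
     *          if (qry->flags.CACHED) { ... }
     */

Note that the sed expressions only rewrite single-flag assignments and tests; compound uses such as "qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;" or "qry->flags & (QUERY_FORWARD | QUERY_CNAME)" are not matched and still appear with the old macros in the hunks below, which is why the commit is marked WIP.
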
@@ -113,7 +113,7 @@ This is only passive processing of the incoming answer. If you want to change th
if (can_satisfy(qry)) {
/* This flag makes the resolver move the query
* to the "resolved" list. */
qry->flags |= QUERY_RESOLVED;
qry->flags.RESOLVED = true;
return KR_STATE_DONE;
}
@@ -161,7 +161,7 @@ static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
knot_dname_to_str(name_str, rr->owner, sizeof(name_str));
inet_ntop(af, addr, addr_str, sizeof(addr_str));
}
if (!(query->flags & QUERY_ALLOW_LOCAL) &&
if (!(query->flags.ALLOW_LOCAL) &&
!is_valid_addr(addr, addr_len)) {
QVERBOSE_MSG(query, "<= ignoring invalid glue for "
"'%s': '%s'\n", name_str, addr_str);
@@ -192,11 +192,11 @@ static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_reques
continue;
}
if ((rr->type == KNOT_RRTYPE_A) &&
(req->ctx->options & QUERY_NO_IPV4)) {
(req->ctx->options.NO_IPV4)) {
continue;
}
if ((rr->type == KNOT_RRTYPE_AAAA) &&
(req->ctx->options & QUERY_NO_IPV6)) {
(req->ctx->options.NO_IPV6)) {
continue;
}
(void) update_nsaddr(rr, req->current_query);
@@ -275,9 +275,9 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr,
}
kr_zonecut_add(cut, ns_name, NULL);
/* Choose when to use glue records. */
if (qry->flags & QUERY_PERMISSIVE) {
if (qry->flags.PERMISSIVE) {
fetch_glue(pkt, ns_name, req);
} else if (qry->flags & QUERY_STRICT) {
} else if (qry->flags.STRICT) {
/* Strict mode uses only mandatory glue. */
if (knot_dname_in(cut->name, ns_name))
fetch_glue(pkt, ns_name, req);
@@ -299,7 +299,7 @@ static uint8_t get_initial_rank(const knot_rrset_t *rr, const struct kr_query *q
/* For RRSIGs, ensure the KR_RANK_AUTH flag corresponds to the signed RR. */
uint16_t type = kr_rrset_type_maysig(rr);
if (qry->flags & QUERY_CACHED) {
if (qry->flags.CACHED) {
return rr->additional ? *(uint8_t *)rr->additional : KR_RANK_OMIT;
/* ^^ Current use case for "cached" RRs without rank: hints module. */
}
@@ -357,10 +357,10 @@ static int pick_authority(knot_pkt_t *pkt, struct kr_request *req, bool to_wire)
static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
struct kr_query *qry = req->current_query;
assert(!(qry->flags & QUERY_STUB));
assert(!(qry->flags.STUB));
int result = KR_STATE_CONSUME;
if (qry->flags & QUERY_FORWARD) {
if (qry->flags.FORWARD) {
return result;
}
@@ -410,7 +410,7 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
}
if ((qry->flags & QUERY_DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
if ((qry->flags.DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
if (knot_wire_get_aa(pkt->wire) == 0 &&
knot_wire_get_ancount(pkt->wire) == 0 &&
ns_record_exists) {
@@ -435,7 +435,7 @@ static void finalize_answer(knot_pkt_t *pkt, struct kr_query *qry, struct kr_req
static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral, const knot_dname_t **cname_ret)
{
struct kr_query *query = req->current_query;
assert(!(query->flags & QUERY_STUB));
assert(!(query->flags.STUB));
/* Process answer type */
const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
const knot_dname_t *cname = NULL;
@@ -443,7 +443,7 @@ static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral,
unsigned cname_chain_len = 0;
bool is_final = (query->parent == NULL);
uint32_t iter_count = 0;
bool strict_mode = (query->flags & QUERY_STRICT);
bool strict_mode = (query->flags.STRICT);
do {
/* CNAME was found at previous iteration, but records may not follow the correct order.
* Try to find records for pending_cname owner from section start. */
@@ -468,7 +468,7 @@ static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral,
return KR_STATE_FAIL;
}
if (rrsig_labels < cname_labels) {
query->flags |= QUERY_DNSSEC_WEXPAND;
query->flags.DNSSEC_WEXPAND = true;
}
}
@@ -546,7 +546,7 @@ static int process_referral_answer(knot_pkt_t *pkt, struct kr_request *req)
return KR_STATE_FAIL;
}
struct kr_query *query = req->current_query;
if (!(query->flags & QUERY_CACHED)) {
if (!(query->flags.CACHED)) {
/* If not cached (i.e. got from upstream)
* make sure that this is not an authoritative answer
* (even with AA=1) for other layers.
@@ -613,13 +613,13 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
if (!knot_dname_is_equal(knot_pkt_qname(pkt), query->sname) &&
(pkt_class & (PKT_NOERROR|PKT_NXDOMAIN|PKT_REFUSED|PKT_NODATA))) {
VERBOSE_MSG("<= found cut, retrying with non-minimized name\n");
query->flags |= QUERY_NO_MINIMIZE;
query->flags.NO_MINIMIZE = true;
return KR_STATE_CONSUME;
}
/* This answer didn't improve resolution chain, therefore must be authoritative (relaxed to negative). */
if (!is_authoritative(pkt, query)) {
if (!(query->flags & QUERY_FORWARD) &&
if (!(query->flags.FORWARD) &&
pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) {
VERBOSE_MSG("<= lame response: non-auth sent negative response\n");
return KR_STATE_FAIL;
@@ -635,17 +635,17 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
/* Make sure that this is an authoritative answer (even with AA=0) for other layers */
knot_wire_set_aa(pkt->wire);
/* Either way it resolves current query. */
query->flags |= QUERY_RESOLVED;
query->flags.RESOLVED = true;
/* Follow canonical name as next SNAME. */
if (!knot_dname_is_equal(cname, query->sname)) {
/* Check if target record has been already copied */
query->flags |= QUERY_CNAME;
query->flags.CNAME = true;
if (is_final) {
state = process_final(pkt, req, cname);
if (state != kr_ok()) {
return state;
}
} else if ((query->flags & QUERY_FORWARD) &&
} else if ((query->flags.FORWARD) &&
((query->stype == KNOT_RRTYPE_DS) ||
(query->stype == KNOT_RRTYPE_NS))) {
/* CNAME'ed answer for DS or NS subquery.
@@ -667,9 +667,9 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
if (!next) {
return KR_STATE_FAIL;
}
next->flags |= QUERY_AWAIT_CUT;
if (query->flags & QUERY_FORWARD) {
next->forward_flags |= QUERY_CNAME;
next->flags.AWAIT_CUT = true;
if (query->flags.FORWARD) {
next->forward_flags.CNAME = true;
if (query->parent == NULL) {
state = kr_nsrep_copy_set(&next->ns, &query->ns);
if (state != kr_ok()) {
@@ -681,12 +681,12 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
/* Want DNSSEC if and only if it's posible to secure
* this name (i.e. iff it is covered by a TA) */
if (kr_ta_covers_qry(req->ctx, cname, query->stype)) {
next->flags |= QUERY_DNSSEC_WANT;
next->flags.DNSSEC_WANT = true;
} else {
next->flags &= ~QUERY_DNSSEC_WANT;
next->flags.DNSSEC_WANT = false;
}
if (!(query->flags & QUERY_FORWARD) ||
(query->flags & QUERY_DNSSEC_WEXPAND)) {
if (!(query->flags.FORWARD) ||
(query->flags.DNSSEC_WEXPAND)) {
state = pick_authority(pkt, req, false);
if (state != kr_ok()) {
return KR_STATE_FAIL;
@@ -737,7 +737,7 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
static int process_stub(knot_pkt_t *pkt, struct kr_request *req)
{
struct kr_query *query = req->current_query;
assert(query->flags & QUERY_STUB);
assert(query->flags.STUB);
/* Pick all answer RRs. */
const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
for (unsigned i = 0; i < an->count; ++i) {
@@ -753,7 +753,7 @@ static int process_stub(knot_pkt_t *pkt, struct kr_request *req)
}
knot_wire_set_aa(pkt->wire);
query->flags |= QUERY_RESOLVED;
query->flags.RESOLVED = true;
/* Pick authority RRs. */
int pkt_class = kr_response_classify(pkt);
const bool to_wire = ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) != 0);
@@ -859,13 +859,13 @@ static int resolve_badmsg(knot_pkt_t *pkt, struct kr_request *req, struct kr_que
#ifndef STRICT_MODE
/* Work around broken auths/load balancers */
if (query->flags & QUERY_SAFEMODE) {
if (query->flags.SAFEMODE) {
return resolve_error(pkt, req);
} else if (query->flags & QUERY_NO_MINIMIZE) {
query->flags |= QUERY_SAFEMODE;
} else if (query->flags.NO_MINIMIZE) {
query->flags.SAFEMODE = true;
return KR_STATE_DONE;
} else {
query->flags |= QUERY_NO_MINIMIZE;
query->flags.NO_MINIMIZE = true;
return KR_STATE_DONE;
}
#else
@@ -887,7 +887,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
}
WITH_VERBOSE {
if (query->flags & QUERY_TRACE) {
if (query->flags.TRACE) {
VERBOSE_MSG("<= answer received:\n");
kr_pkt_print(pkt);
}
@@ -913,17 +913,17 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
VERBOSE_MSG("<= ignoring mismatching response\n");
/* Force TCP, to work around authoritatives messing up question
* without yielding to spoofed responses. */
query->flags |= QUERY_TCP;
query->flags.TCP = true;
return resolve_badmsg(pkt, req, query);
} else if (knot_wire_get_tc(pkt->wire)) {
VERBOSE_MSG("<= truncated response, failover to TCP\n");
if (query) {
/* Fail if already on TCP. */
if (query->flags & QUERY_TCP) {
if (query->flags.TCP) {
VERBOSE_MSG("<= TC=1 with TCP, bailing out\n");
return resolve_error(pkt, req);
}
query->flags |= QUERY_TCP;
query->flags.TCP = true;
}
return KR_STATE_CONSUME;
}
@@ -949,7 +949,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
query->fails = 0; /* Reset per-query counter. */
return resolve_error(pkt, req);
} else {
query->flags |= QUERY_NO_MINIMIZE; /* Drop minimisation as a safe-guard. */
query->flags.NO_MINIMIZE = true; /* Drop minimisation as a safe-guard. */
return KR_STATE_CONSUME;
}
}
@@ -963,7 +963,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
}
/* Forwarding/stub mode is special. */
if (query->flags & QUERY_STUB) {
if (query->flags.STUB) {
return process_stub(pkt, req);
}
@@ -78,7 +78,7 @@ static int loot_pktcache(struct kr_context *ctx, knot_pkt_t *pkt,
uint8_t lowest_rank = KR_RANK_INITIAL | KR_RANK_AUTH;
/* There's probably little sense for NONAUTH in pktcache. */
if (!knot_wire_get_cd(req->answer->wire) && !(qry->flags & QUERY_STUB)) {
if (!knot_wire_get_cd(req->answer->wire) && !(qry->flags.STUB)) {
/* Records not present under any TA don't have their security verified at all. */
bool ta_covers = kr_ta_covers_qry(ctx, qry->sname, qry->stype);
/* ^ TODO: performance? */
@@ -108,8 +108,8 @@ static int loot_pktcache(struct kr_context *ctx, knot_pkt_t *pkt,
/* Rank-related fixups. Add rank into the additional field. */
if (kr_rank_test(entry->rank, KR_RANK_INSECURE)) {
qry->flags |= QUERY_DNSSEC_INSECURE;
qry->flags &= ~QUERY_DNSSEC_WANT;
qry->flags.DNSSEC_INSECURE = true;
qry->flags.DNSSEC_WANT = false;
}
for (size_t i = 0; i < pkt->rrset_count; ++i) {
assert(!pkt->rr[i].additional);
@@ -143,14 +143,14 @@ static int pktcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) ||
(qry->flags & QUERY_NO_CACHE)) {
(qry->flags.NO_CACHE)) {
return ctx->state; /* Already resolved/failed */
}
/* Both caches only peek for qry->sname and that would be useless
* to repeat on every iteration, so disable it from now on.
* Note: it's important to skip this if rrcache sets KR_STATE_DONE,
* as CNAME chains need more iterations to get fetched. */
qry->flags |= QUERY_NO_CACHE;
qry->flags.NO_CACHE = true;
if (knot_pkt_qclass(pkt) != KNOT_CLASS_IN) {
return ctx->state; /* Only IN class */
@@ -162,10 +162,10 @@ static int pktcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
if (ret == 0) {
qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
if (flags & KR_CACHE_FLAG_WCARD_PROOF) {
qry->flags |= QUERY_DNSSEC_WEXPAND;
qry->flags.DNSSEC_WEXPAND = true;
}
if (flags & KR_CACHE_FLAG_OPTOUT) {
qry->flags |= QUERY_DNSSEC_OPTOUT;
qry->flags.DNSSEC_OPTOUT = true;
}
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
@@ -219,7 +219,7 @@ static int pktcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
struct kr_query *qry = req->current_query;
/* Cache only answers that make query resolved (i.e. authoritative)
* that didn't fail during processing and are negative. */
if (qry->flags & QUERY_CACHED || ctx->state & KR_STATE_FAIL) {
if (qry->flags.CACHED || ctx->state & KR_STATE_FAIL) {
return ctx->state; /* Don't cache anything if failed. */
}
/* Cache only authoritative answers from IN class. */
@@ -230,7 +230,7 @@ static int pktcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
const uint16_t qtype = knot_pkt_qtype(pkt);
const bool is_eligible = (knot_rrtype_is_metatype(qtype) || qtype == KNOT_RRTYPE_RRSIG);
bool is_negative = kr_response_classify(pkt) & (PKT_NODATA|PKT_NXDOMAIN);
bool wcard_expansion = (qry->flags & QUERY_DNSSEC_WEXPAND);
bool wcard_expansion = (qry->flags.DNSSEC_WEXPAND);
if (is_negative && ((qry->flags & (QUERY_FORWARD | QUERY_CNAME)) ==
(QUERY_FORWARD | QUERY_CNAME))) {
/* Don't cache CNAME'ed NXDOMAIN answer in forwarding mode
@@ -261,24 +261,24 @@ static int pktcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
/* If cd bit is set or we got answer via non-validated forwarding,
* make the rank bad; otherwise it depends on flags. */
if (knot_wire_get_cd(req->answer->wire) || qry->flags & QUERY_STUB) {
if (knot_wire_get_cd(req->answer->wire) || qry->flags.STUB) {
kr_rank_set(&header.rank, KR_RANK_OMIT);
} else {
if (qry->flags & QUERY_DNSSEC_BOGUS) {
if (qry->flags.DNSSEC_BOGUS) {
kr_rank_set(&header.rank, KR_RANK_BOGUS);
} else if (qry->flags & QUERY_DNSSEC_INSECURE) {
} else if (qry->flags.DNSSEC_INSECURE) {
kr_rank_set(&header.rank, KR_RANK_INSECURE);
} else if (qry->flags & QUERY_DNSSEC_WANT) {
} else if (qry->flags.DNSSEC_WANT) {
kr_rank_set(&header.rank, KR_RANK_SECURE);
}
}
VERBOSE_MSG(qry, "=> candidate rank: 0%0.2o\n", header.rank);
/* Set cache flags */
if (qry->flags & QUERY_DNSSEC_WEXPAND) {
if (qry->flags.DNSSEC_WEXPAND) {
header.flags |= KR_CACHE_FLAG_WCARD_PROOF;
}
if (qry->flags & QUERY_DNSSEC_OPTOUT) {
if (qry->flags.DNSSEC_OPTOUT) {
header.flags |= KR_CACHE_FLAG_OPTOUT;
}
@@ -89,13 +89,13 @@ static int loot_rr(struct kr_cache *cache, knot_pkt_t *pkt, const knot_dname_t *
}
if (is_expiring(&cache_rr, drift)) {
qry->flags |= QUERY_EXPIRING;
qry->flags.EXPIRING = true;
}
if ((*flags) & KR_CACHE_FLAG_WCARD_PROOF) {
/* Record was found, but wildcard answer proof is needed.
* Do not update packet, try to fetch whole packet from pktcache instead. */
qry->flags |= QUERY_DNSSEC_WEXPAND;
qry->flags.DNSSEC_WEXPAND = true;
return kr_error(ENOENT);
}
@@ -157,7 +157,7 @@ static int loot_rrcache(struct kr_request *req, knot_pkt_t *pkt,
uint8_t rank = 0;
uint8_t flags = 0;
uint8_t lowest_rank = KR_RANK_INITIAL | KR_RANK_AUTH;
if (qry->flags & QUERY_NONAUTH) {
if (qry->flags.NONAUTH) {
lowest_rank = KR_RANK_INITIAL;
/* Note: there's little sense in validation status for non-auth records.
* In case of using NONAUTH to get NS IPs, knowing that you ask correct
@@ -179,7 +179,7 @@ static int loot_rrcache(struct kr_request *req, knot_pkt_t *pkt,
int ret = loot_rr(cache, pkt, qry->sname, qry->sclass, rrtype, qry,
&rank, &flags, 0, lowest_rank);
if (ret != 0 && rrtype != KNOT_RRTYPE_CNAME
&& !(qry->flags & QUERY_STUB)) {
&& !(qry->flags.STUB)) {
/* Chase CNAME if no direct hit.
* We avoid this in STUB mode because the current iterator
* (process_stub()) is unable to iterate in STUB mode to follow
@@ -193,13 +193,13 @@ static int loot_rrcache(struct kr_request *req, knot_pkt_t *pkt,
}
if (kr_rank_test(rank, KR_RANK_INSECURE)) {
qry->flags |= QUERY_DNSSEC_INSECURE;
qry->flags &= ~QUERY_DNSSEC_WANT;
qry->flags.DNSSEC_INSECURE = true;
qry->flags.DNSSEC_WANT = false;
}
/* Record may have RRSIGs, try to find them. */
if (allow_unverified
|| ((qry->flags & QUERY_DNSSEC_WANT) && kr_rank_test(rank, KR_RANK_SECURE))) {
|| ((qry->flags.DNSSEC_WANT) && kr_rank_test(rank, KR_RANK_SECURE))) {
kr_rank_set(&lowest_rank, KR_RANK_INITIAL); /* no security for RRSIGs */
ret = loot_rr(cache, pkt, qry->sname, qry->sclass, rrtype, qry,
&rank, &flags, true, lowest_rank);
@@ -232,7 +232,7 @@ static int rrcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags.NO_CACHE)) {
return ctx->state; /* Already resolved/failed or already tried, etc. */
}
/* Reconstruct the answer from the cache,
@@ -275,7 +275,7 @@ struct rrcache_baton
static int commit_rrsig(struct rrcache_baton *baton, uint8_t rank, uint8_t flags, knot_rrset_t *rr)
{
/* If not doing secure resolution, ignore (unvalidated) RRSIGs. */
if (!(baton->qry->flags & QUERY_DNSSEC_WANT)) {
if (!(baton->qry->flags.DNSSEC_WANT)) {
return kr_ok();
}
/* Commit covering RRSIG to a separate cache namespace. */
@@ -326,11 +326,11 @@ static int commit_rr(const char *key, void *val, void *data)
uint8_t flags = KR_CACHE_FLAG_NONE;
if (kr_rank_test(rank, KR_RANK_AUTH)) {
if (baton->qry->flags & QUERY_DNSSEC_WEXPAND) {
if (baton->qry->flags.DNSSEC_WEXPAND) {
flags |= KR_CACHE_FLAG_WCARD_PROOF;
}
if ((rr->type == KNOT_RRTYPE_NS) &&
(baton->qry->flags & QUERY_DNSSEC_NODS)) {
(baton->qry->flags.DNSSEC_NODS)) {
flags |= KR_CACHE_FLAG_NODS;
}
}
@@ -428,7 +428,7 @@ static int rrcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
/* Cache only positive answers, not meta types or RRSIG. */
const uint16_t qtype = knot_pkt_qtype(pkt);
const bool is_eligible = !(knot_rrtype_is_metatype(qtype) || qtype == KNOT_RRTYPE_RRSIG);
if (qry->flags & QUERY_CACHED || knot_wire_get_rcode(pkt->wire) != KNOT_RCODE_NOERROR || !is_eligible) {
if (qry->flags.CACHED || knot_wire_get_rcode(pkt->wire) != KNOT_RCODE_NOERROR || !is_eligible) {
return ctx->state;
}
/* Stash data selected by iterator from the last receieved packet. */
@@ -94,10 +94,10 @@ static unsigned eval_addr_set(pack_t *addr_set, kr_nsrep_lru_t *rttcache, unsign
bool is_valid = false;
/* Check if the address isn't disabled. */
if (len == sizeof(struct in6_addr)) {
is_valid = !(opts & QUERY_NO_IPV6);
is_valid = !(opts.NO_IPV6);
favour = FAVOUR_IPV6;
} else {
is_valid = !(opts & QUERY_NO_IPV4);
is_valid = !(opts.NO_IPV4);
}
/* Get RTT for this address (if known) */
if (is_valid) {
@@ -146,10 +146,10 @@ static int eval_nsrep(const char *k, void *v, void *baton)
if (reputation & KR_NS_NOIP4) {
score = KR_NS_UNKNOWN;
/* Try to start with clean slate */
if (!(ctx->options & QUERY_NO_IPV6)) {
if (!(ctx->options.NO_IPV6)) {
reputation &= ~KR_NS_NOIP6;
}
if (!(ctx->options & QUERY_NO_IPV4)) {
if (!(ctx->options.NO_IPV4)) {
reputation &= ~KR_NS_NOIP4;
}
}
@@ -162,7 +162,7 @@ static int eval_nsrep(const char *k, void *v, void *baton)
* The fastest NS is preferred by workers until it is depleted (timeouts or degrades),
* at the same time long distance scouts probe other sources (low probability).
* Servers on TIMEOUT (depleted) can be probed by the dice roll only */
if (score <= ns->score && (qry->flags & QUERY_NO_THROTTLE || score < KR_NS_TIMEOUT)) {
if (score <= ns->score && (qry->flags.NO_THROTTLE || score < KR_NS_TIMEOUT)) {
update_nsrep_set(ns, (const knot_dname_t *)k, addr_choice, score);
ns->reputation = reputation;
} else {
@@ -170,7 +170,7 @@ static int eval_nsrep(const char *k, void *v, void *baton)
if ((kr_rand_uint(100) < 10) && (kr_rand_uint(KR_NS_MAX_SCORE) >= score)) {
/* If this is a low-reliability probe, go with TCP to get ICMP reachability check. */
if (score >= KR_NS_LONG) {
qry->flags |= QUERY_TCP;
qry->flags.TCP = true;
}
update_nsrep_set(ns, (const knot_dname_t *)k, addr_choice, score);
ns->reputation = reputation;
@@ -140,7 +140,7 @@ static struct kr_query *kr_rplan_push_query(struct kr_rplan *rplan,
: 0;
/* When forwarding, keep the nameserver addresses. */
if (parent && (parent->flags & qry->flags & QUERY_FORWARD)) {
if (parent && (parent->flags & qry->flags.FORWARD)) {
ret = kr_nsrep_copy_set(&qry->ns, &parent->ns);
if (ret) {
query_free(rplan->pool, qry);
@@ -375,10 +375,10 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
unsigned *cached = lru_get_try(ctx->cache_rep,
(const char *)ns_name, knot_dname_size(ns_name));
unsigned reputation = (cached) ? *cached : 0;
if (!(reputation & KR_NS_NOIP4) && !(ctx->options & QUERY_NO_IPV4)) {
if (!(reputation & KR_NS_NOIP4) && !(ctx->options.NO_IPV4)) {
fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_A, timestamp);
}
if (!(reputation & KR_NS_NOIP6) && !(ctx->options & QUERY_NO_IPV6)) {
if (!(reputation & KR_NS_NOIP6) && !(ctx->options.NO_IPV6)) {
fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_AAAA, timestamp);
}
}
@@ -225,7 +225,7 @@ int check_response(kr_layer_t *ctx, knot_pkt_t *pkt)
return ctx->state;
}
if (!cookie_ctx->clnt.enabled || (qry->flags & QUERY_TCP)) {
if (!cookie_ctx->clnt.enabled || (qry->flags.TCP)) {
return ctx->state;
}
@@ -265,7 +265,7 @@ int check_response(kr_layer_t *ctx, knot_pkt_t *pkt)
#endif
if (rcode == KNOT_RCODE_BADCOOKIE) {
struct kr_query *next = NULL;
if (!(qry->flags & QUERY_BADCOOKIE_AGAIN)) {
if (!(qry->flags.BADCOOKIE_AGAIN)) {
/* Received first BADCOOKIE, regenerate query. */
next = kr_rplan_push(&req->rplan, qry->parent,
qry->sname, qry->sclass,
@@ -274,7 +274,7 @@ int check_response(kr_layer_t *ctx, knot_pkt_t *pkt)
if (next) {
VERBOSE_MSG(NULL, "%s\n", "BADCOOKIE querying again");
qry->flags |= QUERY_BADCOOKIE_AGAIN;
qry->flags.BADCOOKIE_AGAIN = true;
} else {
/*
* Either the planning of the second request failed or
@@ -283,7 +283,7 @@ int check_response(kr_layer_t *ctx, knot_pkt_t *pkt)
* RFC7873 5.3 says that TCP should be used. Currently
* we always expect that the server doesn't support TCP.
*/
qry->flags &= ~QUERY_BADCOOKIE_AGAIN;
qry->flags.BADCOOKIE_AGAIN = false;
return KR_STATE_FAIL;
}
@@ -174,7 +174,7 @@ static int dnstap_log(kr_layer_t *ctx) {
if (rplan->resolved.len > 0) {
struct kr_query *last = array_tail(rplan->resolved);
/* Only add query_zone when not answered from cache */
if (!(last->flags & QUERY_CACHED)) {
if (!(last->flags.CACHED)) {
const knot_dname_t *zone_cut_name = last->zone_cut.name;
if (zone_cut_name != NULL) {
m.query_zone.data = (uint8_t *)zone_cut_name;
@@ -152,7 +152,7 @@ static int query(kr_layer_t *ctx, knot_pkt_t *pkt)
}
VERBOSE_MSG(qry, "<= answered from hints\n");
qry->flags &= ~QUERY_DNSSEC_WANT; /* Never authenticated */
qry->flags.DNSSEC_WANT = false; /* Never authenticated */
qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
@@ -137,11 +137,11 @@ static void collect_sample(struct stat_data *data, struct kr_rplan *rplan, knot_
for (size_t i = 0; i < rplan->resolved.len; ++i) {
/* Sample queries leading to iteration or expiring */
struct kr_query *qry = rplan->resolved.at[i];
if ((qry->flags & QUERY_CACHED) && !(qry->flags & QUERY_EXPIRING)) {
if ((qry->flags.CACHED) && !(qry->flags.EXPIRING)) {
continue;
}
int key_len = collect_key(key, qry->sname, qry->stype);
if (qry->flags & QUERY_EXPIRING) {
if (qry->flags.EXPIRING) {
unsigned *count = lru_get_new(data->queries.expiring, key, key_len);
if (count)
*count += 1;
@@ -158,7 +158,7 @@ static int collect_rtt(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
if (qry->flags & QUERY_CACHED || !req->upstream.addr) {
if (qry->flags.CACHED || !req->upstream.addr) {
return ctx->state;
}
@@ -221,7 +221,7 @@ static int collect(kr_layer_t *ctx)
}
/* Observe the final query. */
struct kr_query *last = array_tail(rplan->resolved);
if (last->flags & QUERY_CACHED) {
if (last->flags.CACHED) {
stat_const_add(data, metric_answer_cached, 1);
}
}