Commit fa7e52e6 authored by Vladimír Čunát

reduce verbosity of cache-related logs

It was just too noisy for default --verbose.
parent a444fdb4
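
The change follows one pattern throughout: instead of emitting a verbose log line per record, each call site increments a counter passed in as an out parameter, and the caller prints a single summary line at the end. A minimal standalone sketch of that pattern (hypothetical names, not the resolver's actual API):

	#include <stdio.h>
	#include <stddef.h>

	struct rec { int authoritative; };

	/* Per-record work: count suppressed messages instead of logging each one. */
	static void stash_one(const struct rec *r, int *unauth_cnt)
	{
		if (!r->authoritative) {
			++*unauth_cnt;	/* would previously have logged here */
			return;
		}
		printf("=> stashed authoritative record\n");
	}

	int main(void)
	{
		struct rec recs[] = { {1}, {0}, {0}, {1}, {0} };
		int unauth_cnt = 0;
		for (size_t i = 0; i < sizeof(recs) / sizeof(recs[0]); ++i)
			stash_one(&recs[i], &unauth_cnt);
		if (unauth_cnt)	/* one summary line instead of three */
			printf("=> stashed also %d nonauth RRs\n", unauth_cnt);
		return 0;
	}
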
@@ -635,7 +635,8 @@ static int cache_peek_real(kr_layer_t *ctx, knot_pkt_t *pkt)
 
 /** It's simply inside of cycle taken out to decrease indentation. \return error code. */
 static int stash_rrset(const ranked_rr_array_t *arr, int arr_i,
-		       const struct kr_query *qry, struct kr_cache *cache);
+		       const struct kr_query *qry, struct kr_cache *cache,
+		       int *unauth_cnt);
 
 int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
@@ -658,6 +659,7 @@ int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
 	/* Stash individual records. */
 	ranked_rr_array_t *selected[] = kr_request_selected(req);
 	int ret = 0;
+	int unauth_cnt = 0;
 	for (int psec = KNOT_ANSWER; psec <= KNOT_ADDITIONAL; ++psec) {
 		const ranked_rr_array_t *arr = selected[psec];
 		/* uncached entries are located at the end */
@@ -667,7 +669,7 @@ int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
 				continue;
 				/* TODO: probably safe to break but maybe not worth it */
 			}
-			ret = stash_rrset(arr, i, qry, cache);
+			ret = stash_rrset(arr, i, qry, cache, &unauth_cnt);
 			if (ret) {
 				VERBOSE_MSG(qry, "=> stashing RRs errored out\n");
 				goto finally;
@@ -680,12 +682,16 @@ int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
 		stash_pkt(pkt, qry, req);
 finally:
+	if (unauth_cnt) {
+		VERBOSE_MSG(qry, "=> stashed also %d nonauth RRs\n", unauth_cnt);
+	};
 	kr_cache_sync(cache);
 	return ctx->state; /* we ignore cache-stashing errors */
 }
 
 static int stash_rrset(const ranked_rr_array_t *arr, int arr_i,
-		       const struct kr_query *qry, struct kr_cache *cache)
+		       const struct kr_query *qry, struct kr_cache *cache,
+		       int *unauth_cnt)
 {
 	const ranked_rr_array_entry_t *entry = arr->at[arr_i];
 	if (entry->cached) {
@@ -818,9 +824,14 @@ static int stash_rrset(const ranked_rr_array_t *arr, int arr_i,
 	assert(entry_h_consistent(val_new_entry, rr->type));
 
 	WITH_VERBOSE(qry) {
+		/* Reduce verbosity. */
+		if (!kr_rank_test(entry->rank, KR_RANK_AUTH)) {
+			++*unauth_cnt;
+			return kr_ok();
+		}
 		auto_free char *type_str = kr_rrtype_text(rr->type),
 			*encl_str = kr_dname_text(encloser);
-		VERBOSE_MSG(qry, "=> stashed rank: 0%0.2o, %s %s%s"
+		VERBOSE_MSG(qry, "=> stashed rank: 0%.2o, %s %s%s "
 			"(%d B total, incl. %d RRSIGs)\n",
 			entry->rank, type_str, (wild_labels ? "*." : ""), encl_str,
 			(int)val_new_entry.len, (rr_sigs ? rr_sigs->rrs.rr_count : 0)
@@ -938,7 +949,7 @@ int kr_cache_peek_exact(struct kr_cache *cache, const knot_dname_t *name, uint16
 			struct kr_cache_p *peek)
 { /* Just wrap with extra verbose logging. */
 	const int ret = peek_exact_real(cache, name, type, peek);
-	if (VERBOSE_STATUS) {
+	if (false && VERBOSE_STATUS) { /* too noisy for usual --verbose */
 		auto_free char *type_str = kr_rrtype_text(type),
 			*name_str = kr_dname_text(name);
 		const char *result_str = (ret == kr_ok() ? "hit" :
......
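
Two side fixes ride along in this file. The stash message's format string changes from `0%0.2o` to `0%.2o` (the `0` flag is ignored whenever a precision is given, so output is identical) and gains the space that was missing before `"(%d B total..."`. And kr_cache_peek_exact's per-lookup hit/miss log is silenced with `if (false && VERBOSE_STATUS)`, which keeps the block parsed and type-checked while any optimizing compiler removes it as dead code. A small standalone illustration (verbose_status is a stand-in, not the resolver's symbol):

	#include <stdio.h>
	#include <stdbool.h>

	static bool verbose_status = true;	/* stand-in for VERBOSE_STATUS */

	int main(void)
	{
		unsigned rank = 020;
		/* "%.2o" pads the octal digits to width two; the literal
		 * leading '0' makes it read as C-style octal: "rank: 020". */
		printf("rank: 0%.2o\n", rank);

		/* Muted branch: still compiles against the real types,
		 * but is eliminated as dead code. */
		if (false && verbose_status) {
			printf("cache peek: ...\n");
		}
		return 0;
	}
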
@@ -148,7 +148,7 @@ static bool is_valid_addr(const uint8_t *addr, size_t len)
 }
 
 /** @internal Update NS address from record \a rr. Return _FAIL on error. */
-static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
+static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query, int *glue_cnt)
 {
 	if (rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA) {
 		const knot_rdata_t *rdata = rr->rrs.data;
@@ -172,15 +172,21 @@ static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
 		if (ret != 0) {
 			return KR_STATE_FAIL;
 		}
-		QVERBOSE_MSG(query, "<= using glue for "
+		++*glue_cnt; /* reduced verbosity */
+		/* QVERBOSE_MSG(query, "<= using glue for "
 			"'%s': '%s'\n", name_str, addr_str);
+		*/
 	}
 	return KR_STATE_CONSUME;
 }
 
-/** @internal From \a pkt, fetch glue records for name \a ns, and update the cut etc. */
+/** @internal From \a pkt, fetch glue records for name \a ns, and update the cut etc.
+ *
+ * \param glue_cnt the number of accepted addresses (to be incremented)
+ */
 static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, bool in_bailiwick,
-		       struct kr_request *req, const struct kr_query *qry)
+		       struct kr_request *req, const struct kr_query *qry, int *glue_cnt)
 {
 	ranked_rr_array_t *selected[] = kr_request_selected(req);
 	for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
@@ -208,7 +214,7 @@ static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, bool in_bailiwic
 			    (req->ctx->options.NO_IPV6)) {
 				continue;
 			}
-			(void) update_nsaddr(rr, req->current_query);
+			(void) update_nsaddr(rr, req->current_query, glue_cnt);
 		}
 	}
 }
@@ -233,7 +239,8 @@ static bool has_glue(knot_pkt_t *pkt, const knot_dname_t *ns)
  * @param current_cut is cut name before this packet.
  * @return _DONE if cut->name changes, _FAIL on error, and _CONSUME otherwise. */
 static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr,
-		      struct kr_request *req, const knot_dname_t *current_cut)
+		      struct kr_request *req, const knot_dname_t *current_cut,
+		      int *glue_cnt)
 {
 	struct kr_query *qry = req->current_query;
 	struct kr_zonecut *cut = &qry->zone_cut;
@@ -302,7 +309,7 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr,
 			do_fetch = in_bailiwick;
 		}
 		if (do_fetch) {
-			fetch_glue(pkt, ns_name, in_bailiwick, req, qry);
+			fetch_glue(pkt, ns_name, in_bailiwick, req, qry, glue_cnt);
 		}
 	}
@@ -402,12 +409,13 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
 	/* Remember current bailiwick for NS processing. */
 	const knot_dname_t *current_zone_cut = qry->zone_cut.name;
 	bool ns_record_exists = false;
+	int glue_cnt = 0;
 	/* Update zone cut information. */
 	for (unsigned i = 0; i < ns->count; ++i) {
 		const knot_rrset_t *rr = knot_pkt_rr(ns, i);
 		if (rr->type == KNOT_RRTYPE_NS) {
 			ns_record_exists = true;
-			int state = update_cut(pkt, rr, req, current_zone_cut);
+			int state = update_cut(pkt, rr, req, current_zone_cut, &glue_cnt);
 			switch(state) {
 			case KR_STATE_DONE: result = state; break;
 			case KR_STATE_FAIL: return state; break;
@@ -419,6 +427,10 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
 		}
 	}
 
+	if (glue_cnt) {
+		VERBOSE_MSG("<= loaded %d glue addresses\n", glue_cnt);
+	}
+
 	if ((qry->flags.DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
 		if (knot_wire_get_aa(pkt->wire) == 0 &&
@@ -492,7 +504,8 @@ static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral,
 				/* if not referral, mark record to be written to final answer */
 				to_wire = !referral;
 			} else {
-				state = update_nsaddr(rr, query->parent);
+				int cnt_ = 0;
+				state = update_nsaddr(rr, query->parent, &cnt_);
 				if (state == KR_STATE_FAIL) {
 					return state;
 				}
......
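
Because update_nsaddr now reports through an out parameter, every caller must supply one; a call site that has no use for the statistic passes a throwaway local, as unroll_cname does with cnt_. A brief sketch of that convention (hypothetical names):

	#include <stdio.h>

	/* Callee reports a side statistic through an out parameter. */
	static int use_addr(int ok, int *cnt)
	{
		if (ok)
			++*cnt;
		return ok ? 0 : -1;
	}

	int main(void)
	{
		int glue_cnt = 0;		/* caller that reports a summary */
		use_addr(1, &glue_cnt);
		use_addr(1, &glue_cnt);
		if (glue_cnt)
			printf("<= loaded %d glue addresses\n", glue_cnt);

		int cnt_ = 0;			/* throwaway, mirrors unroll_cname() */
		(void) use_addr(1, &cnt_);
		return 0;
	}
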
@@ -393,7 +393,7 @@ int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut,
 		const knot_dname_t *name, const struct kr_query *qry,
 		bool * restrict secured)
 {
-	VERBOSE_MSG(qry, "_find_cached\n");
+	//VERBOSE_MSG(qry, "_find_cached\n");
 	if (!ctx || !cut || !name) {
 		return kr_error(EINVAL);
 	}
@@ -425,7 +425,7 @@ int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut,
 			kr_cache_sync(&ctx->cache);
 			WITH_VERBOSE(qry) {
 				auto_free char *label_str = kr_dname_text(label);
-				VERBOSE_MSG(qry, " and found cut: %s\n", label_str);
+				VERBOSE_MSG(qry, "found cut: %s\n", label_str);
 			}
 			return kr_ok();
 		}
......
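
The surviving "found cut" message stays inside WITH_VERBOSE, so the allocating kr_dname_text() conversion itself is guarded, not just the printf. A sketch of that pattern with stand-in names (assuming WITH_VERBOSE amounts to a check of the global verbosity flag):

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static bool verbose_status = false;	/* stand-in for VERBOSE_STATUS */

	/* Stand-in for kr_dname_text(): pretend the conversion is costly. */
	static char *dname_text(const char *raw)
	{
		return strdup(raw);
	}

	static void log_found_cut(const char *label_raw)
	{
		if (!verbose_status)		/* mirrors the WITH_VERBOSE guard */
			return;
		char *label_str = dname_text(label_raw);
		printf("found cut: %s\n", label_str);
		free(label_str);
	}

	int main(void)
	{
		verbose_status = true;
		log_found_cut("example.com.");
		return 0;
	}
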