Commit 18b47e33 authored by Grigorii Demidov, committed by Marek Vavrusa

lib/cache: flags field was added to cache entry header

parent 9162b035
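The commit threads a new uint8_t flags value through the cache entry header and through the peek/insert API of lib/cache. For orientation, a minimal caller-side sketch of the updated kr_cache_peek_rr signature (txn, qname and now are hypothetical variables; error handling elided):

/* Sketch: peek a cached RRset and inspect the new flags out-parameter. */
uint8_t rank = 0, flags = 0;
uint32_t drift = now;                 /* replaced with drift on success */
knot_rrset_t rr;
knot_rrset_init(&rr, qname, KNOT_RRTYPE_A, KNOT_CLASS_IN);
if (kr_cache_peek_rr(&txn, &rr, &rank, &flags, &drift) == 0 &&
    (flags & KR_CACHE_FLAG_WCARD_PROOF)) {
	/* the cached answer still needs a wildcard expansion proof */
}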
@@ -330,7 +330,7 @@ int kr_cache_clear(struct kr_cache_txn *txn)
return ret;
}
int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp)
int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint8_t *rank, uint8_t *flags, uint32_t *timestamp)
{
if (!txn_is_valid(txn) || !rr || !timestamp) {
return kr_error(EINVAL);
@@ -345,6 +345,9 @@ int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank,
if (rank) {
*rank = entry->rank;
}
if (flags) {
*flags = entry->flags;
}
rr->rrs.rr_count = entry->count;
rr->rrs.data = entry->data;
return kr_ok();
@@ -399,7 +402,7 @@ int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t dr
return kr_ok();
}
int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp)
int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp)
{
if (!txn_is_valid(txn) || !rr) {
return kr_error(EINVAL);
@@ -415,6 +418,7 @@ int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_
.timestamp = timestamp,
.ttl = 0,
.rank = rank,
.flags = flags,
.count = rr->rrs.rr_count
};
knot_rdata_t *rd = rr->rrs.data;
@@ -429,7 +433,7 @@ int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_
return kr_cache_insert(txn, KR_CACHE_RR, rr->owner, rr->type, &header, data);
}
int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp)
int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint8_t *rank, uint8_t *flags, uint32_t *timestamp)
{
if (!txn_is_valid(txn) || !rr || !timestamp) {
return kr_error(EINVAL);
@@ -445,13 +449,16 @@ int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *ra
if (rank) {
*rank = entry->rank;
}
if (flags) {
*flags = entry->flags;
}
rr->type = KNOT_RRTYPE_RRSIG;
rr->rrs.rr_count = entry->count;
rr->rrs.data = entry->data;
return kr_ok();
}
int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp)
int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp)
{
if (!txn_is_valid(txn) || !rr) {
return kr_error(EINVAL);
@@ -467,6 +474,7 @@ int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint
.timestamp = timestamp,
.ttl = 0,
.rank = rank,
.flags = flags,
.count = rr->rrs.rr_count
};
for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
@@ -44,6 +44,15 @@ enum kr_cache_rank {
/* @note Rank must not exceed 6 bits */
};
/** Cache entry flags */
enum kr_cache_flag {
KR_CACHE_FLAG_NONE = 0,
KR_CACHE_FLAG_WCARD_PROOF = 1 /* Entry contains either a packet with a wildcard
* answer or a record for which a wildcard
* expansion proof is needed. */
};
/**
* Serialized form of the RRSet with inception timestamp and maximum TTL.
*/
@@ -52,7 +61,8 @@ struct kr_cache_entry
uint32_t timestamp;
uint32_t ttl;
uint16_t count;
uint16_t rank;
uint8_t rank;
uint8_t flags;
uint8_t data[];
};
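Splitting the former 16-bit rank into two 8-bit fields keeps the serialized header at 12 bytes, so the entry layout size is unchanged; since rank never exceeds 6 bits (see the note above), on little-endian machines an old entry's unused upper rank byte reads back as zero flags. A sketch of that size invariant as a hypothetical compile-time check, not part of the commit:

#include <stddef.h>
/* timestamp(4) + ttl(4) + count(2) + rank(1) + flags(1) = 12 bytes,
 * the same offset data[] had with the old uint16_t rank. */
_Static_assert(offsetof(struct kr_cache_entry, data) == 12,
               "cache entry header layout unchanged");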
@@ -192,11 +202,12 @@ int kr_cache_peek_rank(struct kr_cache_txn *txn, uint8_t tag, const knot_dname_t
* @param txn transaction instance
* @param rr query RRSet (its rdataset may be changed depending on the result)
* @param rank entry rank will be stored in this variable
* @param flags entry flags
* @param timestamp current time (will be replaced with drift if successful)
* @return 0 or an errcode
*/
KR_EXPORT
int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp);
int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint8_t *rank, uint8_t *flags, uint32_t *timestamp);
/**
* Clone read-only RRSet and adjust TTLs.
@@ -214,11 +225,12 @@ int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t dr
* @param txn transaction instance
* @param rr inserted RRSet
* @param rank rank of the data
* @param flags additional flags for the data
* @param timestamp current time
* @return 0 or an errcode
*/
KR_EXPORT
int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp);
int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp);
/**
* Peek the cache for the given RRset signature (name, type)
@@ -226,11 +238,12 @@ int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_
* @param txn transaction instance
* @param rr query RRSet (its rdataset and type may be changed depending on the result)
* @param rank entry rank will be stored in this variable
* @param flags entry flags will be stored in this variable
* @param timestamp current time (will be replaced with drift if successful)
* @return 0 or an errcode
*/
KR_EXPORT
int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp);
int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint8_t *rank, uint8_t *flags, uint32_t *timestamp);
/**
* Insert the RRSIG RRSet covering the selected type into the cache, replacing any existing data.
@@ -238,8 +251,9 @@ int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *ra
* @param txn transaction instance
* @param rr inserted RRSIG RRSet
* @param rank rank of the data
* @param flags additional flags for the data
* @param timestamp current time
* @return 0 or an errcode
*/
KR_EXPORT
int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp);
int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp);
@@ -230,53 +230,6 @@ int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
return vctx->result;
}
int kr_section_check_wcard(kr_rrset_validation_ctx_t *vctx)
{
const knot_pkt_t *pkt = vctx->pkt;
knot_section_t section_id = vctx->section_id;
const knot_dname_t *zone_name = vctx->zone_name;
const knot_pktsection_t *sec = knot_pkt_section(pkt, section_id);
for (unsigned i = 0; i < sec->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(sec, i);
if (rr->type == KNOT_RRTYPE_RRSIG) {
continue;
}
if ((rr->type == KNOT_RRTYPE_NS) && (vctx->section_id == KNOT_AUTHORITY)) {
continue;
}
if (!knot_dname_in(zone_name, rr->owner)) {
continue;
}
int covered_labels = knot_dname_labels(rr->owner, NULL);
if (knot_dname_is_wildcard(rr->owner)) {
/* The asterisk does not count, RFC4034 3.1.3, paragraph 3. */
--covered_labels;
}
for (unsigned j = 0; j < sec->count; ++j) {
const knot_rrset_t *rrsig = knot_pkt_rr(sec, j);
if (rrsig->type != KNOT_RRTYPE_RRSIG) {
continue;
}
if ((rr->rclass != rrsig->rclass) || !knot_dname_is_equal(rr->owner, rrsig->owner)) {
continue;
}
for (uint16_t k = 0; k < rrsig->rrs.rr_count; ++k) {
if (knot_rrsig_type_covered(&rrsig->rrs, k) != rr->type) {
continue;
}
int rrsig_labels = knot_rrsig_labels(&rrsig->rrs, k);
if (rrsig_labels > covered_labels) {
return kr_error(EINVAL);
}
if (rrsig_labels < covered_labels) {
vctx->flags |= KR_DNSSEC_VFLG_WEXPAND;
}
}
}
}
return kr_ok();
}
int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rrset_t *ta)
{
const knot_pkt_t *pkt = vctx->pkt;
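For context, the removed helper implemented the standard RFC 4034 §3.1.3 test: an answer is wildcard-expanded when the RRSIG Labels field is smaller than the label count of the owner name, where a leading asterisk does not count. A standalone sketch of that core comparison, with hypothetical names:

#include <stdbool.h>
/* Returns 1 if the RRSIG indicates wildcard expansion, 0 if not,
 * negative if the signature is malformed. */
static int wcard_expanded(int owner_labels, bool owner_is_wildcard,
                          int rrsig_labels)
{
	if (owner_is_wildcard) {
		--owner_labels;    /* the asterisk does not count, RFC 4034 3.1.3 */
	}
	if (rrsig_labels > owner_labels) {
		return -1;         /* RRSIG cannot cover more labels than the owner has */
	}
	return rrsig_labels < owner_labels;
}

With the new cache flag, this proof requirement survives caching instead of being recomputed on every cache hit, which is why the helper can be removed.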
@@ -119,14 +119,6 @@ KR_EXPORT KR_PURE
int kr_dnssec_key_match(const uint8_t *key_a_rdata, size_t key_a_rdlen,
const uint8_t *key_b_rdata, size_t key_b_rdlen);
/** Check whether wildcard expansion occurs in the specified section.
* @param vctx Pointer to validation context.
* @note vctx->keys, vctx->timestamp and vctx->has_nsec3 have no meaning here.
* @return 0 on success (KR_DNSSEC_VFLG_WEXPAND is set if expansion was detected) or an error code.
*/
KR_EXPORT KR_PURE
int kr_section_check_wcard(kr_rrset_validation_ctx_t *vctx);
/**
* Construct a DNSSEC key.
* @param key Pointer to be set to newly created DNSSEC key.
@@ -45,7 +45,7 @@ static void adjust_ttl(knot_rrset_t *rr, uint32_t drift)
}
static int loot_cache_pkt(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_dname_t *qname,
uint16_t rrtype, bool want_secure, uint32_t timestamp)
uint16_t rrtype, bool want_secure, uint32_t timestamp, uint8_t *flags)
{
struct kr_cache_entry *entry = NULL;
int ret = kr_cache_peek(txn, KR_CACHE_PKT, qname, rrtype, &entry, &timestamp);
@@ -80,17 +80,22 @@ static int loot_cache_pkt(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_
}
}
/* Copy cache entry flags */
if (flags) {
*flags = entry->flags;
}
return ret;
}
/** @internal Try to find a shortcut directly to searched packet. */
static int loot_pktcache(struct kr_cache_txn *txn, knot_pkt_t *pkt, struct kr_query *qry)
static int loot_pktcache(struct kr_cache_txn *txn, knot_pkt_t *pkt, struct kr_query *qry, uint8_t *flags)
{
uint32_t timestamp = qry->timestamp.tv_sec;
const knot_dname_t *qname = qry->sname;
uint16_t rrtype = qry->stype;
const bool want_secure = (qry->flags & QUERY_DNSSEC_WANT);
return loot_cache_pkt(txn, pkt, qname, rrtype, want_secure, timestamp);
return loot_cache_pkt(txn, pkt, qname, rrtype, want_secure, timestamp, flags);
}
static int pktcache_peek(knot_layer_t *ctx, knot_pkt_t *pkt)
@@ -115,11 +120,15 @@ static int pktcache_peek(knot_layer_t *ctx, knot_pkt_t *pkt)
}
/* Fetch either answer to original or minimized query */
int ret = loot_pktcache(&txn, pkt, qry);
uint8_t flags = 0;
int ret = loot_pktcache(&txn, pkt, qry, &flags);
kr_cache_txn_abort(&txn);
if (ret == 0) {
DEBUG_MSG(qry, "=> satisfied from cache\n");
qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
if (flags & KR_CACHE_FLAG_WCARD_PROOF) {
qry->flags |= QUERY_DNSSEC_WEXPAND;
}
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
knot_wire_set_aa(pkt->wire);
@@ -199,6 +208,7 @@ static int pktcache_stash(knot_layer_t *ctx, knot_pkt_t *pkt)
.timestamp = qry->timestamp.tv_sec,
.ttl = ttl,
.rank = KR_RANK_BAD,
.flags = KR_CACHE_FLAG_NONE,
.count = data.len
};
@@ -209,6 +219,11 @@ static int pktcache_stash(knot_layer_t *ctx, knot_pkt_t *pkt)
header.rank = KR_RANK_INSECURE;
}
/* Set cache flags */
if (qry->flags & QUERY_DNSSEC_WANT) {
header.flags |= KR_CACHE_FLAG_WCARD_PROOF;
}
/* Check if we can replace (allow current or better rank, SECURE is always accepted). */
if (header.rank < KR_RANK_SECURE) {
int cached_rank = kr_cache_peek_rank(&txn, KR_CACHE_PKT, qname, qtype, header.timestamp);
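Taken together, the pktcache changes round-trip the marker: the stash side records KR_CACHE_FLAG_WCARD_PROOF for answers resolved with DNSSEC, and the peek side translates it back into QUERY_DNSSEC_WEXPAND so the validator still handles the wildcard proof (copying the authority section) for cached answers. Condensed from the hunks above:

/* stash: remember that this DNSSEC answer carries a wildcard proof */
if (qry->flags & QUERY_DNSSEC_WANT) {
	header.flags |= KR_CACHE_FLAG_WCARD_PROOF;
}
/* peek: restore the query flag from the cached entry */
if (flags & KR_CACHE_FLAG_WCARD_PROOF) {
	qry->flags |= QUERY_DNSSEC_WEXPAND;
}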
@@ -37,7 +37,8 @@ static inline bool is_expiring(const knot_rrset_t *rr, uint32_t drift)
}
static int loot_rr(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_dname_t *name,
uint16_t rrclass, uint16_t rrtype, struct kr_query *qry, uint16_t *rank, bool fetch_rrsig)
uint16_t rrclass, uint16_t rrtype, struct kr_query *qry,
uint8_t *rank, uint8_t *flags, bool fetch_rrsig)
{
/* Check if record exists in cache */
int ret = 0;
@@ -45,9 +46,9 @@ static int loot_rr(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_dname_t
knot_rrset_t cache_rr;
knot_rrset_init(&cache_rr, (knot_dname_t *)name, rrtype, rrclass);
if (fetch_rrsig) {
ret = kr_cache_peek_rrsig(txn, &cache_rr, rank, &drift);
ret = kr_cache_peek_rrsig(txn, &cache_rr, rank, flags, &drift);
} else {
ret = kr_cache_peek_rr(txn, &cache_rr, rank, &drift);
ret = kr_cache_peek_rr(txn, &cache_rr, rank, flags, &drift);
}
if (ret != 0) {
return ret;
@@ -85,11 +86,11 @@ static int loot_rrcache(struct kr_cache *cache, knot_pkt_t *pkt, struct kr_query
return ret;
}
/* Lookup direct match first */
uint16_t rank = 0;
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, 0);
uint8_t rank = 0;
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, NULL, 0);
if (ret != 0 && rrtype != KNOT_RRTYPE_CNAME) { /* Chase CNAME if no direct hit */
rrtype = KNOT_RRTYPE_CNAME;
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, 0);
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, NULL, 0);
}
/* Record is flagged as INSECURE => doesn't have RRSIG. */
if (ret == 0 && (rank & KR_RANK_INSECURE)) {
@@ -97,7 +98,7 @@ static int loot_rrcache(struct kr_cache *cache, knot_pkt_t *pkt, struct kr_query
qry->flags &= ~QUERY_DNSSEC_WANT;
/* Record may have RRSIG, try to find it. */
} else if (ret == 0 && dobit) {
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, true);
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, NULL, true);
}
kr_cache_txn_abort(&txn);
return ret;
@@ -153,14 +154,14 @@ struct rrcache_baton
uint32_t min_ttl;
};
static int commit_rrsig(struct rrcache_baton *baton, uint16_t rank, knot_rrset_t *rr)
static int commit_rrsig(struct rrcache_baton *baton, uint8_t rank, uint8_t flags, knot_rrset_t *rr)
{
/* If not doing secure resolution, ignore (unvalidated) RRSIGs. */
if (!(baton->qry->flags & QUERY_DNSSEC_WANT)) {
return kr_ok();
}
/* Commit covering RRSIG to a separate cache namespace. */
return kr_cache_insert_rrsig(baton->txn, rr, rank, baton->timestamp);
return kr_cache_insert_rrsig(baton->txn, rr, rank, flags, baton->timestamp);
}
static int commit_rr(const char *key, void *val, void *data)
@@ -177,7 +178,7 @@ static int commit_rr(const char *key, void *val, void *data)
}
/* Save RRSIG in a special cache. */
uint16_t rank = KEY_FLAG_RANK(key);
uint8_t rank = KEY_FLAG_RANK(key);
/* A non-authoritative NS should never be trusted;
* it may be present in an otherwise secure answer, but it
* is only a hint for local state. */
@@ -189,7 +190,7 @@ static int commit_rr(const char *key, void *val, void *data)
rank |= KR_RANK_INSECURE;
}
if (KEY_COVERING_RRSIG(key)) {
return commit_rrsig(baton, rank, rr);
return commit_rrsig(baton, rank, KR_CACHE_FLAG_NONE, rr);
}
/* Accept only better rank (if not overriding) */
if (!(rank & KR_RANK_SECURE) && !(baton->qry->flags & QUERY_NO_CACHE)) {
@@ -201,7 +202,7 @@ static int commit_rr(const char *key, void *val, void *data)
knot_rrset_t query_rr;
knot_rrset_init(&query_rr, rr->owner, rr->type, rr->rclass);
return kr_cache_insert_rr(baton->txn, rr, rank, baton->timestamp);
return kr_cache_insert_rr(baton->txn, rr, rank, KR_CACHE_FLAG_NONE, baton->timestamp);
}
static int stash_commit(map_t *stash, struct kr_query *qry, struct kr_cache_txn *txn, struct kr_request *req)
@@ -183,29 +183,6 @@ static int validate_records(struct kr_query *qry, knot_pkt_t *answer, knot_mm_t
return ret;
}
static int check_wcard_expanded(struct kr_query *qry, knot_pkt_t *pkt, knot_section_t section_id)
{
kr_rrset_validation_ctx_t vctx = {
.pkt = pkt,
.section_id = section_id,
.keys = NULL,
.zone_name = qry->zone_cut.name,
.timestamp = 0,
.has_nsec3 = false,
.flags = 0,
.result = 0
};
int ret = kr_section_check_wcard(&vctx);
if (ret != 0) {
return ret;
}
if (vctx.flags & KR_DNSSEC_VFLG_WEXPAND) {
qry->flags |= QUERY_DNSSEC_WEXPAND;
}
return kr_ok();
}
static int validate_keyset(struct kr_query *qry, knot_pkt_t *answer, bool has_nsec3)
{
/* Merge DNSKEY records from answer that are below/at current cut. */
@@ -506,20 +483,16 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
* Do not revalidate data from cache, as it's already trusted. */
if (!(qry->flags & QUERY_CACHED)) {
ret = validate_records(qry, pkt, req->rplan.pool, has_nsec3);
} else {
/* Records were already validated.
* Check whether this is a wildcard answer. */
ret = check_wcard_expanded(qry, pkt, KNOT_ANSWER);
}
if (ret != 0) {
DEBUG_MSG(qry, "<= couldn't validate RRSIGs\n");
qry->flags |= QUERY_DNSSEC_BOGUS;
return KNOT_STATE_FAIL;
if (ret != 0) {
DEBUG_MSG(qry, "<= couldn't validate RRSIGs\n");
qry->flags |= QUERY_DNSSEC_BOGUS;
return KNOT_STATE_FAIL;
}
}
/* Check if wildcard expansion detected for final query.
* If yes, copy authority. */
if ((qry->parent == NULL) && (qry->flags & QUERY_DNSSEC_WEXPAND)) {
/* Wildcard expansion detected for final query.
* Copy authority. */
const knot_pktsection_t *auth = knot_pkt_section(pkt, KNOT_AUTHORITY);
for (unsigned i = 0; i < auth->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(auth, i);
@@ -299,10 +299,10 @@ int kr_zonecut_set_sbelt(struct kr_context *ctx, struct kr_zonecut *cut)
/** Fetch address for zone cut. */
static void fetch_addr(struct kr_zonecut *cut, const knot_dname_t *ns, uint16_t rrtype, struct kr_cache_txn *txn, uint32_t timestamp)
{
uint16_t rank = 0;
uint8_t rank = 0;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)ns, rrtype, KNOT_CLASS_IN);
if (kr_cache_peek_rr(txn, &cached_rr, &rank, &timestamp) != 0) {
if (kr_cache_peek_rr(txn, &cached_rr, &rank, NULL, &timestamp) != 0) {
return;
}
@@ -316,12 +316,12 @@ static void fetch_addr(struct kr_zonecut *cut, const knot_dname_t *ns, uint16_t
}
/** Fetch best NS for zone cut. */
static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut, const knot_dname_t *name, struct kr_cache_txn *txn, uint32_t timestamp, uint16_t * restrict rank)
static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut, const knot_dname_t *name, struct kr_cache_txn *txn, uint32_t timestamp, uint8_t * restrict rank)
{
uint32_t drift = timestamp;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
int ret = kr_cache_peek_rr(txn, &cached_rr, rank, &drift);
int ret = kr_cache_peek_rr(txn, &cached_rr, rank, NULL, &drift);
if (ret != 0) {
return ret;
}
@@ -355,11 +355,11 @@ static int fetch_rrset(knot_rrset_t **rr, const knot_dname_t *owner, uint16_t ty
return kr_error(ENOENT);
}
uint16_t rank = 0;
uint8_t rank = 0;
uint32_t drift = timestamp;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)owner, type, KNOT_CLASS_IN);
int ret = kr_cache_peek_rr(txn, &cached_rr, &rank, &drift);
int ret = kr_cache_peek_rr(txn, &cached_rr, &rank, NULL, &drift);
if (ret != 0) {
return ret;
}
@@ -409,7 +409,7 @@ int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut, const
/* Start at QNAME parent. */
while (txn) {
/* Fetch NS first and see if it's insecure. */
uint16_t rank = 0;
uint8_t rank = 0;
const bool is_root = (label[0] == '\0');
if (fetch_ns(ctx, cut, label, txn, timestamp, &rank) == 0) {
/* Flag as insecure if cached as this */