Commit 2f4317c1 authored by Vladimír Čunát's avatar Vladimír Čunát

Merge branch 'master' into policy-reserved-domains

parents 9635aa40 758b133d
......@@ -16,9 +16,10 @@ BUILD_CFLAGS += --coverage
endif
# Dependencies
$(eval $(call find_lib,libknot,2.3.1,yes))
$(eval $(call find_lib,libdnssec,2.3.1,yes))
$(eval $(call find_lib,libzscanner,2.3.1,yes))
KNOT_MINVER := 2.3.1
$(eval $(call find_lib,libknot,$(KNOT_MINVER),yes))
$(eval $(call find_lib,libdnssec,$(KNOT_MINVER),yes))
$(eval $(call find_lib,libzscanner,$(KNOT_MINVER),yes))
$(eval $(call find_lib,lmdb))
$(eval $(call find_lib,libuv,1.0,yes))
$(eval $(call find_lib,nettle,,yes))
......@@ -127,13 +128,13 @@ info:
# Verify required dependencies are met, as listed above
ifeq ($(HAS_libknot),no)
$(error libknot >= 2.3.1 required)
$(error libknot >= $(KNOT_MINVER) required)
endif
ifeq ($(HAS_libzscanner),no)
$(error libzscanner >= 2.3.1 required)
$(error libzscanner >= $(KNOT_MINVER) required)
endif
ifeq ($(HAS_libdnssec),no)
$(error libdnssec >= 2.3.1 required)
$(error libdnssec >= $(KNOT_MINVER) required)
endif
ifeq ($(HAS_lua),no)
$(error luajit required)
......
Knot Resolver 1.3.2 (2017-07-xx)
Knot Resolver 1.3.2 (2017-07-28)
================================
Security
--------
- fix possible opportunities to use insecure data from cache as keys
for validation
Bugfixes
--------
- daemon: check existence of config file even if rundir isn't specified
- policy.FORWARD and STUB: use RTT tracking to choose servers (#125, #208)
- dns64: fix CNAME problems (#203) It still won't work with query policies.
- dns64: fix CNAME problems (#203) It still won't work with policy.STUB.
- hints: better interpretation of hosts-like files (#204)
also, error out if a bad entry is encountered in the file
- dnssec: handle unknown DNSKEY/DS algorithms (#210)
- predict: fix the module, broken since 1.2.0 (#154)
Improvements
------------
......
# Project
MAJOR := 1
MINOR := 3
PATCH := 1
PATCH := 2
EXTRA :=
ABIVER := 3
BUILDMODE := dynamic
......
......@@ -144,6 +144,12 @@ The watchdog process must notify kresd about active file descriptors, and kresd
The daemon also supports `systemd socket activation`_, it is automatically detected and requires no configuration on the user's side.
To run the daemon by hand, such as under ``nohup``, use ``-f 1`` to start a single fork. For example:
.. code-block:: bash
$ nohup ./daemon/kresd -a 127.0.0.1 -f 1 &
Configuration
=============
......
......@@ -1188,7 +1188,10 @@ static int wrk_resolve(lua_State *L)
lua_error(L);
}
uint8_t dname[KNOT_DNAME_MAXLEN];
knot_dname_from_str(dname, lua_tostring(L, 1), sizeof(dname));
if (!knot_dname_from_str(dname, lua_tostring(L, 1), sizeof(dname))) {
lua_pushstring(L, "invalid qname");
lua_error(L);
};
/* Check class and type */
uint16_t rrtype = lua_tointeger(L, 2);
if (!lua_isnumber(L, 2)) {
......
......@@ -125,6 +125,12 @@ When you have all the dependencies ready, you can build and install.
Production code should be compiled with ``-DNDEBUG``.
If you build the binary with ``-DNOVERBOSELOG``, it won't be possible to turn on verbose logging; we advise packagers against using that flag.
.. note:: If you build with ``PREFIX``, you may need to also set the ``LDFLAGS`` for the libraries:
.. code-block:: bash
make LDFLAGS="-Wl,-rpath=/usr/local/lib" PREFIX="/usr/local"
Alternatively you can build only specific parts of the project, i.e. ``library``.
.. code-block:: bash
......@@ -290,7 +296,7 @@ You can hack on the container by changing the container entrypoint to shell like
.. _sphinx_rtd_theme: https://pypi.python.org/pypi/sphinx_rtd_theme
.. _GNU Make: https://www.gnu.org/software/make/
.. _pkg-config: https://www.freedesktop.org/wiki/Software/pkg-config/
.. _libknot: https://gitlab.labs.nic.cz/labs/knot
.. _libknot: https://gitlab.labs.nic.cz/knot/knot-dns
.. _cmocka: https://cmocka.org/
.. _Python: https://www.python.org/
.. _luasec: https://luarocks.org/modules/brunoos/luasec
......@@ -300,7 +306,7 @@ You can hack on the container by changing the container entrypoint to shell like
.. _boot2docker: http://boot2docker.io/
.. _deckard: https://gitlab.labs.nic.cz/knot/deckard
.. _deckard_doc: https://gitlab.labs.nic.cz/knot/resolver/blob/master/tests/README.rst
.. _deckard_doc: https://gitlab.labs.nic.cz/knot/knot-resolver/blob/master/tests/README.rst
.. _libsystemd: https://www.freedesktop.org/wiki/Software/systemd/
.. _dnstap: http://dnstap.info/
......
......@@ -76,7 +76,9 @@ it means that it yielded before and now it is resumed. This is useful in a situa
Writing layers
==============
The resolver :ref:`library <lib_index>` leverages the `processing API`_ from the libknot to separate packet processing code into layers.
.. warning:: FIXME: this dev-docs section is outdated! Better see comments in files instead, for now.
The resolver :ref:`library <lib_index>` leverages the processing API from the libknot to separate packet processing code into layers.
.. note:: This is only crash-course in the library internals, see the resolver :ref:`library <lib_index>` documentation for the complete overview of the services.
......@@ -272,8 +274,7 @@ As described in the layers, you can not only retrieve information about current
req:pop(qry)
.. _libknot: https://gitlab.labs.nic.cz/labs/knot/tree/master/src/libknot
.. _`processing API`: https://gitlab.labs.nic.cz/labs/knot/tree/master/src/libknot/processing
.. _bindings: https://gitlab.labs.nic.cz/knot/resolver/blob/master/daemon/lua/kres.lua#L361
.. _libknot: https://gitlab.labs.nic.cz/knot/knot-dns/tree/master/src/libknot
.. _bindings: https://gitlab.labs.nic.cz/knot/knot-resolver/blob/master/daemon/lua/kres.lua.in
.. |---| unicode:: U+02014 .. em dash
......@@ -236,15 +236,70 @@ int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
return vctx->result;
}
/* Fallbacks: implemented in newer libdnssec.
* Note: changing some from true to false is NOT enough to fully remove the support. */
#if KNOT_VERSION_HEX < ((2 << 16) | (6 << 8) | 0)
/** Fallback for older libdnssec: report which DNSKEY algorithms are implemented.
 * Anything not listed (e.g. ED25519/ED448 on old libdnssec) counts as unsupported. */
static bool dnssec_algorithm_key_support(dnssec_key_algorithm_t algo)
{
	bool supported;
	switch (algo) {
	case DNSSEC_KEY_ALGORITHM_DSA_SHA1:
	case DNSSEC_KEY_ALGORITHM_DSA_SHA1_NSEC3:
	case DNSSEC_KEY_ALGORITHM_RSA_SHA1:
	case DNSSEC_KEY_ALGORITHM_RSA_SHA1_NSEC3:
	case DNSSEC_KEY_ALGORITHM_RSA_SHA256:
	case DNSSEC_KEY_ALGORITHM_RSA_SHA512:
	case DNSSEC_KEY_ALGORITHM_ECDSA_P256_SHA256:
	case DNSSEC_KEY_ALGORITHM_ECDSA_P384_SHA384:
		supported = true;
		break;
	//case DNSSEC_KEY_ALGORITHM_ED25519:
	//case DNSSEC_KEY_ALGORITHM_ED448:
	default:
		supported = false;
		break;
	}
	return supported;
}
/** Fallback for older libdnssec: report which DS digest algorithms are implemented.
 * Unknown digest types count as unsupported. */
static bool dnssec_algorithm_digest_support(dnssec_key_digest_t algo)
{
	switch (algo) {
	case DNSSEC_KEY_DIGEST_SHA1:
	case DNSSEC_KEY_DIGEST_SHA256:
	case DNSSEC_KEY_DIGEST_SHA384:
		return true;
	default:
		return false;
	} /* dropped a stray ';' (empty statement) after the switch */
}
#endif
/** Check whether at least one DS record in @p ta uses a digest/key algorithm
 * pair that is implemented by the linked libdnssec (or our fallbacks). */
static bool kr_ds_algo_support(const knot_rrset_t *ta)
{
	const uint16_t rr_count = ta->rrs.rr_count;
	for (uint16_t i = 0; i < rr_count; ++i) {
		const bool digest_ok =
			dnssec_algorithm_digest_support(knot_ds_digest_type(&ta->rrs, i));
		const bool key_ok =
			dnssec_algorithm_key_support(knot_ds_alg(&ta->rrs, i));
		if (digest_ok && key_ok) {
			return true;
		}
	}
	return false;
}
int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rrset_t *ta)
{
const knot_pkt_t *pkt = vctx->pkt;
const knot_rrset_t *keys = vctx->keys;
if (!pkt || !keys || !ta) {
const bool ok = pkt && keys && ta && ta->rrs.rr_count && ta->rrs.data
&& ta->type == KNOT_RRTYPE_DS;
if (!ok) {
assert(false);
return kr_error(EINVAL);
}
/* Check if at least one DS has a usable algorithm pair. */
if (!kr_ds_algo_support(ta)) {
/* See RFC6840 5.2. */
return vctx->result = kr_error(DNSSEC_INVALID_DS_ALGORITHM);
}
/* RFC4035 5.2, bullet 1
* The supplied DS record has been authenticated.
* It has been validated or is part of a configured trust anchor.
......@@ -273,6 +328,7 @@ int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rrset_t *ta)
assert (vctx->result == 0);
return vctx->result;
}
/* No usable key found */
vctx->result = kr_error(ENOENT);
return vctx->result;
......@@ -363,7 +419,7 @@ int kr_dnssec_key_from_rdata(struct dseckey **key, const knot_dname_t *kown, con
ret = dnssec_key_set_rdata(new_key, &binary_key);
if (ret != DNSSEC_EOK) {
dnssec_key_free(new_key);
return kr_error(ENOMEM);
return kr_error(ret);
}
if (kown) {
ret = dnssec_key_set_dname(new_key, kown);
......
......@@ -85,9 +85,11 @@ int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
size_t key_pos, const struct dseckey *key);
/**
* Check whether the DNSKEY rrset matches the supplied trust anchor RRSet.
* @param vctx Pointer to validation context.
* @param ta Trust anchor RRSet against which to validate the DNSKEY RRSet.
* @return 0 or error code, same as vctx->result.
* @param vctx Pointer to validation context.
* @param ta Trust anchor RRSet against which to validate the DNSKEY RRSet.
* @return 0 or error code, same as vctx->result. In particular,
* DNSSEC_INVALID_DS_ALGORITHM if *every* DS record is unusable
* due to unimplemented DNSKEY or DS algorithm.
*/
int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rrset_t *ta);
......@@ -130,6 +132,7 @@ int kr_dnssec_key_match(const uint8_t *key_a_rdata, size_t key_a_rdlen,
* @param kown DNSKEY owner name.
* @param rdata DNSKEY RDATA
* @param rdlen DNSKEY RDATA length
* @return 0 or error code; in particular: DNSSEC_INVALID_KEY_ALGORITHM
*/
int kr_dnssec_key_from_rdata(struct dseckey **key, const knot_dname_t *kown, const uint8_t *rdata, size_t rdlen);
......
......@@ -40,7 +40,6 @@ static int authenticate_ds(const dnssec_key_t *key, dnssec_binary_t *ds_rdata, u
dnssec_binary_t computed_ds = {0, };
int ret = dnssec_key_create_ds(key, digest_type, &computed_ds);
if (ret != DNSSEC_EOK) {
ret = kr_error(ENOMEM);
goto fail;
}
......@@ -53,7 +52,7 @@ static int authenticate_ds(const dnssec_key_t *key, dnssec_binary_t *ds_rdata, u
fail:
dnssec_binary_free(&computed_ds);
return ret;
return kr_error(ret);
}
int kr_authenticate_referral(const knot_rrset_t *ref, const dnssec_key_t *key)
......@@ -73,11 +72,12 @@ int kr_authenticate_referral(const knot_rrset_t *ref, const dnssec_key_t *key)
};
ret = authenticate_ds(key, &ds_rdata, knot_ds_digest_type(&ref->rrs, i));
if (ret == 0) { /* Found a good DS */
break;
return kr_ok();
}
rd = kr_rdataset_next(rd);
}
return ret;
return kr_error(ret);
}
/**
......
......@@ -23,7 +23,8 @@
* Performs referral authentication according to RFC4035 5.2, bullet 2
* @param ref Referral RRSet. Currently only DS can be used.
* @param key Already parsed key.
* @return 0 or error code.
* @return 0 or error code. In particular: DNSSEC_INVALID_DS_ALGORITHM
* in case *all* DSs in ref use an unimplemented algorithm.
*/
int kr_authenticate_referral(const knot_rrset_t *ref, const dnssec_key_t *key);
......
......@@ -23,6 +23,8 @@
* backed by an array.
*
* @note Maximum object size is 2^16 bytes, see ::pack_objlen_t
* @TODO If the pack data gets corrupted (e.g. a truncated object), iteration
* may end up in an infinite loop, because object boundaries are detected
* by pointer equality only.
*
* # Example usage:
*
......@@ -113,6 +115,23 @@ static inline uint8_t *pack_obj_next(uint8_t *it)
return pack_obj_val(it) + pack_obj_len(it);
}
/** Return pointer to the last packed object. */
/** Return pointer to the last packed object, or NULL if the pack is empty.
 * Walks the pack from the head; cost is linear in the number of objects. */
static inline uint8_t *pack_last(pack_t pack)
{
	if (pack.len == 0) {
		return NULL;
	}
	uint8_t *it = pack_head(pack);
	uint8_t *tail = pack_tail(pack);
	while (true) {
		uint8_t *next = pack_obj_next(it);
		/* Use >= instead of ==: on corrupted data the step could jump
		 * past the tail, and an exact-equality test would loop forever. */
		if (next >= tail) {
			return it;
		}
		it = next;
	}
}
/** Push object to the end of the pack
* @return 0 on success, negative number on failure
*/
......
......@@ -88,7 +88,6 @@ static int loot_rr(struct kr_cache *cache, knot_pkt_t *pkt, const knot_dname_t *
return kr_error(ENOENT);
}
/* Mark as expiring if it has less than 1% TTL (or less than 5s) */
if (is_expiring(&cache_rr, drift)) {
qry->flags |= QUERY_EXPIRING;
}
......@@ -271,7 +270,6 @@ struct rrcache_baton
struct kr_query *qry;
struct kr_cache *cache;
unsigned timestamp;
uint32_t min_ttl;
};
static int commit_rrsig(struct rrcache_baton *baton, uint8_t rank, uint8_t flags, knot_rrset_t *rr)
......@@ -288,14 +286,6 @@ static int commit_rr(const char *key, void *val, void *data)
{
knot_rrset_t *rr = val;
struct rrcache_baton *baton = data;
/* Ensure minimum TTL */
knot_rdata_t *rd = rr->rrs.data;
for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
if (knot_rdata_ttl(rd) < baton->min_ttl) {
knot_rdata_set_ttl(rd, baton->min_ttl);
}
rd = kr_rdataset_next(rd);
}
/* Save RRSIG in a special cache. */
uint8_t rank = KEY_FLAG_RANK(key);
......@@ -310,7 +300,19 @@ static int commit_rr(const char *key, void *val, void *data)
* This way they would have to hit the first answer (whenever TTL expires). */
if (cached_rank >= 0) {
VERBOSE_MSG(baton->qry, "=> orig. rank: 0%0.2o\n", cached_rank);
if (cached_rank >= rank) {
bool accept = rank > cached_rank;
/* Additionally accept equal rank if the cached RR is expiring.
* This is primarily for prefetching from predict module. */
if (rank == cached_rank) {
uint32_t drift = baton->timestamp;
knot_rrset_t cache_rr;
knot_rrset_init(&cache_rr, rr->owner, rr->type, rr->rclass);
int ret = kr_cache_peek_rr(baton->cache, &cache_rr, NULL, NULL, &drift);
if (ret != kr_ok() || is_expiring(&cache_rr, drift)) {
accept = true;
}
}
if (!accept) {
return kr_ok();
}
}
......@@ -343,7 +345,6 @@ static int stash_commit(map_t *stash, struct kr_query *qry, struct kr_cache *cac
.qry = qry,
.cache = cache,
.timestamp = qry->timestamp.tv_sec,
.min_ttl = MAX(DEFAULT_MINTTL, cache->ttl_min),
};
return map_walk(stash, &commit_rr, &baton);
}
......@@ -370,6 +371,8 @@ static int stash_selected(struct kr_request *req, knot_pkt_t *pkt, map_t *stash,
if (!arr->len) {
return kr_ok();
}
uint32_t min_ttl = MAX(DEFAULT_MINTTL, req->ctx->cache.ttl_min);
/* uncached entries are located at the end */
for (ssize_t i = arr->len - 1; i >= 0; --i) {
ranked_rr_array_entry_t *entry = arr->at[i];
......@@ -379,7 +382,17 @@ static int stash_selected(struct kr_request *req, knot_pkt_t *pkt, map_t *stash,
if (entry->cached) {
continue;
}
const knot_rrset_t *rr = entry->rr;
knot_rrset_t *rr = entry->rr;
/* Ensure minimum TTL */
knot_rdata_t *rd = rr->rrs.data;
for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
if (knot_rdata_ttl(rd) < min_ttl) {
knot_rdata_set_ttl(rd, min_ttl);
}
rd = kr_rdataset_next(rd);
}
/* Skip NSEC3 RRs and their signatures. We don't use them this way.
* They would be stored under the hashed name, etc. */
if (kr_rrset_type_maysig(rr) == KNOT_RRTYPE_NSEC3) {
......
......@@ -887,6 +887,14 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
if (ret == kr_error(EAGAIN)) {
VERBOSE_MSG(qry, ">< cut changed, needs revalidation\n");
return KR_STATE_YIELD;
} else if (ret == kr_error(DNSSEC_INVALID_DS_ALGORITHM)) {
VERBOSE_MSG(qry, ">< all DS entries use unsupported algorithm pairs, going insecure\n");
/* ^ the message is a bit imprecise to avoid being too verbose */
qry->flags &= ~QUERY_DNSSEC_WANT;
qry->flags |= QUERY_DNSSEC_INSECURE;
rank_records(ctx, KR_RANK_INSECURE);
mark_insecure_parents(qry);
return KR_STATE_DONE;
} else if (ret != 0) {
VERBOSE_MSG(qry, "<= bad keys, broken trust chain\n");
qry->flags |= QUERY_DNSSEC_BOGUS;
......
......@@ -958,6 +958,7 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query
if (qry->parent != NULL &&
!(qry->forward_flags & QUERY_CNAME) &&
!(qry->flags & QUERY_DNS64_MARK) &&
knot_dname_in(qry->parent->zone_cut.name, qry->zone_cut.name)) {
return KR_STATE_PRODUCE;
}
......
......@@ -354,6 +354,10 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
if (ret != 0) {
return ret;
}
/* Note: we accept *any* rank from the cache. We assume that nothing
* completely untrustworthy could get into the cache, e.g. out-of-bailiwick
* records that weren't validated.
*/
/* Materialize as we'll going to do more cache lookups. */
knot_rrset_t rr_copy;
......@@ -384,10 +388,10 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
}
/**
* Fetch RRSet of given type. (and of reasonable trustworthiness)
* Fetch secure RRSet of given type.
*/
static int fetch_rrset(knot_rrset_t **rr, struct kr_cache *cache,
const knot_dname_t *owner, uint16_t type, knot_mm_t *pool, uint32_t timestamp)
static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache,
const knot_dname_t *owner, uint16_t type, knot_mm_t *pool, uint32_t timestamp)
{
if (!rr) {
return kr_error(ENOENT);
......@@ -401,8 +405,7 @@ static int fetch_rrset(knot_rrset_t **rr, struct kr_cache *cache,
if (ret != 0) {
return ret;
}
const bool rankOK = kr_rank_test(rank, KR_RANK_SECURE)
|| (kr_rank_test(rank, KR_RANK_INSECURE) && kr_rank_test(rank, KR_RANK_AUTH));
const bool rankOK = kr_rank_test(rank, KR_RANK_SECURE);
if (!rankOK) {
return kr_error(ENOENT);
}
......@@ -448,9 +451,9 @@ int kr_zonecut_find_cached(struct kr_context *ctx, struct kr_zonecut *cut, const
}
/* Fetch DS and DNSKEY if caller wants secure zone cut */
if (*secured || is_root) {
fetch_rrset(&cut->trust_anchor, &ctx->cache, label,
fetch_secure_rrset(&cut->trust_anchor, &ctx->cache, label,
KNOT_RRTYPE_DS, cut->pool, timestamp);
fetch_rrset(&cut->key, &ctx->cache, label,
fetch_secure_rrset(&cut->key, &ctx->cache, label,
KNOT_RRTYPE_DNSKEY, cut->pool, timestamp);
}
update_cut_name(cut, label);
......
......@@ -51,4 +51,4 @@ Dependencies
^^^^^^^^^^^^
* `Nettle <https://www.lysator.liu.se/~nisse/nettle/>`_ required for HMAC-SHA256
* development version of `libknot (master branch) <https://gitlab.labs.nic.cz/labs/knot/tree/master>`_ for DNS cookies handling
......@@ -5,7 +5,7 @@ DNS64
The module for :rfc:`6147` DNS64 AAAA-from-A record synthesis, it is used to enable client-server communication between an IPv6-only client and an IPv4-only server. See the well written `introduction`_ in the PowerDNS documentation.
.. warning:: The module currently won't work well with query policies.
.. warning:: The module currently won't work well with policy.STUB.
.. tip:: The A record sub-requests will be DNSSEC secured, but the synthetic AAAA records can't be. Make sure the last mile between stub and resolver is secure to avoid spoofing.
......
......@@ -44,7 +44,7 @@ struct dnstap_data {
/*
* dt_pack packs the dnstap message for transport
* https://gitlab.labs.nic.cz/labs/knot/blob/master/src/contrib/dnstap/dnstap.c#L24
* https://gitlab.labs.nic.cz/knot/knot-dns/blob/master/src/contrib/dnstap/dnstap.c#L24
* */
uint8_t* dt_pack(const Dnstap__Dnstap *d, uint8_t **buf, size_t *sz)
{
......@@ -65,7 +65,7 @@ uint8_t* dt_pack(const Dnstap__Dnstap *d, uint8_t **buf, size_t *sz)
}
/* set_address fills in address detail in dnstap_message
* https://gitlab.labs.nic.cz/labs/knot/blob/master/src/contrib/dnstap/message.c#L28
* https://gitlab.labs.nic.cz/knot/knot-dns/blob/master/src/contrib/dnstap/message.c#L28
*/
static void set_address(const struct sockaddr *sockaddr,
ProtobufCBinaryData *addr,
......@@ -236,7 +236,7 @@ int dnstap_deinit(struct kr_module *module) {
}
/* dnstap_unix_writer returns a unix fstream writer
* https://gitlab.labs.nic.cz/labs/knot/blob/master/src/knot/modules/dnstap.c#L159
* https://gitlab.labs.nic.cz/knot/knot-dns/blob/master/src/knot/modules/dnstap.c#L159
*/
static struct fstrm_writer* dnstap_unix_writer(const char *path) {
......
......@@ -22,7 +22,7 @@ Examples
hints.root({
['j.root-servers.net.'] = { '2001:503:c27::2:30', '192.58.128.30' }
})
-- Set a custom hint
-- Add a custom hint
hints['foo.bar'] = '127.0.0.1'
.. note:: The ``policy`` module applies before ``hints``, meaning e.g. that hints for special names (:rfc:`6761#section-6`) like ``localhost`` or ``test`` will get shadowed by ``policy`` rules by default.
......@@ -60,6 +60,12 @@ Properties
Add a hostname - address pair hint.
.. note::
If multiple addresses have been added for a name, all are returned in a forward query.
If multiple names have been added to an address, the last one defined is returned
in a corresponding PTR query.
.. function:: hints.del(pair)
:param string pair: ``hostname address`` i.e. ``"localhost 127.0.0.1"``, or just ``hostname``
......
This diff is collapsed.
......@@ -3,7 +3,7 @@
Prefetching records
-------------------
The module tracks expiring records (having less than 5% of original TTL) and batches them for predict.
The module refreshes records that are about to expire when they're used (having less than 1% of original TTL).
This improves latency for frequently used records, as they are fetched in advance.
It is also able to learn usage patterns and repetitive queries that the server makes. For example, if
......@@ -29,6 +29,7 @@ Example configuration
Defaults are 15 minutes window, 6 hours period.
.. tip:: Use period 0 to turn off prediction and just do prefetching of expiring records.
That works even without the 'stats' module.
Exported metrics
^^^^^^^^^^^^^^^^
......@@ -47,4 +48,4 @@ Properties
Reconfigure the predictor to given tracking window and period length. Both parameters are optional.
Window length is in minutes, period is a number of windows that can be kept in memory.
e.g. if a ``window`` is 15 minutes, a ``period`` of "24" means 6 hours.
\ No newline at end of file
e.g. if a ``window`` is 15 minutes, a ``period`` of "24" means 6 hours.
......@@ -31,7 +31,7 @@ function predict.drain(ev)
local deleted = 0
for key, val in pairs(predict.queue) do
local qtype, qname = key:match('(%S*)%s(.*)')
worker.resolve(qname, kres.type[qtype], 1, kres.query.NO_CACHE)
worker.resolve(qname, kres.type[qtype], kres.class.IN, kres.query.NO_CACHE)
predict.queue[key] = nil
deleted = deleted + 1
if deleted >= predict.batch then
......@@ -54,7 +54,7 @@ local function enqueue(queries)
local nr_queries = #queries
for i = 1, nr_queries do
local entry = queries[i]
local key = string.format('%s %s', entry.stype, entry.name)
local key = string.format('%s %s', entry.type, entry.name)
if not predict.queue[key] then
predict.queue[key] = 1
queued = queued + 1
......@@ -63,44 +63,42 @@ local function enqueue(queries)
return queued
end
-- Prefetch soon-to-expire records
function predict.prefetch()
local queries = stats.expiring()
stats.clear_expiring()
return enqueue(queries)
-- Enqueue queries stored in the same format as predict.queue / predict.log
-- (a map of "TYPE name" -> value). Returns the number of newly queued keys.
local function enqueue_from_log(current)
	if not current then return 0 end
	-- was an accidental global; keep the counter function-local
	local queued = 0
	for key, val in pairs(current) do
		if val and not predict.queue[key] then
			predict.queue[key] = val
			queued = queued + 1
		end
	end
	return queued
end
-- Sample current epoch, return number of sampled queries
function predict.sample(epoch_now)
if not epoch_now then return 0, 0 end
local current = predict.log[epoch_now] or {}
local queries = stats.frequent()
stats.clear_frequent()
local queued = 0
local current = predict.log[epoch_now]
if predict.epoch ~= epoch_now or current == nil then
if current ~= nil then
queued = enqueue(current)
end
current = {}
end
local nr_samples = #queries
for i = 1, nr_samples do
local entry = queries[i]
local key = string.format('%s %s', entry.stype, entry.name)
local key = string.format('%s %s', entry.type, entry.name)
current[key] = 1
end
predict.log[epoch_now] = current
return nr_samples, queued
return nr_samples
end
-- Predict queries for the upcoming epoch
local function generate(epoch_now)
if not epoch_now then return 0 end
local queued = 0
local period = predict.period + 1
for i = 1, predict.period / 2 - 1 do
local current = predict.log[(epoch_now - i) % period]
local past = predict.log[(epoch_now - 2*i) % period]
local current = predict.log[(epoch_now - i - 1) % predict.period + 1]
local past = predict.log[(epoch_now - 2*i - 1) % predict.period + 1]
if current and past then
for k, v in pairs(current) do
if past[k] ~= nil and not predict.queue[k] then
......@@ -114,19 +112,29 @@ local function generate(epoch_now)
end
function predict.process(ev)
if not stats then error("'stats' module required") end
if (predict.period or 0) ~= 0 and not stats then
error("'stats' module required")
end
-- Start a new epoch, or continue sampling
predict.ev_sample = nil
local epoch_now = current_epoch()
local nr_learned, nr_queued = predict.sample(epoch_now)
-- End of epoch, predict next
local nr_queued = 0
-- End of epoch
if predict.epoch ~= epoch_now then
stats['predict.epoch'] = epoch_now
predict.epoch = epoch_now
-- enqueue records from upcoming epoch
nr_queued = enqueue_from_log(predict.log[epoch_now])
-- predict next epoch
nr_queued = nr_queued + generate(epoch_now)
-- clear log for new epoch
predict.log[epoch_now] = {}
end
-- Prefetch expiring records
nr_queued = nr_queued + predict.prefetch()
-- Sample current epoch
local nr_learned = predict.sample(epoch_now)
-- Dispatch predicted queries
if nr_queued > 0 then
predict.queue_len = predict.queue_len + nr_queued
......@@ -136,8 +144,10 @@ function predict.process(ev)
end
end
predict.ev_sample = event.after(next_event(), predict.process)
stats['predict.queue'] = predict.queue_len
stats['predict.learned'] = nr_learned
if stats then
stats['predict.queue'] = predict.queue_len
stats['predict.learned'] = nr_learned
end
collectgarbage()
end
......@@ -169,4 +179,19 @@ function predict.config(config)
predict.init()
end
predict.layer = {
	-- Re-resolve, immediately after a request finishes, every (sub-)query that
	-- was answered from expiring cache records. Doing it right away is simplest
	-- and avoids creating (new) large bursts of activity.
	finish = function (state, req)
		req = kres.request_t(req)
		local resolved = req.rplan.resolved
		local count = tonumber(resolved.len) -- size_t doesn't work for some reason
		for i = 0, count - 1 do
			local qry = resolved.at[i]
			local expiring = bit.band(qry.flags, kres.query.EXPIRING) ~= 0
			if expiring then
				worker.resolve(kres.dname2str(qry.sname), qry.stype,
				               qry.sclass, kres.query.NO_CACHE)
			end
		end
	end
}
return predict
......@@ -20,7 +20,7 @@ CMD ["/usr/local/sbin/kresd"]
RUN \
apk --update add ${RUN_PKGS} && \
apk add --virtual build-dep ${BUILD_PKGS} && \
git clone https://gitlab.labs.nic.cz/knot/resolver.git /tmp/build && \
git clone https://gitlab.labs.nic.cz/knot/knot-resolver.git /tmp/build && \
cd /tmp/build && \
./scripts/bootstrap-depends.sh /usr/local && \
make -j4 install && \
......
......@@ -130,15 +130,11 @@ function pkg {
PIP_PKGS="dnspython==1.11 cpp-coveralls Jinja2"
if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
brew update
brew unlink python
brew install python
brew link --overwrite python || true
for p in python makedepend hiredis libmemcached protobuf-c cmocka jansson gnutls luajit libuv; do
for p in makedepend hiredis libmemcached protobuf-c cmocka jansson gnutls luajit libuv; do
echo "BEGIN $p";
brew install $p
echo "END $p";
done
pip install --upgrade pip || true
pip install ${PIP_PKGS}