Commit 9d5beac5 authored by Vladimír Čunát

lru: new implementation and interface

The implementation is now similar to the set-associative caches
used by x86 CPUs.  The API is also changed a bit, leading to a
slight simplification of our usage patterns.
parent 6d8c2b0c
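
For orientation, a minimal sketch of the new interface as it is used throughout this diff (lru.h itself is not shown here; the meaning of the two allocator arguments to lru_create() is an assumption inferred from lru_create_impl() below):

#include "lib/generic/lru.h"

typedef lru_t(unsigned) example_lru_t;          /* value type goes into the macro */

static void lru_example(knot_mm_t *pool)
{
	example_lru_t *lru;
	/* assumed: the 3rd argument allocates the table, the 4th the items
	 * (NULL falling back to malloc, cf. the engine_deinit() comment below) */
	lru_create(&lru, 65536, pool, NULL);
	if (!lru)
		return;

	unsigned *v = lru_get_new(lru, "key", 3);   /* find or insert; may return NULL */
	if (v)
		*v += 1;
	unsigned *w = lru_get_try(lru, "key", 3);   /* find only, no insertion */
	(void)w;

	lru_reset(lru);  /* clear all entries (replaces the old lru_deinit + lru_init pattern) */
	lru_free(lru);   /* free the items and the table itself */
}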
......@@ -5,7 +5,7 @@ include platform.mk
all: info lib daemon modules
install: lib-install daemon-install modules-install etc-install
check: all tests
clean: contrib-clean lib-clean daemon-clean modules-clean tests-clean doc-clean
clean: contrib-clean lib-clean daemon-clean modules-clean tests-clean doc-clean bench-clean
doc: doc-html
.PHONY: all install check clean doc info
......
......@@ -27,9 +27,9 @@ $(foreach bench,$(bench_BIN),$(eval $(call make_bench,$(bench))))
.PHONY: bench bench-clean
bench-clean: $(foreach bench,$(bench_BIN),$(bench)-clean)
bench: $(foreach bench,$(bench_BIN),bench/$(bench))
# Test LRU with increasing overfill, misses should increase ~ linearly
@./bench/bench_lru 22 bench/bench_lru_set1.tsv - 65536 # fill = 1
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 32768 # fill = 2
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 16384 # fill = 4
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 8192 # fill = 8
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 4096 # fill = 16
\ No newline at end of file
@echo "Test LRU with increasing overfill, misses should increase ~ linearly" >&2
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 65536 # fill ~ 1
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 32768 # fill ~ 2
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 16384 # fill ~ 4
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 8192 # fill ~ 8
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 4096 # fill ~ 16
......@@ -17,6 +17,7 @@
#include <math.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>
......@@ -26,17 +27,25 @@
typedef kr_nsrep_lru_t lru_bench_t;
#define p_out(...) do { \
printf(__VA_ARGS__); \
fflush(stdout); \
} while (0)
#define p_err(...) fprintf(stderr, __VA_ARGS__)
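/* p_out() emits the machine-readable CSV fields on stdout, while p_err() emits
 * the human-readable labels on stderr (cf. "Standard output contains
 * csv-formatted lines" in usage() below), so redirecting stderr away leaves plain CSV. */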
static int die(const char *cause) {
static int die(const char *cause)
{
fprintf(stderr, "%s: %s\n", cause, strerror(errno));
exit(1);
}
static void time_get(struct timeval *tv) {
static void time_get(struct timeval *tv)
{
if (gettimeofday(tv, NULL))
die("gettimeofday");
}
static void time_print_diff(struct timeval *tv, size_t op_count) {
static void time_print_diff(struct timeval *tv, size_t op_count)
{
struct timeval now;
time_get(&now);
now.tv_sec -= tv->tv_sec;
......@@ -48,11 +57,16 @@ static void time_print_diff(struct timeval *tv, size_t op_count) {
size_t speed = round((double)(op_count) / 1000
/ (now.tv_sec + (double)(now.tv_usec)/1000000));
printf("\t%ld.%06d s, \t %zd kop/s\n", now.tv_sec, (int)now.tv_usec, speed);
p_out("%ld.%06d", now.tv_sec, (int)now.tv_usec);
p_err(" s"); p_out(","); p_err("\t");
p_out("%zd", speed);
p_err(" kops/s"); p_out(","); p_err("\n");
}
/// initialize seed for random()
static int ssrandom(char *s) {
static int ssrandom(char *s)
{
if (*s == '-') { // initialize from time
struct timeval now;
time_get(&now);
......@@ -75,7 +89,8 @@ struct key {
};
/// read lines from a file and reorder them randomly
static struct key * read_lines(const char *fname, size_t *count) {
static struct key * read_lines(const char *fname, size_t *count, char **pfree)
{
// read the file at once
int fd = open(fname, O_RDONLY);
if (fd < 0)
......@@ -85,6 +100,7 @@ static struct key * read_lines(const char *fname, size_t *count) {
die("stat");
size_t flen = (size_t)st.st_size;
char *fbuf = malloc(flen + 1);
*pfree = fbuf;
if (fbuf == NULL)
die("malloc");
if (read(fd, fbuf, flen) < 0)
......@@ -101,7 +117,11 @@ static struct key * read_lines(const char *fname, size_t *count) {
}
*count = lines;
size_t avg_len = (flen + 1) / lines - 1;
printf("%zu lines read, average length %zu\n", lines, avg_len);
p_err("lines read: ");
p_out("%zu,", lines);
p_err("\taverage length ");
p_out("%zu,", avg_len);
struct key *result = calloc(lines, sizeof(struct key));
result[0].chars = fbuf;
......@@ -137,26 +157,33 @@ static struct key * read_lines(const char *fname, size_t *count) {
#define lru_get_try lru_get
#endif
static void usage(const char *progname) {
fprintf(stderr, "usage: %s <log_count> <input> <seed> [lru_size]\n"
"The seed must be at least 12 characters or \"-\".\n" , progname);
static void usage(const char *progname)
{
p_err("usage: %s <log_count> <input> <seed> [lru_size]\n", progname);
p_err("The seed must be at least 12 characters or \"-\".\n"
"Standard output contains csv-formatted lines.\n");
exit(1);
}
int main(int argc, char ** argv) {
int main(int argc, char ** argv)
{
if (argc != 4 && argc != 5)
usage(argv[0]);
if (ssrandom(argv[3]) < 0)
usage(argv[0]);
p_out("\n");
size_t key_count;
struct key *keys = read_lines(argv[2], &key_count);
char *data_to_free = NULL;
struct key *keys = read_lines(argv[2], &key_count, &data_to_free);
size_t run_count;
{
size_t run_log = atoi(argv[1]);
assert(run_log < 64);
run_count = 1ULL << run_log;
printf("test run length: 2^%zd\n", run_log);
p_err("\ntest run length:\t2^");
p_out("%zd,", run_log);
}
struct timeval time;
......@@ -164,7 +191,7 @@ int main(int argc, char ** argv) {
lru_bench_t *lru;
#ifdef lru_create
lru_create(&lru, lru_size, NULL);
lru_create(&lru, lru_size, NULL, NULL);
#else
lru = malloc(lru_size(lru_bench_t, lru_size));
if (lru)
......@@ -172,12 +199,19 @@ int main(int argc, char ** argv) {
#endif
if (!lru)
die("malloc");
printf("LRU size:\t%d\n", lru_size);
p_err("\nLRU capacity:\t");
p_out("%d,",
#ifdef lru_capacity
lru_capacity(lru) // report real capacity, if provided
#else
lru_size
#endif
);
size_t miss = 0;
p_err("\nload everything:\t");
time_get(&time);
printf("load everything:");
for (size_t i = 0, ki = key_count; i < run_count; ++i, --ki) {
for (size_t i = 0, ki = key_count - 1; i < run_count; ++i, --ki) {
unsigned *r = lru_get_new(lru, keys[ki].chars, keys[ki].len);
if (!r || *r == 0)
++miss;
......@@ -187,12 +221,14 @@ int main(int argc, char ** argv) {
ki = key_count;
}
time_print_diff(&time, run_count);
printf("LRU misses:\t%zd%%\n", (miss * 100 + 50) / run_count);
p_err("LRU misses [%%]:\t");
p_out("%zd,",(miss * 100 + 50) / run_count);
p_err("\n");
unsigned accum = 0; // compute something to make sure compiler can't remove code
p_err("search everything:\t");
time_get(&time);
printf("search everything:");
for (size_t i = 0, ki = key_count; i < run_count; ++i, --ki) {
for (size_t i = 0, ki = key_count - 1; i < run_count; ++i, --ki) {
unsigned *r = lru_get_try(lru, keys[ki].chars, keys[ki].len);
if (r)
accum += *r;
......@@ -200,7 +236,14 @@ int main(int argc, char ** argv) {
ki = key_count;
}
time_print_diff(&time, run_count);
printf("ignore: %u\n", accum);
p_err("ignore: %u\n", accum);
// free memory, at least with new LRU
#ifdef lru_create
lru_free(lru);
#endif
free(keys);
free(data_to_free);
return 0;
}
......
......@@ -660,12 +660,9 @@ static int cache_clear(lua_State *L)
}
/* Clear reputation tables */
lru_deinit(engine->resolver.cache_rtt);
lru_deinit(engine->resolver.cache_rep);
lru_init(engine->resolver.cache_rtt, LRU_RTT_SIZE);
lru_init(engine->resolver.cache_rep, LRU_REP_SIZE);
lru_deinit(engine->resolver.cache_cookie);
lru_init(engine->resolver.cache_cookie, LRU_COOKIES_SIZE);
lru_reset(engine->resolver.cache_rtt);
lru_reset(engine->resolver.cache_rep);
lru_reset(engine->resolver.cache_cookie);
lua_pushboolean(L, true);
return 1;
}
......
......@@ -443,18 +443,9 @@ static int init_resolver(struct engine *engine)
kr_zonecut_init(&engine->resolver.root_hints, (const uint8_t *)"", engine->pool);
kr_zonecut_set_sbelt(&engine->resolver, &engine->resolver.root_hints);
/* Open NS rtt + reputation cache */
engine->resolver.cache_rtt = mm_alloc(engine->pool, lru_size(kr_nsrep_lru_t, LRU_RTT_SIZE));
if (engine->resolver.cache_rtt) {
lru_init(engine->resolver.cache_rtt, LRU_RTT_SIZE);
}
engine->resolver.cache_rep = mm_alloc(engine->pool, lru_size(kr_nsrep_lru_t, LRU_REP_SIZE));
if (engine->resolver.cache_rep) {
lru_init(engine->resolver.cache_rep, LRU_REP_SIZE);
}
engine->resolver.cache_cookie = mm_alloc(engine->pool, lru_size(kr_cookie_lru_t, LRU_COOKIES_SIZE));
if (engine->resolver.cache_cookie) {
lru_init(engine->resolver.cache_cookie, LRU_COOKIES_SIZE);
}
lru_create(&engine->resolver.cache_rtt, LRU_RTT_SIZE, engine->pool, NULL);
lru_create(&engine->resolver.cache_rep, LRU_REP_SIZE, engine->pool, NULL);
lru_create(&engine->resolver.cache_cookie, LRU_COOKIES_SIZE, engine->pool, NULL);
/* Load basic modules */
engine_register(engine, "iterate", NULL, NULL);
......@@ -507,20 +498,17 @@ static int init_state(struct engine *engine)
return kr_ok();
}
static enum lru_apply_do update_stat_item(const char *key, uint len,
unsigned *rtt, void *baton)
{
return *rtt > KR_NS_LONG ? LRU_APPLY_DO_EVICT : LRU_APPLY_DO_NOTHING;
}
/** @internal Walk RTT table, clearing all entries with bad score
* to compensate for intermittent network issues or temporary bad behaviour. */
static void update_state(uv_timer_t *handle)
{
struct engine *engine = handle->data;
/* Walk RTT table, clearing all entries with bad score
* to compensate for intermittent network issues or temporary bad behaviour. */
kr_nsrep_lru_t *table = engine->resolver.cache_rtt;
for (size_t i = 0; i < table->size; ++i) {
if (!table->slots[i].key)
continue;
if (table->slots[i].data > KR_NS_LONG) {
lru_evict(table, i);
}
}
lru_apply(engine->resolver.cache_rtt, update_stat_item, NULL);
}
int engine_init(struct engine *engine, knot_mm_t *pool)
......@@ -573,9 +561,11 @@ void engine_deinit(struct engine *engine)
network_deinit(&engine->net);
kr_zonecut_deinit(&engine->resolver.root_hints);
kr_cache_close(&engine->resolver.cache);
lru_deinit(engine->resolver.cache_rtt);
lru_deinit(engine->resolver.cache_rep);
lru_deinit(engine->resolver.cache_cookie);
/* The lru keys are currently malloc-ated and need to be freed. */
lru_free(engine->resolver.cache_rtt);
lru_free(engine->resolver.cache_rep);
lru_free(engine->resolver.cache_cookie);
/* Clear IPC pipes */
for (size_t i = 0; i < engine->ipc_set.len; ++i) {
......
......@@ -33,7 +33,7 @@ const uint8_t *kr_cookie_lru_get(kr_cookie_lru_t *cache,
return NULL;
}
struct cookie_opt_data *cached = lru_get(cache, addr, addr_len);
struct cookie_opt_data *cached = lru_get_try(cache, addr, addr_len);
return cached ? cached->opt_data : NULL;
}
......@@ -61,7 +61,7 @@ int kr_cookie_lru_set(kr_cookie_lru_t *cache, const struct sockaddr *sa,
return kr_error(EINVAL);
}
struct cookie_opt_data *cached = lru_set(cache, addr, addr_len);
struct cookie_opt_data *cached = lru_get_new(cache, addr, addr_len);
if (!cached) {
return kr_error(ENOMEM);
}
......
......@@ -43,7 +43,7 @@ struct cookie_opt_data {
/**
* DNS cookies tracking.
*/
typedef lru_hash(struct cookie_opt_data) kr_cookie_lru_t;
typedef lru_t(struct cookie_opt_data) kr_cookie_lru_t;
/**
* @brief Obtain LRU cache entry.
......
/* Copyright (C) 2016 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "lib/generic/lru.h"
#include "contrib/murmurhash3/murmurhash3.h"
typedef struct lru_group lru_group_t;
struct lru_item {
uint16_t key_len, val_len; /**< Two bytes should be enough for our purposes. */
char data[]; /**< Place for both key and value. */
};
/** @internal Compute offset of value in struct lru_item. */
static uint val_offset(uint key_len)
{
uint key_end = offsetof(struct lru_item, data) + key_len;
// align it up to the nearest multiple of four
return round_power(key_end, 2);
}
/** @internal Return pointer to value in an item. */
static void * item_val(struct lru_item *it)
{
return it->data + val_offset(it->key_len) - offsetof(struct lru_item, data);
}
/** @internal Compute the size of an item. ATM we don't align/pad the end of it. */
static uint item_size(uint key_len, uint val_len)
{
return val_offset(key_len) + val_len;
}
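/* Worked example of the layout above (sizes chosen purely for illustration,
 * assuming round_power(x, 2) rounds x up to a multiple of 2^2 = 4):
 *   key_len = 5, val_len = 4
 *   offsetof(struct lru_item, data) = 4        (after the two uint16_t fields)
 *   val_offset(5)  = round_power(4 + 5, 2) = 12
 *   item_val(it)   = it->data + 12 - 4         (the value starts at byte 12 of the item)
 *   item_size(5,4) = 12 + 4 = 16 bytes
 */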
/** @internal Free each item. */
KR_EXPORT void lru_free_items_impl(struct lru *lru)
{
assert(lru);
for (size_t i = 0; i < (1 << (size_t)lru->log_groups); ++i) {
lru_group_t *g = &lru->groups[i];
for (int j = 0; j < LRU_ASSOC; ++j)
mm_free(lru->mm, g->items[j]);
}
}
/** @internal See lru_apply. */
KR_EXPORT void lru_apply_impl(struct lru *lru, lru_apply_fun f, void *baton)
{
bool ok = lru && f;
if (!ok) {
assert(false);
return;
}
for (size_t i = 0; i < (1 << (size_t)lru->log_groups); ++i) {
lru_group_t *g = &lru->groups[i];
for (uint j = 0; j < LRU_ASSOC; ++j) {
struct lru_item *it = g->items[j];
if (!it)
continue;
enum lru_apply_do ret =
f(it->data, it->key_len, item_val(it), baton);
switch(ret) {
case LRU_APPLY_DO_EVICT: // evict
mm_free(lru->mm, it);
g->items[j] = NULL;
g->counts[j] = 0;
g->hashes[j] = 0;
break;
default:
assert(ret == LRU_APPLY_DO_NOTHING);
}
}
}
}
/** @internal See lru_create. */
KR_EXPORT struct lru * lru_create_impl(uint max_slots, knot_mm_t *mm_array, knot_mm_t *mm)
{
assert(max_slots);
if (!max_slots)
return NULL;
// let lru->log_groups = ceil(log2(max_slots / (float) assoc))
// without trying for efficiency
uint group_count = (max_slots - 1) / LRU_ASSOC + 1;
uint log_groups = 0;
for (uint s = group_count - 1; s; s /= 2)
++log_groups;
group_count = 1 << log_groups;
assert(max_slots <= group_count * LRU_ASSOC && group_count * LRU_ASSOC < 2 * max_slots);
size_t size = offsetof(struct lru, groups[group_count]);
struct lru *lru = mm_alloc(mm_array, size);
if (unlikely(lru == NULL))
return NULL;
*lru = (struct lru){
.mm = mm,
.mm_array = mm_array,
.log_groups = log_groups,
};
// zeros are a good init
memset(lru->groups, 0, size - offsetof(struct lru, groups));
return lru;
}
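/* Worked example of the sizing above, assuming LRU_ASSOC == 4 purely for
 * illustration (the real constant lives in lru.h, which is not shown here):
 *   max_slots   = 1000
 *   group_count = (1000 - 1) / 4 + 1 = 250
 *   log_groups  = 8                     (the smallest power of two >= 250 is 256)
 *   group_count = 1 << 8 = 256
 *   capacity    = 256 * 4 = 1024 slots  (>= 1000 and < 2 * 1000, matching the assert)
 */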
/** @internal Decrement all counters within a group. */
static void group_dec_counts(lru_group_t *g) {
g->counts[LRU_TRACKED] = LRU_TRACKED;
for (uint i = 0; i < LRU_TRACKED + 1; ++i)
if (likely(g->counts[i]))
--g->counts[i];
}
/** @internal Increment a counter within a group. */
static void group_inc_count(lru_group_t *g, int i) {
if (likely(++(g->counts[i])))
return;
g->counts[i] = -1;
// We could've decreased or halved all of them, but let's keep the max.
}
/** @internal Implementation of both getting and insertion.
* Note: val_len is only meaningful if do_insert. */
KR_EXPORT void * lru_get_impl(struct lru *lru, const char *key, uint key_len,
uint val_len, bool do_insert)
{
bool ok = lru && (key || !key_len) && key_len <= UINT16_MAX
&& (!do_insert || val_len <= UINT16_MAX);
if (!ok) {
assert(false);
return NULL; // reasonable fallback when not debugging
}
// find the right group
uint32_t khash = hash(key, key_len);
uint16_t khash_top = khash >> 16;
lru_group_t *g = &lru->groups[khash & ((1 << lru->log_groups) - 1)];
struct lru_item *it = NULL;
uint i;
// scan the *stored* elements in the group
for (i = 0; i < LRU_ASSOC; ++i) {
if (g->hashes[i] == khash_top) {
it = g->items[i];
if (likely(it && it->key_len == key_len
&& memcmp(it->data, key, key_len) == 0))
goto found; // to reduce huge nesting depth
}
}
// key not found; first try an empty/counted-out place to insert
if (do_insert)
for (i = 0; i < LRU_ASSOC; ++i)
if (g->items[i] == NULL || g->counts[i] == 0)
goto insert;
// check whether we at least track the key's count
for (i = LRU_ASSOC; i < LRU_TRACKED; ++i) {
if (g->hashes[i] == khash_top) {
group_inc_count(g, i);
if (!do_insert)
return NULL;
// check if we trumped some stored key
for (uint j = 0; j < LRU_ASSOC; ++j)
if (unlikely(g->counts[i] > g->counts[j])) {
// evict key j, i.e. swap with i
--g->counts[i]; // we increment it below
SWAP(g->counts[i], g->counts[j]);
SWAP(g->hashes[i], g->hashes[j]);
i = j;
goto insert;
}
return NULL;
}
}
// not found at all: decrement all counts but only on every LRU_TRACKED occasion
if (g->counts[LRU_TRACKED])
--g->counts[LRU_TRACKED];
else
group_dec_counts(g);
return NULL;
insert: // insert into position i (incl. key)
assert(i < LRU_ASSOC);
g->hashes[i] = khash_top;
it = g->items[i];
uint new_size = item_size(key_len, val_len);
if (it == NULL || new_size != item_size(it->key_len, it->val_len)) {
// (re)allocate
mm_free(lru->mm, it);
it = g->items[i] = mm_alloc(lru->mm, new_size);
if (it == NULL)
return NULL;
}
it->key_len = key_len;
it->val_len = val_len;
memcpy(it->data, key, key_len);
memset(item_val(it), 0, val_len); // clear the value
found: // key and hash OK on g->items[i]; now update the count
assert(i < LRU_ASSOC);
group_inc_count(g, i);
return item_val(g->items[i]);
}
This diff is collapsed.
libkres_SOURCES := \
lib/generic/lru.c \
lib/generic/map.c \
lib/layer/iterate.c \
lib/layer/validate.c \
......@@ -20,6 +21,7 @@ libkres_SOURCES := \
libkres_HEADERS := \
lib/generic/array.h \
lib/generic/lru.h \
lib/generic/map.h \
lib/generic/set.h \
lib/layer.h \
......
......@@ -98,7 +98,7 @@ static unsigned eval_addr_set(pack_t *addr_set, kr_nsrep_lru_t *rttcache, unsign
}
/* Get RTT for this address (if known) */
if (is_valid) {
unsigned *cached = rttcache ? lru_get(rttcache, val, len) : NULL;
unsigned *cached = rttcache ? lru_get_try(rttcache, val, len) : NULL;
unsigned addr_score = (cached) ? *cached : KR_NS_GLUED;
if (addr_score < score + favour) {
/* Shake down previous contenders */
......@@ -124,7 +124,8 @@ static int eval_nsrep(const char *k, void *v, void *baton)
/* Fetch NS reputation */
if (ctx->cache_rep) {
unsigned *cached = lru_get(ctx->cache_rep, k, knot_dname_size((const uint8_t *)k));
unsigned *cached = lru_get_try(ctx->cache_rep, k,
knot_dname_size((const uint8_t *)k));
if (cached) {
reputation = *cached;
}
......@@ -188,7 +189,9 @@ int kr_nsrep_set(struct kr_query *qry, size_t index, uint8_t *addr, size_t addr_
/* Retrieve RTT from cache */
if (addr && addr_len > 0) {
struct kr_context *ctx = qry->ns.ctx;
unsigned *score = ctx ? lru_get(ctx->cache_rtt, (const char *)addr, addr_len) : NULL;
unsigned *score = ctx
? lru_get_try(ctx->cache_rtt, (const char *)addr, addr_len)
: NULL;
if (score) {
qry->ns.score = MIN(qry->ns.score, *score);
}
......@@ -255,7 +258,7 @@ int kr_nsrep_update_rtt(struct kr_nsrep *ns, const struct sockaddr *addr,
addr_len = sizeof(struct in6_addr);
}
}
unsigned *cur = lru_set(cache, addr_in, addr_len);
unsigned *cur = lru_get_new(cache, addr_in, addr_len);
if (!cur) {
return kr_error(ENOMEM);
}
......@@ -290,7 +293,7 @@ int kr_nsrep_update_rep(struct kr_nsrep *ns, unsigned reputation, kr_nsrep_lru_t
/* Store in the struct */
ns->reputation = reputation;
/* Store reputation in the LRU cache */
unsigned *cur = lru_set(cache, (const char *)ns->name, knot_dname_size(ns->name));
unsigned *cur = lru_get_new(cache, (const char *)ns->name, knot_dname_size(ns->name));
if (!cur) {
return kr_error(ENOMEM);
}
......
......@@ -62,7 +62,7 @@ enum kr_ns_update_mode {
/**
* NS reputation/QoS tracking.
*/
typedef lru_hash(unsigned) kr_nsrep_lru_t;
typedef lru_t(unsigned) kr_nsrep_lru_t;
/* Maximum count of addresses probed in one go (last is left empty) */
#define KR_NSREP_MAXADDR 4
......
......@@ -16,6 +16,7 @@
#pragma once
#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/time.h>
......@@ -44,6 +45,11 @@ KR_EXPORT void kr_log_debug(const char *fmt, ...);
#define WITH_DEBUG if(0)
#endif
/* C11 compatibility, but without any implementation so far. */
#ifndef static_assert
#define static_assert(cond, msg)
#endif
/** @cond Memory alloc routines */
static inline void *mm_alloc(knot_mm_t *mm, size_t size)
{
......
......@@ -341,7 +341,8 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut, const knot_d
const knot_dname_t *ns_name = knot_ns_name(&rr_copy.rrs, i);
kr_zonecut_add(cut, ns_name, NULL);
/* Fetch NS reputation and decide whether to prefetch A/AAAA records. */
unsigned *cached = lru_get(ctx->cache_rep, (const char *)ns_name, knot_dname_size(ns_name));
unsigned *cached = lru_get_try(ctx->cache_rep,
(const char *)ns_name, knot_dname_size(ns_name));
unsigned reputation = (cached) ? *cached : 0;
if (!(reputation & KR_NS_NOIP4) && !(ctx->options & QUERY_NO_IPV4)) {
fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_A, timestamp);
......
......@@ -76,7 +76,7 @@ static struct const_metric_elm const_metrics[] = {
/** @endcond */
/** @internal LRU hash of most frequent names. */
typedef lru_hash(unsigned) namehash_t;
typedef lru_t(unsigned) namehash_t;
typedef array_t(struct sockaddr_in6) addrlist_t;
/** @internal Stats data structure. */
......@@ -142,12 +142,12 @@ static void collect_sample(struct stat_data *data, struct kr_rplan *rplan, knot_
}
int key_len = collect_key(key, qry->sname, qry->stype);
if (qry->flags & QUERY_EXPIRING) {
unsigned *count = lru_set(data->queries.expiring, key, key_len);
unsigned *count = lru_get_new(data->queries.expiring, key, key_len);
if (count)
*count += 1;
/* Consider 1 in N for frequent sampling. */
} else if (kr_rand_uint(FREQUENT_PSAMPLE) <= 1) {
unsigned *count = lru_set(data->queries.frequent, key, key_len);
unsigned *count = lru_get_new(data->queries.frequent, key, key_len);
if (count)
*count += 1;
}
......@@ -325,6 +325,23 @@ static char* stats_list(void *env, struct kr_module *module, const char *args)
return ret;
}
/** @internal Helper for dump_list: add a single namehash_t item to JSON. */
static enum lru_apply_do dump_value(const char *key, uint len, unsigned *val, void *baton)
{
uint16_t key_type = 0;
char key_name[KNOT_DNAME_MAXLEN], type_str[16];
/* Extract query name, type and counter */
memcpy(&key_type, key, sizeof(key_type));
knot_dname_to_str(key_name, (uint8_t *)key + sizeof(key_type), sizeof(key_name));
knot_rrtype_to_string(key_type, type_str, sizeof(type_str));
/* Convert to JSON object */
JsonNode *json_val = json_mkobject();
json_append_member(json_val, "count", json_mknumber(*val));
json_append_member(json_val, "name", json_mkstring(key_name));
json_append_member(json_val, "type", json_mkstring(type_str));
json_append_element((JsonNode *)baton, json_val);
return LRU_APPLY_DO_NOTHING; // keep the item
}
/**
* List frequent names.
*
......@@ -335,25 +352,8 @@ static char* dump_list(void *env, struct kr_module *module, const char *args, na
if (!table) {
return NULL;
}
uint16_t key_type = 0;
char key_name[KNOT_DNAME_MAXLEN], type_str[16];
JsonNode *root = json_mkarray();
for (unsigned i = 0; i < table->size; ++i) {
struct lru_slot *slot = lru_slot_at((struct lru_hash_base *)table, i);
if (slot->key) {
/* Extract query name, type and counter */
memcpy(&key_type, slot->key, sizeof(key_type));
knot_dname_to_str(key_name, (uint8_t *)slot->key + sizeof(key_type), sizeof(key_name));
knot_rrtype_to_string(key_type, type_str, sizeof(type_str));
unsigned *slot_val = lru_slot_val(slot, lru_slot_offset(table));
/* Convert to JSON object */
JsonNode *json_val = json_mkobject();
json_append_member(json_val, "count", json_mknumber(*slot_val));
json_append_member(json_val, "name", json_mkstring(key_name));
json_append_member(json_val, "type", json_mkstring(type_str));
json_append_element(root, json_val);
}
}
lru_apply(table, dump_value, root);
char *ret = json_encode(root);
json_delete(root);
return ret;
......@@ -368,8 +368,7 @@ static char* dump_frequent(void *env, struct kr_module *module, const char *args
static char* clear_frequent(void *env, struct kr_module *module, const char *args)
{
struct stat_data *data = module->data;
lru_deinit(data->queries.frequent);
lru_init(data->queries.frequent, FREQUENT_COUNT);
lru_reset(data->queries.frequent);
return NULL;
}
......@@ -382,8 +381,7 @@ static char* dump_expiring(void *env, struct kr_module *module, const char *args
static char* clear_expiring(void *env, struct kr_module *module, const char *args)
{
struct stat_data *data = module->data;
lru_deinit(data->queries.expiring);
lru_init(data->queries.expiring, FREQUENT_COUNT);
lru_reset(data->queries.expiring);
return NULL;
}