Commit b655c2f9 authored by Daniel Salzman's avatar Daniel Salzman

Merge branch 'rrl_refactor' into 'master'

Module RRL refactoring

See merge request !993
parents 1abc2d8d 0eb7d6db
/* Copyright (C) 2018 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
/* Copyright (C) 2019 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -25,11 +25,11 @@
/* Hopscotch defines. */
#define HOP_LEN (sizeof(unsigned)*8)
/* Limits */
#define RRL_CLSBLK_MAXLEN (4 + 8 + 1 + 256)
/* Limits (class, ipv6 remote, dname) */
#define RRL_CLSBLK_MAXLEN (1 + 8 + 255)
/* CIDR block prefix lengths for v4/v6 */
#define RRL_V4_PREFIX ((uint32_t)0x00ffffff) /* /24 */
#define RRL_V6_PREFIX ((uint64_t)0x00ffffffffffffff) /* /56 */
#define RRL_V4_PREFIX_LEN 3 /* /24 */
#define RRL_V6_PREFIX_LEN 7 /* /56 */
/* Defaults */
#define RRL_SSTART 2 /* 1/Nth of the rate for slow start */
#define RRL_PSIZE_LARGE 1024
......@@ -90,7 +90,7 @@ static uint8_t rrl_clsid(rrl_req_t *p)
{
/* Check error code */
int ret = CLS_NULL;
switch (knot_wire_get_rcode(p->w)) {
switch (knot_wire_get_rcode(p->wire)) {
case KNOT_RCODE_NOERROR: ret = CLS_NORMAL; break;
case KNOT_RCODE_NXDOMAIN: return CLS_NXDOMAIN; break;
default: return CLS_ERROR; break;
......@@ -123,7 +123,7 @@ static uint8_t rrl_clsid(rrl_req_t *p)
}
/* Check ancount */
if (knot_wire_get_ancount(p->w) == 0) {
if (knot_wire_get_ancount(p->wire) == 0) {
return CLS_EMPTY;
}
......@@ -155,7 +155,7 @@ static int rrl_clsname(uint8_t *dst, size_t maxlen, uint8_t cls, rrl_req_t *req,
return knot_dname_to_wire(dst, name, maxlen);
}
static int rrl_classify(uint8_t *dst, size_t maxlen, const struct sockaddr_storage *a,
static int rrl_classify(uint8_t *dst, size_t maxlen, const struct sockaddr_storage *remote,
rrl_req_t *req, const knot_dname_t *name)
{
/* Class */
......@@ -164,110 +164,101 @@ static int rrl_classify(uint8_t *dst, size_t maxlen, const struct sockaddr_stora
int blklen = sizeof(cls);
/* Address (in network byteorder, adjust masks). */
uint64_t nb = 0;
if (a->ss_family == AF_INET6) {
struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)a;
nb = *((uint64_t *)(&ipv6->sin6_addr)) & RRL_V6_PREFIX;
uint64_t netblk = 0;
if (remote->ss_family == AF_INET6) {
struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)remote;
memcpy(&netblk, &ipv6->sin6_addr, RRL_V6_PREFIX_LEN);
} else {
struct sockaddr_in *ipv4 = (struct sockaddr_in *)a;
nb = ((uint32_t)ipv4->sin_addr.s_addr) & RRL_V4_PREFIX;
struct sockaddr_in *ipv4 = (struct sockaddr_in *)remote;
memcpy(&netblk, &ipv4->sin_addr, RRL_V4_PREFIX_LEN);
}
if (blklen + sizeof(nb) > maxlen) {
return KNOT_ESPACE;
}
memcpy(dst + blklen, (void *)&nb, sizeof(nb));
blklen += sizeof(nb);
memcpy(dst + blklen, &netblk, sizeof(netblk));
blklen += sizeof(netblk);
/* Name */
uint16_t *len_pos = (uint16_t *)(dst + blklen);
blklen += sizeof(uint16_t);
int ret = rrl_clsname(dst + blklen, maxlen - blklen, cls, req, name);
if (ret < 0) {
return ret;
}
uint16_t len = ret;
memcpy(len_pos, &len, sizeof(len));
uint8_t len = ret;
blklen += len;
return blklen;
}
/*! \brief Check whether a bucket is reusable: never used, or expired (>1 s old). */
static int bucket_free(rrl_item_t *bucket, uint32_t now)
{
	return bucket->cls == CLS_NULL || (bucket->time + 1 < now);
}
/*! \brief Check whether a bucket holds the same class, netblock, and qname hash. */
static int bucket_match(rrl_item_t *bucket, rrl_item_t *match)
{
	return bucket->cls == match->cls &&
	       bucket->netblk == match->netblk &&
	       bucket->qname == match->qname;
}
/*!
 * \brief Find the nearest free bucket, searching from \a id and wrapping around.
 *
 * \param tbl  RRL table.
 * \param id   Index to start the search from.
 * \param now  Current timestamp (seconds).
 *
 * \return Distance from \a id to the free bucket, or \a id itself when the
 *         table is full (the caller then forcibly vacates the current element).
 */
static int find_free(rrl_table_t *tbl, unsigned id, uint32_t now)
{
	/* Forward scan: <id, size). Unsigned index avoids a sign-compare
	 * mismatch against the table size. */
	for (unsigned i = id; i < tbl->size; i++) {
		if (bucket_free(&tbl->arr[i], now)) {
			return i - id;
		}
	}

	/* Wrap around: <0, id). Distance accounts for the wrap. */
	for (unsigned i = 0; i < id; i++) {
		if (bucket_free(&tbl->arr[i], now)) {
			return i + (tbl->size - id);
		}
	}

	/* This happens if table is full... force vacate current elm. */
	return id;
}
/*!
 * \brief Scan the hop bitmap of bucket \a id for an exact match of \a m.
 *
 * \return Hop distance of the matching bucket, or HOP_LEN + 1 when not found.
 */
static inline unsigned find_match(rrl_table_t *tbl, uint32_t id, rrl_item_t *m)
{
	unsigned match_bitmap = tbl->arr[id].hop;
	while (match_bitmap != 0) {
		unsigned hop = __builtin_ctz(match_bitmap); /* offset of next potential match */
		unsigned new_id = (id + hop) % tbl->size;
		if (bucket_match(&tbl->arr[new_id], m)) {
			return hop;
		}
		/* Unsigned shift: hop may be up to HOP_LEN - 1, where a signed
		 * 1 << 31 would be undefined behavior. */
		match_bitmap &= ~(1u << hop); /* clear potential match */
	}

	return HOP_LEN + 1;
}
/*!
 * \brief Hopscotch displacement: move a free bucket closer to its home bucket.
 *
 * \param tbl      RRL table.
 * \param id       Home bucket index.
 * \param dist     Current distance of the free bucket from \a id.
 * \param free_id  In/out: index of the free bucket; updated to the new free slot.
 *
 * \return Reduced distance (0 forces vacating the initial element).
 */
static inline unsigned reduce_dist(rrl_table_t *tbl, unsigned id, unsigned dist, unsigned *free_id)
{
	unsigned rd = HOP_LEN - 1;
	while (rd > 0) {
		unsigned vacate_id = (tbl->size + *free_id - rd) % tbl->size; /* bucket to be vacated */
		if (tbl->arr[vacate_id].hop != 0) {
			unsigned hop = __builtin_ctz(tbl->arr[vacate_id].hop); /* offset of first valid bucket */
			if (hop < rd) { /* only offsets in <vacate_id, free_id> are interesting */
				unsigned new_id = (vacate_id + hop) % tbl->size; /* this item will be displaced to [free_id] */
				unsigned keep_hop = tbl->arr[*free_id].hop; /* unpredictable padding */
				memcpy(tbl->arr + *free_id, tbl->arr + new_id, sizeof(rrl_item_t));
				tbl->arr[*free_id].hop = keep_hop;
				tbl->arr[new_id].cls = CLS_NULL;
				/* Unsigned shifts: hop/rd may reach HOP_LEN - 1. */
				tbl->arr[vacate_id].hop &= ~(1u << hop);
				tbl->arr[vacate_id].hop |= 1u << rd;
				*free_id = new_id;
				return dist - (rd - hop);
			}
		}
		--rd;
	}

	assert(rd == 0); /* this happens with p=1/fact(HOP_LEN) */
	*free_id = id;
	dist = 0; /* force vacate initial element */
	return dist;
}
static void rrl_log_state(knotd_mod_t *mod, const struct sockaddr_storage *ss,
......@@ -289,49 +280,49 @@ static void rrl_log_state(knotd_mod_t *mod, const struct sockaddr_storage *ss,
addr_str, rrl_clsstr(cls), what);
}
/*! \brief Acquire the granular table lock with the given identifier. */
static void rrl_lock(rrl_table_t *tbl, int lk_id)
{
	assert(lk_id > -1);
	pthread_mutex_lock(tbl->lk + lk_id);
}
/*! \brief Release the granular table lock with the given identifier. */
static void rrl_unlock(rrl_table_t *tbl, int lk_id)
{
	assert(lk_id > -1);
	pthread_mutex_unlock(tbl->lk + lk_id);
}
static int rrl_setlocks(rrl_table_t *rrl, uint32_t granularity)
static int rrl_setlocks(rrl_table_t *tbl, uint32_t granularity)
{
assert(!rrl->lk); /* Cannot change while locks are used. */
assert(granularity <= rrl->size / 10); /* Due to int. division err. */
assert(!tbl->lk); /* Cannot change while locks are used. */
assert(granularity <= tbl->size / 10); /* Due to int. division err. */
if (pthread_mutex_init(&rrl->ll, NULL) < 0) {
if (pthread_mutex_init(&tbl->ll, NULL) < 0) {
return KNOT_ENOMEM;
}
/* Alloc new locks. */
rrl->lk = malloc(granularity * sizeof(pthread_mutex_t));
if (!rrl->lk) {
tbl->lk = malloc(granularity * sizeof(pthread_mutex_t));
if (!tbl->lk) {
return KNOT_ENOMEM;
}
memset(rrl->lk, 0, granularity * sizeof(pthread_mutex_t));
memset(tbl->lk, 0, granularity * sizeof(pthread_mutex_t));
/* Initialize. */
for (size_t i = 0; i < granularity; ++i) {
if (pthread_mutex_init(rrl->lk + i, NULL) < 0) {
if (pthread_mutex_init(tbl->lk + i, NULL) < 0) {
break;
}
++rrl->lk_count;
++tbl->lk_count;
}
/* Incomplete initialization */
if (rrl->lk_count != granularity) {
for (size_t i = 0; i < rrl->lk_count; ++i) {
pthread_mutex_destroy(rrl->lk + i);
if (tbl->lk_count != granularity) {
for (size_t i = 0; i < tbl->lk_count; ++i) {
pthread_mutex_destroy(tbl->lk + i);
}
free(rrl->lk);
rrl->lk_count = 0;
free(tbl->lk);
tbl->lk_count = 0;
return KNOT_ERROR;
}
......@@ -345,100 +336,100 @@ rrl_table_t *rrl_create(size_t size, uint32_t rate)
}
const size_t tbl_len = sizeof(rrl_table_t) + size * sizeof(rrl_item_t);
rrl_table_t *t = calloc(1, tbl_len);
if (!t) {
rrl_table_t *tbl = calloc(1, tbl_len);
if (!tbl) {
return NULL;
}
t->size = size;
t->rate = rate;
tbl->size = size;
tbl->rate = rate;
if (dnssec_random_buffer((uint8_t *)&t->key, sizeof(t->key)) != DNSSEC_EOK) {
free(t);
if (dnssec_random_buffer((uint8_t *)&tbl->key, sizeof(tbl->key)) != DNSSEC_EOK) {
free(tbl);
return NULL;
}
if (rrl_setlocks(t, RRL_LOCK_GRANULARITY) != KNOT_EOK) {
free(t);
if (rrl_setlocks(tbl, RRL_LOCK_GRANULARITY) != KNOT_EOK) {
free(tbl);
return NULL;
}
return t;
return tbl;
}
/*!
 * \brief Get bucket for current combination of parameters.
 *
 * \param tbl     RRL table.
 * \param remote  Source address.
 * \param req     RRL request descriptor.
 * \param zone    Zone name related to the response (or NULL).
 * \param stamp   Current timestamp (seconds).
 * \param lock    Out: granular lock id held on return (caller must unlock).
 *
 * \return Locked bucket, or NULL on classification failure.
 */
static rrl_item_t *rrl_hash(rrl_table_t *tbl, const struct sockaddr_storage *remote,
                            rrl_req_t *req, const knot_dname_t *zone, uint32_t stamp,
                            int *lock)
{
	uint8_t buf[RRL_CLSBLK_MAXLEN];
	int len = rrl_classify(buf, sizeof(buf), remote, req, zone);
	if (len < 0) {
		return NULL;
	}

	uint32_t id = SipHash24(&tbl->key, buf, len) % tbl->size;

	/* Lock for lookup. */
	pthread_mutex_lock(&tbl->ll);

	/* Find an exact match in <id, id + HOP_LEN). */
	/* Buffer layout: class byte, 8-byte netblock, then qname in wire format. */
	knot_dname_t *qname = buf + sizeof(uint8_t) + sizeof(uint64_t);
	uint64_t netblk;
	memcpy(&netblk, buf + sizeof(uint8_t), sizeof(netblk));
	rrl_item_t match = {
		.hop = 0,
		.netblk = netblk,
		.ntok = tbl->rate * RRL_CAPACITY,
		.cls = buf[0],
		.flags = RRL_BF_NULL,
		.qname = SipHash24(&tbl->key, qname, knot_dname_size(qname)),
		.time = stamp
	};

	unsigned dist = find_match(tbl, id, &match);
	if (dist > HOP_LEN) { /* not an exact match, find free element [f] */
		dist = find_free(tbl, id, stamp);
	}

	/* Reduce distance to fit <id, id + HOP_LEN) */
	unsigned free_id = (id + dist) % tbl->size;
	while (dist >= HOP_LEN) {
		dist = reduce_dist(tbl, id, dist, &free_id);
	}

	/* Assign granular lock and unlock lookup. */
	*lock = free_id % tbl->lk_count;
	rrl_lock(tbl, *lock);
	pthread_mutex_unlock(&tbl->ll);

	/* found free bucket which is in <id, id + HOP_LEN) */
	tbl->arr[id].hop |= (1u << dist); /* unsigned shift; dist may be HOP_LEN - 1 */
	rrl_item_t *bucket = &tbl->arr[free_id];
	assert(free_id == (id + dist) % tbl->size);

	/* Inspect bucket state. */
	unsigned hop = bucket->hop;
	if (bucket->cls == CLS_NULL) {
		memcpy(bucket, &match, sizeof(rrl_item_t));
		bucket->hop = hop;
	}

	/* Check for collisions. */
	if (!bucket_match(bucket, &match)) {
		if (!(bucket->flags & RRL_BF_SSTART)) {
			/* Vacate the colliding entry and start this one in
			 * slow-start mode with a reduced token budget. */
			memcpy(bucket, &match, sizeof(rrl_item_t));
			bucket->hop = hop;
			bucket->ntok = tbl->rate + tbl->rate / RRL_SSTART;
			bucket->flags |= RRL_BF_SSTART;
		}
	}

	return bucket;
}
int rrl_query(rrl_table_t *rrl, const struct sockaddr_storage *a, rrl_req_t *req,
const knot_dname_t *zone, knotd_mod_t *mod)
int rrl_query(rrl_table_t *rrl, const struct sockaddr_storage *remote,
rrl_req_t *req, const knot_dname_t *zone, knotd_mod_t *mod)
{
if (!rrl || !req || !a) {
if (!rrl || !req || !remote) {
return KNOT_EINVAL;
}
......@@ -446,8 +437,8 @@ int rrl_query(rrl_table_t *rrl, const struct sockaddr_storage *a, rrl_req_t *req
int ret = KNOT_EOK;
int lock = -1;
uint32_t now = time_now().tv_sec;
rrl_item_t *b = rrl_hash(rrl, a, req, zone, now, &lock);
if (!b) {
rrl_item_t *bucket = rrl_hash(rrl, remote, req, zone, now, &lock);
if (!bucket) {
if (lock > -1) {
rrl_unlock(rrl, lock);
}
......@@ -455,41 +446,41 @@ int rrl_query(rrl_table_t *rrl, const struct sockaddr_storage *a, rrl_req_t *req
}
/* Calculate rate for dT */
uint32_t dt = now - b->time;
uint32_t dt = now - bucket->time;
if (dt > RRL_CAPACITY) {
dt = RRL_CAPACITY;
}
/* Visit bucket. */
b->time = now;
bucket->time = now;
if (dt > 0) { /* Window moved. */
/* Check state change. */
if ((b->ntok > 0 || dt > 1) && (b->flags & RRL_BF_ELIMIT)) {
b->flags &= ~RRL_BF_ELIMIT;
rrl_log_state(mod, a, b->flags, b->cls);
if ((bucket->ntok > 0 || dt > 1) && (bucket->flags & RRL_BF_ELIMIT)) {
bucket->flags &= ~RRL_BF_ELIMIT;
rrl_log_state(mod, remote, bucket->flags, bucket->cls);
}
/* Add new tokens. */
uint32_t dn = rrl->rate * dt;
if (b->flags & RRL_BF_SSTART) { /* Bucket in slow-start. */
b->flags &= ~RRL_BF_SSTART;
if (bucket->flags & RRL_BF_SSTART) { /* Bucket in slow-start. */
bucket->flags &= ~RRL_BF_SSTART;
}
b->ntok += dn;
if (b->ntok > RRL_CAPACITY * rrl->rate) {
b->ntok = RRL_CAPACITY * rrl->rate;
bucket->ntok += dn;
if (bucket->ntok > RRL_CAPACITY * rrl->rate) {
bucket->ntok = RRL_CAPACITY * rrl->rate;
}
}
/* Last item taken. */
if (b->ntok == 1 && !(b->flags & RRL_BF_ELIMIT)) {
b->flags |= RRL_BF_ELIMIT;
rrl_log_state(mod, a, b->flags, b->cls);
if (bucket->ntok == 1 && !(bucket->flags & RRL_BF_ELIMIT)) {
bucket->flags |= RRL_BF_ELIMIT;
rrl_log_state(mod, remote, bucket->flags, bucket->cls);
}
/* Decay current bucket. */
if (b->ntok > 0) {
--b->ntok;
} else if (b->ntok == 0) {
if (bucket->ntok > 0) {
--bucket->ntok;
} else if (bucket->ntok == 0) {
ret = KNOT_ELIMIT;
}
......
/* Copyright (C) 2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
/* Copyright (C) 2019 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -72,7 +72,7 @@ typedef enum {
* \brief RRL request descriptor.
*/
typedef struct {
const uint8_t *w;
const uint8_t *wire;
uint16_t len;
rrl_req_flag_t flags;
knot_pkt_t *query;
......@@ -90,15 +90,15 @@ rrl_table_t *rrl_create(size_t size, uint32_t rate);
* \brief Query the RRL table for accept or deny, when the rate limit is reached.
*
* \param rrl RRL table.
* \param remote Source address.
* \param req RRL request (containing resp., flags and question).
* \param zone Zone name related to the response (or NULL).
* \param mod Query module (needed for logging).
* \retval KNOT_EOK if passed.
* \retval KNOT_ELIMIT when the limit is reached.
*/
int rrl_query(rrl_table_t *rrl, const struct sockaddr_storage *a, rrl_req_t *req,
const knot_dname_t *zone, knotd_mod_t *mod);
int rrl_query(rrl_table_t *rrl, const struct sockaddr_storage *remote,
rrl_req_t *req, const knot_dname_t *zone, knotd_mod_t *mod);
/*!
* \brief Roll a dice whether answer slips or not.
......
/* Copyright (C) 2018 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
/* Copyright (C) 2019 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -97,7 +97,7 @@ static knotd_state_t ratelimit_apply(knotd_state_t state, knot_pkt_t *pkt,
}
rrl_req_t req = {
.w = pkt->wire,
.wire = pkt->wire,
.query = qdata->query
};
......@@ -164,17 +164,16 @@ int rrl_load(knotd_mod_t *mod)
}
// Create table.
knotd_conf_t rate = knotd_conf_mod(mod, MOD_RATE_LIMIT);
knotd_conf_t size = knotd_conf_mod(mod, MOD_TBL_SIZE);
ctx->rrl = rrl_create(size.single.integer, rate.single.integer);
uint32_t rate = knotd_conf_mod(mod, MOD_RATE_LIMIT).single.integer;
size_t size = knotd_conf_mod(mod, MOD_TBL_SIZE).single.integer;
ctx->rrl = rrl_create(size, rate);
if (ctx->rrl == NULL) {
ctx_free(ctx);
return KNOT_ENOMEM;
}
// Get slip.
knotd_conf_t conf = knotd_conf_mod(mod, MOD_SLIP);
ctx->slip = conf.single.integer;
ctx->slip = knotd_conf_mod(mod, MOD_SLIP).single.integer;
// Get whitelist.
ctx->whitelist = knotd_conf_mod(mod, MOD_WHITELIST);
......
/* Copyright (C) 2018 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
/* Copyright (C) 2019 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -21,6 +21,7 @@
#include "libknot/libknot.h"
#include "contrib/sockaddr.h"
#include "knot/modules/rrl/functions.c"
#include "stdio.h"
/* Enable time-dependent tests. */
//#define ENABLE_TIMED_TESTS
......@@ -115,7 +116,7 @@ int main(int argc, char *argv[])
knot_wire_flags_set_qr(rbuf);
rrl_req_t rq;
rq.w = rbuf;
rq.wire = rbuf;
rq.len = rlen;
rq.query = query;
rq.flags = 0;
......@@ -125,7 +126,7 @@ int main(int argc, char *argv[])
rrl_table_t *rrl = rrl_create(RRL_SIZE, rate);
ok(rrl != NULL, "rrl: create");
/* 4. N unlimited requests. */
/* 2. N unlimited requests. */
knot_dname_t *zone = knot_dname_from_str_alloc("rrl.");
struct sockaddr_storage addr;
......@@ -142,6 +143,16 @@ int main(int argc, char *argv[])
}
is_int(0, ret, "rrl: unlimited IPv4/v6 requests");
/* 3. Endian-independent hash input buffer. */
uint8_t buf[RRL_CLSBLK_MAXLEN];
// CLS_LARGE + remote + dname wire.
uint8_t expectedv4[] = "\x10\x01\x02\x03\x00\x00\x00\x00\x00\x04""beef";
rrl_classify(buf, sizeof(buf), &addr, &rq, qname);
is_int(0, memcmp(buf, expectedv4, sizeof(expectedv4)), "rrl: IPv4 hash input buffer");
uint8_t expectedv6[] = "\x10\x11\x22\x33\x44\x55\x66\x77\x00\x04""beef";
rrl_classify(buf, sizeof(buf), &addr6, &rq, qname);
is_int(0, memcmp(buf, expectedv6, sizeof(expectedv6)), "rrl: IPv6 hash input buffer");
#ifdef ENABLE_TIMED_TESTS
/* 5. limited request */
ret = rrl_query(rrl, &addr, &rq, zone, NULL);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment