remove memcached and redis modules from source tree

The source was kept for historical reasons but has not been in use since 2.0.0.
It is now clear that there are better approaches to implementing a
distributed cache, so it is pointless to keep the old code in the tree and
confuse users.
@@ -32,8 +32,6 @@ $(eval $(call find_lib,cmocka))
$(eval $(call find_bin,doxygen))
$(eval $(call find_bin,sphinx-build))
$(eval $(call find_pythonpkg,breathe))
#$(eval $(call find_lib,libmemcached,1.0))
#$(eval $(call find_lib,hiredis,,yes))
$(eval $(call find_lib,socket_wrapper))
$(eval $(call find_lib,libsystemd,227))
$(eval $(call find_lib,gnutls))
@@ -118,8 +116,6 @@ info:
$(info [$(HAS_sphinx-build)] sphinx-build (doc))
$(info [$(HAS_breathe)] python-breathe (doc))
$(info [$(HAS_go)] go (modules/go, Go buildmode=c-shared support))
# $(info [$(HAS_libmemcached)] libmemcached (modules/memcached))
# $(info [$(HAS_hiredis)] hiredis (modules/redis))
$(info [$(HAS_cmocka)] cmocka (tests/unit))
$(info [$(HAS_libsystemd)] systemd (daemon))
# $(info [$(HAS_nettle)] nettle (modules/cookies))
@@ -61,10 +61,6 @@ There are also *optional* packages that enable specific functionality in Knot Resolver
"`clang-tidy`_", "``lint-c``", "Syntax and static analysis checker for C."
"luacov_", "``check-config``", "Code coverage analysis for Lua modules."
.. "libmemcached_", "``modules/memcached``", "To build memcached backend module."
"hiredis_", "``modules/redis``", "To build redis backend module."
"Go_ 1.5+", "``modules``", "Build modules written in Go."
.. [#] Requires C99, ``__attribute__((cleanup))`` and ``-MMD -MP`` for dependency file generation. GCC, Clang and ICC are supported.
.. [#] You can use the variables ``<dependency>_CFLAGS`` and ``<dependency>_LIBS`` to configure dependencies manually (e.g. ``libknot_CFLAGS`` and ``libknot_LIBS``).
.. [#] libuv 1.7 brings SO_REUSEPORT support that is needed for multiple forks. libuv < 1.7 can still be used, but only in single-process mode. Use a :ref:`different method <daemon-reuseport>` for load balancing.
@@ -92,7 +88,7 @@ Most of the dependencies can be resolved from packages, here's an overview for s
# integration tests
sudo dnf install cmake git python-dns python-jinja2
# optional features
sudo dnf install golang hiredis-devel libmemcached-devel lua-sec-compat lua-socket-compat systemd-devel
sudo dnf install lua-sec-compat lua-socket-compat systemd-devel
# docs
sudo dnf install doxygen python-breathe python-sphinx
@@ -304,8 +300,6 @@ You can hack on the container by changing the container entrypoint to shell like
.. _Lua: https://www.lua.org/about.html
.. _LuaJIT: http://luajit.org/luajit.html
.. _Go: https://golang.org
.. _libmemcached: http://libmemcached.org/libMemcached.html
.. _hiredis: https://github.com/redis/hiredis
.. _geoip: https://github.com/abh/geoip
.. _Doxygen: https://www.stack.nl/~dimitri/doxygen/manual/index.html
.. _breathe: https://github.com/michaeljones/breathe
@@ -17,8 +17,6 @@ Knot Resolver modules
.. include:: ../modules/daf/README.rst
.. include:: ../modules/rebinding/README.rst
.. include:: ../modules/graphite/README.rst
.. .. include:: ../modules/memcached/README.rst
.. .. include:: ../modules/redis/README.rst
.. include:: ../modules/etcd/README.rst
.. include:: ../modules/dns64/README.rst
.. include:: ../modules/renumber/README.rst
Memcached cache storage
-----------------------
Module providing a cache storage backend for memcached_, which is a good fit for
a shared cache between resolvers.
After loading, you can see the storage backend registered and usable.
.. code-block:: lua
> modules.load 'memcached'
> cache.backends()
[memcached://] => true
You can use it right away; see the `libmemcached configuration`_ reference for configuration string
options. The most essential ones are ``--SERVER`` and ``--SOCKET``. Here's an example of connecting to a UNIX socket.
.. code-block:: lua
> cache.storage = 'memcached://--SOCKET="/var/sock/memcached"'
.. note:: The memcached_ instance **MUST** support the binary protocol in order to work with binary keys. You can pass other options in the configuration string for performance tuning (see the example below).
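For example, assuming your libmemcached build supports them, tuning options such as ``--CONNECT-TIMEOUT`` or ``--TCP-NODELAY`` from the `libmemcached configuration`_ syntax can simply be appended to the storage string (the module itself already enforces ``--BINARY-PROTOCOL``):

.. code-block:: lua

   > cache.storage = 'memcached://--SERVER=192.168.1.1 --CONNECT-TIMEOUT=1000 --TCP-NODELAY'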
.. warning:: The memcached_ server is responsible for evicting entries from the cache; the pruning function is not implemented, and neither is aborting write transactions.
Build resolver shared cache
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The memcached_ backend takes care of data replication and failover; you can add multiple servers at once.
.. code-block:: lua
> cache.storage = 'memcached://--SOCKET="/var/sock/memcached" --SERVER=192.168.1.1 --SERVER=cache2.domain'
Dependencies
^^^^^^^^^^^^
Depends on the libmemcached_ library.
.. _memcached: https://memcached.org/
.. _libmemcached: http://libmemcached.org/libMemcached.html
.. _`libmemcached configuration`: http://docs.libmemcached.org/libmemcached_configuration.html#description
/* Copyright (C) 2014-2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/** @file cdb_memcached.c
* @brief Implements the operations that the resolver cache needs.
* It is not a general-purpose namedb implementation, and it cannot be,
* since it is a *cache* by principle and does not guarantee persistence anyway.
*/
#include <assert.h>
#include <string.h>
#include <limits.h>
#include <libmemcached/memcached.h>
#include "contrib/cleanup.h"
#include "lib/generic/array.h"
#include "lib/cache/cdb_api.h"
#include "lib/cache/api.h"
#include "lib/utils.h"
/* memcached client */
struct memcached_cli {
memcached_st *handle;
memcached_result_st res;
};
static int cdb_init(knot_db_t **db, struct kr_cdb_opts *opts, knot_mm_t *pool)
{
if (!db || !opts) {
return kr_error(EINVAL);
}
struct memcached_cli *cli = malloc(sizeof(*cli));
if (!cli) {
return kr_error(ENOMEM);
}
memset(cli, 0, sizeof(*cli));
/* Make sure we're running on binary protocol, as the
* textual protocol is broken for binary keys. */
auto_free char *config_str = kr_strcatdup(2, opts->path, " --BINARY-PROTOCOL");
cli->handle = memcached(config_str, strlen(config_str));
if (!cli->handle) {
free(cli);
return kr_error(EIO);
}
/* Create result set */
memcached_result_st *res = memcached_result_create(cli->handle, &cli->res);
if (!res) {
memcached_free(cli->handle);
free(cli);
return kr_error(ENOMEM);
}
*db = cli;
return 0;
}
static void cdb_deinit(knot_db_t *db)
{
struct memcached_cli *cli = db;
memcached_result_free(&cli->res);
memcached_free(cli->handle);
free(cli);
}
static int cdb_sync(knot_db_t *db)
{
return 0;
}
static int cdb_count(knot_db_t *db)
{
struct memcached_cli *cli = db;
memcached_return_t error = 0;
memcached_stat_st *stats = memcached_stat(cli->handle, NULL, &error);
if (error != 0) {
return kr_error(EIO);
}
size_t ret = stats->curr_items;
free(stats);
return (ret > INT_MAX) ? INT_MAX : ret;
}
static int cdb_clear(knot_db_t *db)
{
struct memcached_cli *cli = db;
memcached_return_t ret = memcached_flush(cli->handle, 0);
if (ret != 0) {
return kr_error(EIO);
}
return 0;
}
static int cdb_readv(knot_db_t *db, const knot_db_val_t *key, knot_db_val_t *val,
int maxcount)
{
if (!db || !key || !val) {
return kr_error(EINVAL);
}
struct memcached_cli *cli = db;
/* Convert to libmemcached query format */
assert(maxcount < 1000); /* Sane upper bound */
const char *keys [maxcount];
size_t lengths [maxcount];
for (int i = 0; i < maxcount; ++i) {
keys[i] = key[i].data;
lengths[i] = key[i].len;
}
/* Execute multiple get and retrieve results */
memcached_return_t status = memcached_mget(cli->handle, keys, lengths, maxcount);
memcached_result_free(&cli->res);
memcached_result_create(cli->handle, &cli->res);
for (int i = 0; i < maxcount; ++i) {
memcached_result_st *res = memcached_fetch_result(cli->handle, &cli->res, &status);
if (!res) { /* Fewer results than expected */
return kr_error(ENOENT);
}
val[i].len = memcached_result_length(res);
val[i].data = (void *)memcached_result_value(res);
}
return 0;
}
static int cdb_writev(knot_db_t *db, const knot_db_val_t *key, knot_db_val_t *val,
int maxcount)
{
if (!db || !key || !val) {
return kr_error(EINVAL);
}
struct memcached_cli *cli = db;
/* @warning This expects usage only for the recursor cache; if anyone
 * wants to port this somewhere else, the TTL shouldn't be interpreted.
 */
memcached_return_t ret = 0;
for (int i = 0; i < maxcount; ++i) {
if (val[i].len < 2) {
/* @note Special values/namespaces, not a RR entry with TTL. */
ret = memcached_set(cli->handle, key[i].data, key[i].len, val[i].data, val[i].len, 0, 0);
} else {
struct kr_cache_entry *entry = val[i].data;
ret = memcached_set(cli->handle, key[i].data, key[i].len, val[i].data, val[i].len, entry->ttl, 0);
}
if (ret != 0) {
break;
}
}
return ret;
}
static int cdb_remove(knot_db_t *db, knot_db_val_t *key, int maxcount)
{
if (!db || !key) {
return kr_error(EINVAL);
}
struct memcached_cli *cli = db;
memcached_return_t ret = 0;
for (int i = 0; i < maxcount; ++i) {
ret = memcached_delete(cli->handle, key[i].data, key[i].len, 0);
if (ret != 0) {
break;
}
}
return ret;
}
static int cdb_match(knot_db_t *cache, knot_db_val_t *key, knot_db_val_t *val, int maxcount)
{
if (!cache || !key || !val) {
return kr_error(EINVAL);
}
return kr_error(ENOSYS);
}
const struct kr_cdb_api *cdb_memcached(void)
{
static const struct kr_cdb_api api = {
"memcached",
cdb_init, cdb_deinit, cdb_count, cdb_clear, cdb_sync,
cdb_readv, cdb_writev, cdb_remove,
cdb_match, NULL /* prune */
};
return &api;
}
/* Copyright (C) 2014-2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <contrib/cleanup.h>
#include "daemon/engine.h"
#include "lib/cache/cdb_api.h"
#include "lib/module.h"
#include "lib/cache/api.h"
/** @internal Memcached API */
const struct kr_cdb_api *cdb_memcached(void);
KR_EXPORT
int memcached_init(struct kr_module *module)
{
struct engine *engine = module->data;
array_push(engine->backends, cdb_memcached());
return 0;
}
KR_EXPORT
int memcached_deinit(struct kr_module *module)
{
struct engine *engine = module->data;
/* If this backend is currently loaded, close the cache */
if (engine->resolver.cache.api == cdb_memcached()) {
kr_cache_close(&engine->resolver.cache);
}
/* Prevent it from being loaded again */
for (unsigned i = 0; i < engine->backends.len; ++i) {
const struct kr_cdb_api *api = engine->backends.at[i];
if (strcmp(api->name, "memcached") == 0) {
array_del(engine->backends, i);
break;
}
}
return 0;
}
KR_MODULE_EXPORT(memcached);
memcached_CFLAGS := -fPIC
memcached_SOURCES := modules/memcached/memcached.c modules/memcached/cdb_memcached.c
memcached_DEPEND := $(libkres)
memcached_LIBS := $(libkres_TARGET) $(libkres_LIBS) $(libmemcached_LIBS)
$(call make_c_module,memcached)
@@ -11,15 +11,6 @@ ifeq ($(ENABLE_DNSTAP),yes)
modules_TARGETS += dnstap
endif
# Memcached
ifeq ($(HAS_libmemcached),yes)
#modules_TARGETS += memcached
endif
# Redis
ifeq ($(HAS_hiredis),yes)
#modules_TARGETS += redis
endif
# List of Lua modules
ifeq ($(HAS_lua),yes)
modules_TARGETS += bogus_log \
Redis cache storage
-------------------
This module provides a Redis_ backend for cache storage. Redis is a BSD-licensed key-value cache and storage server.
Like the memcached_ backend, it is a good fit for a shared cache between resolvers; in addition, Redis provides master-server replication and weak-consistency clustering.
After loading, you can see the storage backend registered and usable.
.. code-block:: lua
> modules.load 'redis'
> cache.backends()
[redis://] => true
The Redis client supports TCP and UNIX sockets.
.. code-block:: lua
> cache.storage = 'redis://127.0.0.1'
> cache.storage = 'redis://127.0.0.1:6398'
> cache.storage = 'redis:///tmp/redis.sock'
It also supports indexed databases if you prefix the configuration string with ``DBID@``.
.. code-block:: lua
> cache.storage = 'redis://9@127.0.0.1'
.. warning:: The Redis client doesn't support transactions or pruning. The cache eviction policy should be left to the Redis server; see `Using Redis as an LRU cache <redis-lru_>`_ and the sketch below.
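As a sketch of the server-side setup, eviction is typically enabled in ``redis.conf`` through the ``maxmemory`` and ``maxmemory-policy`` directives (the values below are illustrative only):

.. code-block:: none

   maxmemory 512mb
   maxmemory-policy allkeys-lru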
Build distributed cache
^^^^^^^^^^^^^^^^^^^^^^^
See the `Redis Cluster`_ tutorial.
Dependencies
^^^^^^^^^^^^
Depends on the hiredis_ library, which is usually available from distribution packages / ports, or can be installed from source.
.. _Redis: http://redis.io/
.. _memcached: https://memcached.org/
.. _`Redis Cluster`: http://redis.io/topics/cluster-tutorial
.. _hiredis: https://github.com/redis/hiredis
.. _redis-lru: http://redis.io/topics/lru-cache
/* Copyright (C) 2015-2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/** @file cdb_redis.c
* @brief Implements the operations that the resolver cache needs (get, set, expiration).
*/
#include <assert.h>
#include <string.h>
#include <uv.h>
#include "modules/redis/redis.h"
#include "contrib/ccan/asprintf/asprintf.h"
#include "contrib/cleanup.h"
#include "contrib/ucw/lib.h"
#include "lib/cache/cdb_api.h"
#include "lib/cache.h"
#include "lib/utils.h"
#include "lib/defines.h"
#define REDIS_BATCHSIZE 100
static int cli_connect(struct redis_cli *cli)
{
/* Connect to either UNIX socket or TCP */
if (cli->port == 0) {
cli->handle = redisConnectUnix(cli->addr);
} else {
cli->handle = redisConnect(cli->addr, cli->port);
}
/* Catch errors */
if (!cli->handle) {
return kr_error(ENOMEM);
} else if (cli->handle->err) {
redisFree(cli->handle);
cli->handle = NULL;
return kr_error(ECONNREFUSED);
}
/* Set max bufsize */
cli->handle->reader->maxbuf = REDIS_BUFSIZE;
/* Select database */
redisReply *reply = redisCommand(cli->handle, "SELECT %d", cli->database);
if (!reply) {
redisFree(cli->handle);
cli->handle = NULL;
return kr_error(ENOTDIR);
}
freeReplyObject(reply);
return kr_ok();
}
static void cli_decommit(struct redis_cli *cli)
{
redis_freelist_t *freelist = &cli->freelist;
for (unsigned i = 0; i < freelist->len; ++i) {
freeReplyObject(freelist->at[i]);
}
freelist->len = 0;
}
static void cli_free(struct redis_cli *cli)
{
if (cli->handle) {
redisFree(cli->handle);
}
cli_decommit(cli);
array_clear(cli->freelist);
free(cli->addr);
free(cli);
}
/** @internal Make redis options. */
static struct redis_cli *cli_make(const char *conf_)
{
auto_free char *conf = strdup(conf_);
struct redis_cli *cli = malloc(sizeof(*cli));
if (!cli || !conf) {
free(cli);
return NULL;
}
/* Parse database */
memset(cli, 0, sizeof(*cli));
char *bp = conf;
char *p = strchr(bp, '@');
if (p) {
*p = '\0';
cli->database = atoi(conf);
bp = (p + 1);
}
/* Parse host / ip / sock */
if (access(bp, W_OK) == 0) { /* UNIX */
cli->addr = strdup(bp);
return cli;
}
struct sockaddr_in6 ip6;
p = strchr(bp, ':');
if (!p) { /* IPv4 */
cli->addr = strdup(bp);
cli->port = REDIS_PORT;
return cli;
}
if (!strchr(p + 1, ':')) { /* IPv4 + port */
*p = '\0';
cli->addr = strdup(bp);
cli->port = atoi(p + 1);
} else { /* IPv6 */
if (uv_ip6_addr(bp, 0, &ip6) == 0) {
cli->addr = strdup(bp);
cli->port = REDIS_PORT;
} else { /* IPv6 + port */
p = strrchr(bp, ':');
*p = '\0';
cli->addr = strdup(bp);
cli->port = atoi(p + 1);
}
}
return cli;
}
static int cdb_init(knot_db_t **cache, struct kr_cdb_opts *opts, knot_mm_t *pool)
{
if (!cache || !opts) {
return kr_error(EINVAL);
}
/* Clone redis cli and connect */
struct redis_cli *cli = cli_make(opts->path);
if (!cli) {
return kr_error(ENOMEM);
}
int ret = cli_connect(cli);
if (ret != 0) {
cli_free(cli);
return ret;
}
*cache = cli;
return ret;
}
static void cdb_deinit(knot_db_t *cache)
{
struct redis_cli *cli = cache;
cli_free(cli);
}
static int cdb_sync(knot_db_t *cache)
{
if (!cache) {
return kr_error(EINVAL);
}
struct redis_cli *cli = cache;
cli_decommit(cli);
return 0;
}
/* Disconnect client */
#define CLI_DISCONNECT(cli) \
if ((cli)->handle->err != REDIS_ERR_OTHER) { \
redisFree((cli)->handle); \
(cli)->handle = NULL; \
}
/* Attempt to reconnect */
#define CLI_KEEPALIVE(cli_) \
if ((cli_)->freelist.len > REDIS_MAXFREELIST) { \
cli_decommit(cli_); \
} \
if (!(cli_)->handle) { \
int ret = cli_connect((cli_)); \
if (ret != 0) { \
return ret; \
} \
}
static int cdb_count(knot_db_t *cache)
{
if (!cache) {
return kr_error(EINVAL);
}
int ret = 0;
struct redis_cli *cli = cache;
CLI_KEEPALIVE(cli);
redisReply *reply = redisCommand(cli->handle, "DBSIZE");
if (!reply) {
CLI_DISCONNECT(cli);
return kr_error(EIO);
}
if (reply->type == REDIS_REPLY_INTEGER) {
ret = reply->integer;
}
freeReplyObject(reply);
return ret;
}
static int cdb_clear(knot_db_t *cache)
{
if (!cache) {
return kr_error(EINVAL);
}
struct redis_cli *cli = cache;
CLI_KEEPALIVE(cli);
redisReply *reply = redisCommand(cli->handle, "FLUSHDB");
if (!reply) {
CLI_DISCONNECT(cli);
return kr_error(EIO);
}
freeReplyObject(reply);
return kr_ok();
}
static int cdb_readv(knot_db_t *cache, const knot_db_val_t *key, knot_db_val_t *val,
int maxcount)
{
if (!cache || !key || !val) {
return kr_error(EINVAL);
}
struct redis_cli *cli = cache;
CLI_KEEPALIVE(cli);
/* Build command pipeline */
for (int i = 0; i < maxcount; ++i) {
redisAppendCommand(cli->handle, "GET %b", key[i].data, key[i].len);
}
/* Gather replies */
for (int i = 0; i < maxcount; ++i) {
redisReply *reply = NULL;
redisGetReply(cli->handle, (void **)&reply);
if (!reply) {
CLI_DISCONNECT(cli);
return kr_error(EIO);
}
/* Track reply in a freelist for this transaction */
if (array_push(cli->freelist, reply) < 0) {
freeReplyObject(reply); /* Can't track this, must free */
return kr_error(ENOMEM);
}
/* Return value */
if (reply->type != REDIS_REPLY_STRING) {
return kr_error(EPROTO);
}
val[i].data = reply->str;
val[i].len = reply->len;
}
return kr_ok();
}
static int cdb_writev(knot_db_t *cache, const knot_db_val_t *key, knot_db_val_t *val,
int maxcount)
{
if (!cache || !key || !val) {
return kr_error(EINVAL);
}
struct redis_cli *cli = cache;
CLI_KEEPALIVE(cli);
/* Build command pipeline */
for (int i = 0; i < maxcount; ++i) {
if (val[i].len < 2) {
/* @note Special values/namespaces, not a RR entry with TTL. */
redisAppendCommand(cli->handle, "SET %b %b", key[i].data, key[i].len, val[i].data, val[i].len);
} else {
/* @warning This expects usage only for the recursor cache; if anyone
 * wants to port this somewhere else, the TTL shouldn't be interpreted. */
struct kr_cache_entry *entry = val[i].data;
redisAppendCommand(cli->handle, "SETEX %b %d %b", key[i].data, key[i].len, entry->ttl, val[i].data, val[i].len);
}
}
/* Gather replies */
for (int i = 0; i < maxcount; ++i) {
redisReply *reply = NULL;
redisGetReply(cli->handle, (void **)&reply);
if (!reply) {
CLI_DISCONNECT(cli);
return kr_error(EIO);
}
freeReplyObject(reply);
}
return kr_ok();
}
static int cdb_remove(knot_db_t *cache, knot_db_val_t *key, int maxcount)
{
if (!cache || !key) {
return kr_error(EINVAL);
}
struct redis_cli *cli = cache;
CLI_KEEPALIVE(cli);
/* Build command pipeline */
for (int i = 0; i < maxcount; ++i) {
redisAppendCommand(cli->handle, "DEL %b", key[i].data, key[i].len);
}