Commit 97729953 authored by Marek Vavrusa's avatar Marek Vavrusa

Merge remote-tracking branch 'origin/master' into query-trace

parents 9ca537e8 f4e78b04
Pipeline #31464 passed with stages
in 6 minutes and 44 seconds
......@@ -48,7 +48,6 @@ _obj
/doc/html
/daemon/kresd
/daemon/lua/*.inc
/daemon/lua/kres.lua
/daemon/lua/trust_anchors.lua
/daemon/lua/zonefile.lua
/tests/test_array
......@@ -67,3 +66,4 @@ libkres.amalg.c
/libkres.pc
/modules/version/version.lua
/tags
/coverage
......@@ -6,6 +6,12 @@ variables:
GIT_SUBMODULE_STRATEGY: recursive
COVERAGE: '1'
stages:
- build
- test
- coverage
- deploy
build:linux:amd64:
stage: build
script:
......@@ -30,7 +36,7 @@ test:linux:amd64:
stage: test
script:
- PREFIX=$(pwd)/.local make -k check
- make coverage-c COVERAGE_STAGE=gcov-check
- PREFIX=$(pwd)/.local MAKEFLAGS="--jobs $(nproc)" make coverage-c COVERAGE_STAGE=gcov-check
dependencies:
- build:linux:amd64
artifacts:
......@@ -45,8 +51,8 @@ test:linux:amd64:
installcheck:linux:amd64:
stage: test
script:
- PREFIX=$(pwd)/.local make -k installcheck
- make coverage-c coverage-lua COVERAGE_STAGE=gcov-installcheck
- PREFIX=$(pwd)/.local MAKEFLAGS="--jobs $(nproc) --keep-going" make -k installcheck
- PREFIX=$(pwd)/.local MAKEFLAGS="--jobs $(nproc)" make coverage-c coverage-lua COVERAGE_STAGE=gcov-installcheck
dependencies:
- build:linux:amd64
artifacts:
......@@ -62,7 +68,7 @@ deckard:linux:amd64:
stage: test
script:
- PREFIX=$(pwd)/.local MAKEFLAGS="--jobs $(nproc) --keep-going" make check-integration
- make coverage-c coverage-lua COVERAGE_STAGE=gcov-deckard
- PREFIX=$(pwd)/.local MAKEFLAGS="--jobs $(nproc)" make coverage-c coverage-lua COVERAGE_STAGE=gcov-deckard
dependencies:
- build:linux:amd64
artifacts:
......@@ -127,11 +133,15 @@ test:linux:amd64:valgrind:
respdiff:iter:udp:linux:amd64:
stage: test
script:
- source <(./scripts/coverage_env.sh "$(pwd)" "$(pwd)/coverage.stats/respdiff" "iter/udp" --export)
- ulimit -n "$(ulimit -Hn)" # applies only for kresd ATM
- PREFIX=$(pwd)/.local ./ci/respdiff/start-resolvers.sh
- ./ci/respdiff/run-respdiff-tests.sh udp
- cat results/respdiff.txt
- echo 'test if mismatch rate >= 1 %'
- grep -q '^target diagrees.*0\.[0-9][0-9] %' results/respdiff.txt
- killall --wait kresd
- PREFIX=$(pwd)/.local MAKEFLAGS="--jobs $(nproc)" make coverage-c coverage-lua COVERAGE_STAGE=gcov-respdiff-iter-udp
dependencies:
- build:linux:amd64
artifacts:
......@@ -139,6 +149,7 @@ respdiff:iter:udp:linux:amd64:
expire_in: '1 week'
paths:
- results/*.txt
- ./*.info
tags:
- docker
- linux
......@@ -147,11 +158,14 @@ respdiff:iter:udp:linux:amd64:
respdiff:iter:tcp:linux:amd64:
stage: test
script:
- source <(./scripts/coverage_env.sh "$(pwd)" "$(pwd)/coverage.stats/respdiff" "iter/tcp" --export)
- PREFIX=$(pwd)/.local ./ci/respdiff/start-resolvers.sh
- ./ci/respdiff/run-respdiff-tests.sh tcp
- cat results/respdiff.txt
- echo 'test if mismatch rate >= 1 %'
- grep -q '^target diagrees.*0\.[0-9][0-9] %' results/respdiff.txt
- killall --wait kresd
- PREFIX=$(pwd)/.local MAKEFLAGS="--jobs $(nproc)" make coverage-c coverage-lua COVERAGE_STAGE=gcov-respdiff-iter-tcp
dependencies:
- build:linux:amd64
artifacts:
......@@ -159,6 +173,7 @@ respdiff:iter:tcp:linux:amd64:
expire_in: '1 week'
paths:
- results/*.txt
- ./*.info
tags:
- docker
- linux
......@@ -167,11 +182,14 @@ respdiff:iter:tcp:linux:amd64:
respdiff:iter:tls:linux:amd64:
stage: test
script:
- source <(./scripts/coverage_env.sh "$(pwd)" "$(pwd)/coverage.stats/respdiff" "iter/tls" --export)
- PREFIX=$(pwd)/.local ./ci/respdiff/start-resolvers.sh
- ./ci/respdiff/run-respdiff-tests.sh tls
- cat results/respdiff.txt
- echo 'test if mismatch rate >= 1 %'
- grep -q '^target diagrees.*0\.[0-9][0-9] %' results/respdiff.txt
- killall --wait kresd
- PREFIX=$(pwd)/.local MAKEFLAGS="--jobs $(nproc)" make coverage-c coverage-lua COVERAGE_STAGE=gcov-respdiff-iter-tls
dependencies:
- build:linux:amd64
artifacts:
......@@ -179,25 +197,49 @@ respdiff:iter:tls:linux:amd64:
expire_in: '1 week'
paths:
- results/*.txt
- ./*.info
tags:
- docker
- linux
- amd64
coverage:linux:amd64:
stage: deploy
# compute coverage for all runs
coverage:
stage: coverage
script:
- make coverage
- PREFIX=$(pwd)/.local make coverage
artifacts:
expire_in: '1 week'
paths:
- coverage
coverage: '/lines\.+:\s(\d+.\d+\%)/'
dependencies:
- build:linux:amd64
- test:linux:amd64
- installcheck:linux:amd64
- deckard:linux:amd64
- respdiff:iter:udp:linux:amd64
- respdiff:iter:tcp:linux:amd64
- respdiff:iter:tls:linux:amd64
tags:
- docker
- linux
- amd64
# publish coverage only for master branch
pages:
stage: deploy
only:
- master
dependencies:
- coverage
script:
- mv coverage/ public/
artifacts:
expire_in: '30 days'
paths:
- public
#arm_build:
# image: cznic/armhf-ubuntu:16.04
# stage: build
......
......@@ -30,11 +30,12 @@ before_script:
- BOOTSTRAP_CLEANUP=1 ./scripts/bootstrap-depends.sh ${HOME}/.local
- rvm get stable || true
script:
- CFLAGS="-O2 -g -fno-omit-frame-pointer -DDEBUG" make -j2 install check V=1 COVERAGE=1 PREFIX=${HOME}/.local DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}
- CFLAGS="-O2 -g -fno-omit-frame-pointer -DDEBUG" make -j2 install check V=1 PREFIX=${HOME}/.local DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}
- ./daemon/kresd -h
- ./daemon/kresd -V
- echo "quit()" | ./daemon/kresd -a 127.0.0.1@53535 .
- CFLAGS="-O2 -g -fno-omit-frame-pointer -DDEBUG" make -j2 check-integration COVERAGE=1 PREFIX=${HOME}/.local DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}
# Deckard should be OK just on Linux
# - CFLAGS="-O2 -g -fno-omit-frame-pointer -DDEBUG" make -j2 check-integration PREFIX=${HOME}/.local DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}
after_success:
- if test $TRAVIS_OS_NAME = linux; then coveralls -i lib -i daemon -x ".c" --gcov-options '\-lp'; fi
sudo: false
......
......@@ -6,25 +6,12 @@ all: info lib daemon client modules etc
install: lib-install daemon-install client-install modules-install etc-install
check: all tests
clean: contrib-clean lib-clean daemon-clean client-clean modules-clean \
tests-clean doc-clean bench-clean
tests-clean doc-clean bench-clean coverage-clean
doc: doc-html
lint: $(patsubst %.lua.in,%.lua,$(wildcard */*/*.lua.in))
luacheck --codes --formatter TAP .
coverage-c:
@echo "# C coverage in $(COVERAGE_STAGE).c.info"
@$(LCOV) --no-external --capture --directory . --output-file $(COVERAGE_STAGE).c.info > /dev/null
coverage-lua: $(wildcard */*/luacov.stats.out)
@echo "# Lua coverage in $(COVERAGE_STAGE).lua.info"
@if [ ! -z "$^" ]; then ./scripts/luacov_to_info.lua $^ > $(COVERAGE_STAGE).lua.info; fi
coverage:
@$(LCOV) $(addprefix --add-tracefile ,$(wildcard $(COVERAGE_STAGE)*.info)) --output-file coverage.info
.PHONY: all install check clean doc info
# Options
ifdef COVERAGE
BUILD_CFLAGS += --coverage
endif
.PHONY: all install check clean doc info lint
# Dependencies
KNOT_MINVER := 2.4.0
......@@ -186,6 +173,7 @@ $(DESTDIR)$(ETCDIR):
# Sub-targets
include contrib/contrib.mk
include coverage.mk
include lib/lib.mk
include client/client.mk
include daemon/daemon.mk
......
# Knot DNS Resolver
[![Build Status](https://img.shields.io/travis/CZ-NIC/knot-resolver/master.svg)](https://travis-ci.org/CZ-NIC/knot-resolver)
[![Coverage Status](https://img.shields.io/coveralls/CZ-NIC/knot-resolver.svg)](https://coveralls.io/r/CZ-NIC/knot-resolver)
[![Build Status](https://gitlab.labs.nic.cz/knot/knot-resolver/badges/master/pipeline.svg?x)](https://gitlab.labs.nic.cz/knot/knot-resolver/commits/master)
[![Coverage Status](https://gitlab.labs.nic.cz/knot/knot-resolver/badges/master/coverage.svg?x)](https://knot.pages.labs.nic.cz/knot-resolver/)
[![Coverity](https://img.shields.io/coverity/scan/3912.svg)](https://scan.coverity.com/projects/3912)
[![Documentation Status](https://readthedocs.org/projects/knot-resolver/badge/?version=latest)](https://readthedocs.org/projects/knot-resolver/?badge=latest)
[![Join the chat at gitter.im/CZ-NIC/knot-resolver](https://badges.gitter.im/Join%20Chat.svg?x)](https://gitter.im/CZ-NIC/knot-resolver?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Knot DNS Resolver is a caching full resolver implementation written in C and [LuaJIT][luajit], both a resolver library and a daemon. The core architecture is tiny and efficient, and provides a foundation and
......
-- Refer to manual: https://knot-resolver.readthedocs.io/en/latest/daemon.html#configuration
-- Listen on localhost and external interface
net.listen('127.0.0.1', 5353)
net.listen('::1', 5353)
......
......@@ -21,6 +21,8 @@ MODULEDIR ?= $(LIBDIR)/kdns_modules
ETCDIR ?= $(PREFIX)/etc/kresd
ROOTHINTS ?= $(ETCDIR)/root.hints
COVERAGE_STAGE ?= gcov
COVERAGE_STATSDIR ?= $(CURDIR)/coverage.stats
TOPSRCDIR := $(CURDIR)
# Tools
CC ?= cc
......
# Measure code coverage using luacov and gcov
# C and Lua code is measured separately and results are combined together
# Define COVERAGE=1 during build *and* test runs to enable measurement.
#
# Beware: Tests are typically run in parallel and neither luacov nor gcov
# supports that, so we have to store results from each run separately
# and combine them.
coverage-c-combine-gcda:
@# combine trees of gcda files into one info file per tree
@mkdir -p '$(COVERAGE_STATSDIR)/tmp.c'
@LCOV=$(LCOV) ./scripts/coverage_c_combine.sh '$(TOPSRCDIR)' '$(COVERAGE_STATSDIR)' '$(COVERAGE_STATSDIR)/tmp.c'
coverage-c: coverage-c-combine-gcda
@# combine info files for each tree into resulting c.info file
@$(LCOV) -q $(addprefix --add-tracefile ,$(wildcard $(COVERAGE_STATSDIR)/tmp.c/*.info)) --output-file '$(COVERAGE_STAGE).c.info'
@$(RM) -r '$(COVERAGE_STATSDIR)/tmp.c'
LUA_STATS_OUT := $(shell find '$(COVERAGE_STATSDIR)' -type f -name 'luacov.stats.out' 2> /dev/null)
LUA_INFOS_OUT := $(patsubst %.stats.out,%.lua.info,$(LUA_STATS_OUT))
coverage-lua-fix-paths: $(LUA_STATS_OUT)
@# map Lua install paths to source paths
@$(MAKE) PREFIX=$(PREFIX) install --dry-run --always-make | scripts/map_install_src.lua --sed > .luacov_path_map
@sed -i -f .luacov_path_map $^
@$(RM) .luacov_path_map
luacov.empty_stats.out:
@# generate list of all Lua files to fill holes in luacov stats
@$(MAKE) PREFIX=$(PREFIX) install --dry-run --always-make | scripts/map_install_src.lua | cut -f 2 | grep '\.lua$$' | scripts/luacov_gen_empty.sh > luacov.empty_stats.out
%.lua.info: %.stats.out coverage-lua-fix-paths
@scripts/luacov_to_info.lua $*.stats.out > $@
coverage-lua: $(LUA_INFOS_OUT) luacov.empty_stats.out
@echo '# Lua coverage in $(COVERAGE_STAGE).lua.info'
@# add missing files to luacov stats
@scripts/luacov_to_info.lua luacov.empty_stats.out > luacov.empty_stats.lua.info
@# combine info files for each tree into resulting lua.info file
@$(LCOV) -q $(addprefix --add-tracefile ,$(LUA_INFOS_OUT)) --add-tracefile luacov.empty_stats.lua.info --output-file '$(COVERAGE_STAGE).lua.info'
@$(RM) luacov.empty_stats.out luacov.empty_stats.lua.info
coverage:
@$(LCOV) $(addprefix --add-tracefile ,$(wildcard $(COVERAGE_STAGE)*.info)) --output-file coverage.info
@$(GENHTML) --no-function-coverage --no-branch-coverage -q -o coverage -p '$(realpath $(CURDIR))' -t 'Knot DNS Resolver $(VERSION)-$(PLATFORM) coverage report' --legend coverage.info
coverage-clean:
@$(RM) -rf '$(COVERAGE_STATSDIR)'
.PHONY: coverage-c-combine-gcda coverage-c coverage-lua-fix-paths coverage-lua coverage coverage-clean
# Options
ifdef COVERAGE
BUILD_CFLAGS += --coverage
endif
......@@ -398,6 +398,187 @@ static int net_tls(lua_State *L)
return 1;
}
/**
 * map_walk() callback: serialize one TLS client parameter entry into Lua.
 *
 * Builds a table { pins = {...}, ca_files = {...}, hostnames = {...} }
 * and stores it under `key` in the table currently on top of the stack.
 *
 * @param key  address[@port] string identifying the entry
 * @param val  struct tls_client_paramlist_entry * (NULL entries are skipped)
 * @param data lua_State * carrying the destination table on top
 * @return always 0 so that map_walk() keeps iterating
 */
static int print_tls_param(const char *key, void *val, void *data)
{
	if (!val) {
		return 0;
	}

	struct tls_client_paramlist_entry *entry = (struct tls_client_paramlist_entry *)val;
	lua_State *L = (lua_State *)data;

	lua_createtable(L, 0, 3);

	/* SPKI pins.  lua_rawseti() is the idiomatic way to fill a freshly
	 * created array table: no metamethod lookup and no separate key push,
	 * unlike the lua_pushnumber() + lua_settable() pair. */
	lua_createtable(L, entry->pins.len, 0);
	for (size_t i = 0; i < entry->pins.len; ++i) {
		lua_pushstring(L, entry->pins.at[i]);
		lua_rawseti(L, -2, (int)(i + 1));
	}
	lua_setfield(L, -2, "pins");

	/* CA certificate bundle paths. */
	lua_createtable(L, entry->ca_files.len, 0);
	for (size_t i = 0; i < entry->ca_files.len; ++i) {
		lua_pushstring(L, entry->ca_files.at[i]);
		lua_rawseti(L, -2, (int)(i + 1));
	}
	lua_setfield(L, -2, "ca_files");

	/* Hostnames expected in the server certificate. */
	lua_createtable(L, entry->hostnames.len, 0);
	for (size_t i = 0; i < entry->hostnames.len; ++i) {
		lua_pushstring(L, entry->hostnames.at[i]);
		lua_rawseti(L, -2, (int)(i + 1));
	}
	lua_setfield(L, -2, "hostnames");

	lua_setfield(L, -2, key);
	return 0;
}
/**
 * Push the current TLS client parameter map as a Lua table keyed by
 * "address[@port]" strings, one sub-table per configured server.
 *
 * @return number of Lua results: 1 on success, 0 when the engine is
 *         unavailable or no parameters are configured.
 */
static int print_tls_client_params(lua_State *L)
{
	struct engine *engine = engine_luaget(L);
	if (!engine) {
		return 0;
	}
	/* &engine->net is the address of an embedded struct and can never be
	 * NULL, so the former `if (!net)` check was dead code and is gone. */
	struct network *net = &engine->net;
	if (net->tls_client_params.root == 0) {
		return 0;
	}
	lua_newtable(L);
	map_walk(&net->tls_client_params, print_tls_param, (void *)L);
	return 1;
}
/**
 * net.tls_client() — manage TLS client (DNS-over-TLS upstream) parameters.
 *
 * Accepted call forms (argument counts checked below):
 *   net.tls_client()                                      -- print current params
 *   net.tls_client("addr")                                -- bare entry
 *   net.tls_client("addr", {pins})                        -- SPKI pins
 *   net.tls_client("addr", {ca_files}, {hostnames})       -- CA validation
 *   net.tls_client("addr", {pins}, {ca_files}, {hostnames})
 *
 * Returns true on success; raises a Lua error on bad arguments, bad address,
 * or allocation failure.
 */
static int net_tls_client(lua_State *L)
{
struct engine *engine = engine_luaget(L);
if (!engine) {
return 0;
}
struct network *net = &engine->net;
if (!net) {
return 0;
}
/* Only return current credentials. */
if (lua_gettop(L) == 0) {
return print_tls_client_params(L);
}
const char *full_addr = NULL;
bool pin_exists = false;
bool ca_file_exists = false;
/* Decode which call form was used.  Note the 3-argument form carries
 * (ca_files, hostnames), not pins, so only ca_file_exists is set. */
if ((lua_gettop(L) == 1) && lua_isstring(L, 1)) {
full_addr = lua_tostring(L, 1);
} else if ((lua_gettop(L) == 2) && lua_isstring(L, 1) && lua_istable(L, 2)) {
full_addr = lua_tostring(L, 1);
pin_exists = true;
} else if ((lua_gettop(L) == 3) && lua_isstring(L, 1) && lua_istable(L, 2)) {
full_addr = lua_tostring(L, 1);
ca_file_exists = true;
} else if ((lua_gettop(L) == 4) && lua_isstring(L, 1) &&
lua_istable(L, 2) && lua_istable(L, 3)) {
full_addr = lua_tostring(L, 1);
pin_exists = true;
ca_file_exists = true;
} else {
format_error(L, "net.tls_client takes one parameter (\"address\"), two parameters (\"address\",\"pin\"), three parameters (\"address\", \"ca_file\", \"hostname\") or four ones: (\"address\", \"pin\", \"ca_file\", \"hostname\")");
lua_error(L);
}
char addr[INET6_ADDRSTRLEN];
uint16_t port = 0;
if (kr_straddr_split(full_addr, addr, sizeof(addr), &port) != kr_ok()) {
format_error(L, "invalid IP address");
lua_error(L);
}
/* Default to the standard DNS-over-TLS port when none was given. */
if (port == 0) {
port = 853;
}
/* Bare address: register the entry with no pins/CA/hostname. */
if (!pin_exists && !ca_file_exists) {
int r = tls_client_params_set(&net->tls_client_params,
addr, port, NULL, NULL, NULL);
if (r != 0) {
lua_pushstring(L, strerror(ENOMEM));
lua_error(L);
}
lua_pushboolean(L, true);
return 1;
}
if (pin_exists) {
/* iterate over table with pins
* http://www.lua.org/manual/5.1/manual.html#lua_next */
lua_pushnil(L); /* first key */
while (lua_next(L, 2)) { /* pin table is in stack at index 2 */
/* pin now at index -1, key at index -2*/
const char *pin = lua_tostring(L, -1);
int r = tls_client_params_set(&net->tls_client_params,
addr, port, NULL, NULL, pin);
if (r != 0) {
lua_pushstring(L, strerror(ENOMEM));
lua_error(L);
}
lua_pop(L, 1);
}
}
/* The CA/hostname tables sit at stack indices 2/3 in the 3-argument
 * form, but shift to 3/4 when a pin table occupies index 2. */
int ca_table_index = 2;
int hostname_table_index = 3;
if (ca_file_exists) {
if (pin_exists) {
ca_table_index = 3;
hostname_table_index = 4;
}
} else {
/* Pins only — nothing more to register. */
lua_pushboolean(L, true);
return 1;
}
/* iterate over ca filenames */
lua_pushnil(L);
while (lua_next(L, ca_table_index)) {
const char *ca_file = lua_tostring(L, -1);
int r = tls_client_params_set(&net->tls_client_params,
addr, port, ca_file, NULL, NULL);
if (r != 0) {
lua_pushstring(L, strerror(ENOMEM));
lua_error(L);
}
/* removes 'value'; keeps 'key' for next iteration */
lua_pop(L, 1);
}
/* iterate over hostnames */
lua_pushnil(L);
while (lua_next(L, hostname_table_index)) {
const char *hostname = lua_tostring(L, -1);
int r = tls_client_params_set(&net->tls_client_params,
addr, port, NULL, hostname, NULL);
if (r != 0) {
/* NOTE(review): strerror(ENOMEM) is reported regardless of the
 * actual error code `r` — consider propagating `r` instead. */
lua_pushstring(L, strerror(ENOMEM));
lua_error(L);
}
/* removes 'value'; keeps 'key' for next iteration */
lua_pop(L, 1);
}
lua_pushboolean(L, true);
return 1;
}
static int net_tls_padding(lua_State *L)
{
struct engine *engine = engine_luaget(L);
......@@ -508,6 +689,8 @@ int lib_net(lua_State *L)
{ "bufsize", net_bufsize },
{ "tcp_pipeline", net_pipeline },
{ "tls", net_tls },
{ "tls_server", net_tls },
{ "tls_client", net_tls_client },
{ "tls_padding", net_tls_padding },
{ "outgoing_v4", net_outgoing_v4 },
{ "outgoing_v6", net_outgoing_v6 },
......@@ -1252,7 +1435,7 @@ static int wrk_resolve(lua_State *L)
/* Add initialisation callback */
if (lua_isfunction(L, 5)) {
lua_pushvalue(L, 5);
lua_pushlightuserdata(L, &task->req);
lua_pushlightuserdata(L, worker_task_request(task));
(void) execute_callback(L, 1);
}
......
......@@ -663,6 +663,34 @@ static void update_state(uv_timer_t *handle)
lru_apply(engine->resolver.cache_rtt, update_stat_item, NULL);
}
/**
* Start luacov measurement and store results to file specified by
* KRESD_COVERAGE_STATS environment variable.
* Do nothing if the variable is not set.
*/
/**
 * Start luacov measurement and store results to the file specified by
 * the KRESD_COVERAGE_STATS environment variable.
 * Does nothing if the variable is not set.
 *
 * The snippet also calls jit.off(): luacov's line hooks do not fire
 * inside JIT-compiled traces, so measurement requires the interpreter.
 *
 * @param engine engine whose Lua state receives the luacov initialisation
 */
static void init_measurement(struct engine *engine)
{
	const char * const statspath = getenv("KRESD_COVERAGE_STATS");
	if (!statspath)
		return;

	char *snippet = NULL;
	int ret = asprintf(&snippet,
		"_luacov_runner = require('luacov.runner')\n"
		"_luacov_runner.init({\n"
		"  statsfile = '%s',\n"
		"  exclude = {'test', 'tapered', 'lua/5.1'},\n"
		"})\n"
		"jit.off()\n", statspath
	);
	/* The previous assert()-based checks disappear under NDEBUG, which
	 * would leave failures unhandled and `snippet` unchecked; coverage
	 * runs are CI-only, so fail loudly instead of silently mis-measuring. */
	if (ret < 0) {
		fprintf(stderr, "[system] failed to prepare luacov init snippet\n");
		abort();
	}

	ret = luaL_loadstring(engine->L, snippet);
	free(snippet); /* luaL_loadstring() copies the source; safe to free now */
	if (ret != 0) {
		fprintf(stderr, "[system] failed to load luacov init snippet: %s\n",
			lua_tostring(engine->L, -1));
		abort();
	}
	lua_call(engine->L, 0, 0);
}
int engine_init(struct engine *engine, knot_mm_t *pool)
{
if (engine == NULL) {
......@@ -677,6 +705,7 @@ int engine_init(struct engine *engine, knot_mm_t *pool)
if (ret != 0) {
engine_deinit(engine);
}
init_measurement(engine);
/* Initialize resolver */
ret = init_resolver(engine);
if (ret != 0) {
......
......@@ -33,6 +33,8 @@
} \
} while (0)
void io_release(uv_handle_t *handle);
static void check_bufsize(uv_handle_t* handle)
{
/* We want to buffer at least N waves in advance.
......@@ -48,15 +50,18 @@ static void check_bufsize(uv_handle_t* handle)
/* Reset a session to a pristine zeroed state, releasing its TLS contexts.
 * Caller must ensure no tasks are still attached (enforced below).
 * NOTE(review): the first assert is subsumed by the second (tasks.len == 0
 * makes the disjunction trivially true) — this looks like a leftover from a
 * merge; confirm which invariant is intended. */
static void session_clear(struct session *s)
{
assert(s->outgoing || s->tasks.len == 0);
assert(s->tasks.len == 0 && s->waiting.len == 0);
array_clear(s->tasks);
array_clear(s->waiting);
tls_free(s->tls_ctx);
tls_client_ctx_free(s->tls_client_ctx);
/* Zero the whole struct so the session can be safely reused or freed. */
memset(s, 0, sizeof(*s));
}
void session_free(struct session *s)
{
if (s) {
assert(s->tasks.len == 0 && s->waiting.len == 0);
session_clear(s);
free(s);
}
......@@ -89,6 +94,8 @@ static void session_release(struct worker_ctx *worker, uv_handle_t *handle)
if (!s) {
return;
}
assert(s->waiting.len == 0 && s->tasks.len == 0);
assert(s->buffering == NULL);
if (!s->outgoing && handle->type == UV_TCP) {
worker_end_tcp(worker, handle); /* to free the buffering task */
}
......@@ -101,14 +108,15 @@ static void session_release(struct worker_ctx *worker, uv_handle_t *handle)
}
}
static uv_stream_t *handle_alloc(uv_loop_t *loop)
static uv_stream_t *handle_borrow(uv_loop_t *loop)
{
uv_stream_t *handle = calloc(1, sizeof(*handle));
if (!handle) {
struct worker_ctx *worker = loop->data;
void *req = worker_iohandle_borrow(worker);
if (!req) {
return NULL;
}
return handle;
return (uv_stream_t *)req;
}
static void handle_getbuf(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf)
......@@ -139,13 +147,19 @@ void udp_recv(uv_udp_t *handle, ssize_t nread, const uv_buf_t *buf,
{
uv_loop_t *loop = handle->loop;
struct worker_ctx *worker = loop->data;
struct session *s = handle->data;
if (s->closing) {
return;
}
if (nread <= 0) {
if (nread < 0) { /* Error response, notify resolver */
worker_submit(worker, (uv_handle_t *)handle, NULL, addr);
} /* nread == 0 is for freeing buffers, we don't need to do this */
return;
}
if (addr->sa_family == AF_UNSPEC) {
return;
}
knot_pkt_t *query = knot_pkt_new(buf->base, nread, &worker->pkt_pool);
if (query) {
query->max_size = KNOT_WIRE_MAX_PKTSIZE;
......@@ -158,8 +172,11 @@ static int udp_bind_finalize(uv_handle_t *handle)
{
check_bufsize((uv_handle_t *)handle);
/* Handle is already created, just create context. */
handle->data = session_new();
assert(handle->data);
struct session *session = session_new();
assert(session);
session->outgoing = false;
session->handle = handle;
handle->data = session;
return io_start_read((uv_handle_t *)handle);
}
......@@ -189,20 +206,16 @@ int udp_bindfd(uv_udp_t *handle, int fd)
return udp_bind_finalize((uv_handle_t *)handle);
}
/* uv_close() callback for an idle-connection timer: closes the TCP handle
 * stashed in timer->data and lets io_free release its resources.
 * NOTE(review): assumes timer->data was set to the connection handle when
 * the timer was armed — confirm against the timer setup site. */
static void tcp_timeout(uv_handle_t *timer)
{
uv_handle_t *handle = timer->data;
uv_close(handle, io_free);
}
static void tcp_timeout_trigger(uv_timer_t *timer)
{
uv_handle_t *handle = timer->data;
struct session *session = handle->data;
struct session *session = timer->data;
assert(session->outgoing == false);
if (session->tasks.len > 0) {
uv_timer_again(timer);
} else {
uv_close((uv_handle_t *)timer, tcp_timeout);
uv_timer_stop(timer);
worker_session_close(session);
}
}
......@@ -210,12 +223,24 @@ static void tcp_recv(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf)
{
uv_loop_t *loop = handle->loop;
struct session *s = handle->data;
if (s->closing) {
return;
}
/* nread might be 0, which does not indicate an error or EOF.
* This is equivalent to EAGAIN or EWOULDBLOCK under read(2). */
if (nread == 0) {
return;
}
if (nread == UV_EOF) {
nread = 0;
}
struct worker_ctx *worker = loop->data;
/* TCP pipelining is rather complicated and requires cooperation from the worker
* so the whole message reassembly and demuxing logic is inside worker */
int ret = 0;
if (s->has_tls) {
ret = tls_process(worker, handle, (const uint8_t *)buf->base, nread);
ret = s->outgoing ? tls_client_process(worker, handle, (const uint8_t *)buf->base, nread) :
tls_process(worker, handle, (const uint8_t *)buf->base, nread);
} else {
ret = worker_process_tcp(worker, handle, (const uint8_t *)buf->base, nread);
}
......@@ -226,14 +251,14 @@ static void tcp_recv(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf)
if (!s->outgoing && !uv_is_closing((uv_handle_t *)&s->timeout)) {
uv_timer_stop(&s->timeout);
if (s->tasks.len == 0) {
uv_close((uv_handle_t *)&s->timeout, tcp_timeout);
worker_session_close(s);
} else { /* If there are tasks running, defer until they finish. */
uv_timer_start(&s->timeout, tcp_timeout_trigger, 1, KR_CONN_RTT_MAX/2);
}
}
/* Connection spawned at least one request, reset its deadline for next query.
* https://tools.ietf.org/html/rfc7766#section-6.2.3 */
} else if (ret > 0 && !s->outgoing) {
} else if (ret > 0 && !s->outgoing && !s->closing) {
uv_timer_again(&s->timeout);
}
mp_flush(worker->pkt_pool.ctx);
......@@ -244,14 +269,15 @@ static void _tcp_accept(uv_stream_t *master, int status, bool tls)
if (status != 0) {
return;
}
uv_stream_t *client = handle_alloc(master->loop);
uv_stream_t *client = handle_borrow(master->loop);
if (!client) {
return;
}
memset(client, 0, sizeof(*client));
io_create(master->loop, (uv_handle_t *)client, SOCK_STREAM);
if (uv_accept(master, client) != 0) {
uv_close((uv_handle_t *)client, io_free);
uv_close((uv_handle_t *)client, io_release);
return;
}
......@@ -259,13 +285,21 @@ static void _tcp_accept(uv_stream_t *master, int status, bool tls)
* It will re-check every half of a request time limit if the connection
* is idle and should be terminated, this is an educated guess. */
struct session *session = client->data;
assert(session->outgoing == false);
struct sockaddr *addr = &(session->peer.ip);
int addr_len = sizeof(union inaddr);
int ret = uv_tcp_getpeername((uv_tcp_t *)client, addr, &addr_len);