Commit 2ad4dc74 authored by Filip Siroky, committed by Daniel Salzman

conf: cache optimization

parent d761de23
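In short, conf()->cache now stores scalar options as integers that init_cache() converts once with conf_int(), instead of keeping the raw conf_val_t and redoing the conversion on every access in per-query code (TCP handling, rate limiting, query forwarding). The following is a minimal, self-contained sketch of that pattern with simplified stand-in types; it is not the real Knot DNS conf API, only the shape of the change visible in the hunks below.

/* Simplified stand-ins for the real Knot DNS types -- illustration only. */
#include <stdint.h>

typedef struct {
	int64_t data;                     /* pretend this is a parsed config item */
} conf_val_t;

static int64_t conf_int(conf_val_t *val)
{
	return val->data;                 /* the real conf_int() decodes the stored value */
}

struct cache {
	conf_val_t srv_tcp_reply_timeout_val;  /* old layout: raw value, convert on use */
	int32_t    srv_tcp_reply_timeout;      /* new layout: seconds, converted once */
};

/* Old hot path: take a pointer into the cache and convert on every call. */
static int reply_timeout_ms_old(struct cache *c)
{
	conf_val_t *val = &c->srv_tcp_reply_timeout_val;
	return conf_int(val) * 1000;
}

/* New hot path: a single integer load, scaled to milliseconds. */
static int reply_timeout_ms_new(struct cache *c)
{
	return 1000 * c->srv_tcp_reply_timeout;
}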
@@ -122,12 +122,23 @@ static void init_cache(
 	}
 	conf->cache.srv_max_ipv6_udp_payload = conf_int(&val);
+	val = conf_get(conf, C_SRV, C_TCP_HSHAKE_TIMEOUT);
+	conf->cache.srv_tcp_hshake_timeout = conf_int(&val);
+	val = conf_get(conf, C_SRV, C_TCP_IDLE_TIMEOUT);
+	conf->cache.srv_tcp_idle_timeout = conf_int(&val);
+	val = conf_get(conf, C_SRV, C_TCP_REPLY_TIMEOUT);
+	conf->cache.srv_tcp_reply_timeout = conf_int(&val);
+	val = conf_get(conf, C_SRV, C_MAX_TCP_CLIENTS);
+	conf->cache.srv_max_tcp_clients = conf_int(&val);
+	val = conf_get(conf, C_SRV, C_RATE_LIMIT_SLIP);
+	conf->cache.srv_rate_limit_slip = conf_int(&val);
 	conf->cache.srv_nsid = conf_get(conf, C_SRV, C_NSID);
-	conf->cache.srv_max_tcp_clients = conf_get(conf, C_SRV, C_MAX_TCP_CLIENTS);
-	conf->cache.srv_tcp_hshake_timeout = conf_get(conf, C_SRV, C_TCP_HSHAKE_TIMEOUT);
-	conf->cache.srv_tcp_idle_timeout = conf_get(conf, C_SRV, C_TCP_IDLE_TIMEOUT);
-	conf->cache.srv_tcp_reply_timeout = conf_get(conf, C_SRV, C_TCP_REPLY_TIMEOUT);
-	conf->cache.srv_rate_limit_slip = conf_get(conf, C_SRV, C_RATE_LIMIT_SLIP);
 	conf->cache.srv_rate_limit_whitelist = conf_get(conf, C_SRV, C_RATE_LIMIT_WHITELIST);
 }
......
@@ -93,12 +93,12 @@ typedef struct {
 	struct {
 		int16_t srv_max_ipv4_udp_payload;
 		int16_t srv_max_ipv6_udp_payload;
+		int32_t srv_tcp_hshake_timeout;
+		int32_t srv_tcp_idle_timeout;
+		int32_t srv_tcp_reply_timeout;
+		int32_t srv_max_tcp_clients;
+		int32_t srv_rate_limit_slip;
 		conf_val_t srv_nsid;
-		conf_val_t srv_max_tcp_clients;
-		conf_val_t srv_tcp_hshake_timeout;
-		conf_val_t srv_tcp_idle_timeout;
-		conf_val_t srv_tcp_reply_timeout;
-		conf_val_t srv_rate_limit_slip;
 		conf_val_t srv_rate_limit_whitelist;
 	} cache;
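Note: srv_nsid and srv_rate_limit_whitelist keep their conf_val_t type, presumably because the NSID is opaque data and the rate-limit whitelist is a list of address ranges, so unlike the timeouts, the client limit and the slip value they cannot be folded into a single pre-converted integer.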
......
@@ -88,8 +88,7 @@ static int dnsproxy_fwd(int state, knot_pkt_t *pkt, struct query_data *qdata, vo
 	}
 	/* Forward request. */
-	conf_val_t *val = &conf()->cache.srv_tcp_reply_timeout;
-	int timeout = conf_int(val) * 1000;
+	int timeout = 1000 * conf()->cache.srv_tcp_reply_timeout;
 	ret = knot_requestor_exec(&re, req, timeout);
 	knot_request_free(req, re.mm);
......
@@ -455,7 +455,7 @@ static int ratelimit_apply(int state, knot_pkt_t *pkt, knot_layer_t *ctx)
 	}
 	/* Now it is slip or drop. */
-	int slip = conf_int(&conf()->cache.srv_rate_limit_slip);
+	int slip = conf()->cache.srv_rate_limit_slip;
 	if (slip > 0 && rrl_slip_roll(slip)) {
 		/* Answer slips. */
 		if (process_query_err(ctx, pkt) != KNOT_STATE_DONE) {
......
@@ -242,8 +242,7 @@ static int remote_forward(conf_t *conf, struct knot_request *request, conf_remot
 	}
 	/* Execute the request. */
-	conf_val_t *val = &conf->cache.srv_tcp_reply_timeout;
-	int timeout = conf_int(val) * 1000;
+	int timeout = 1000 * conf->cache.srv_tcp_reply_timeout;
 	ret = knot_requestor_exec(&re, req, timeout);
 	knot_request_free(req, re.mm);
@@ -339,8 +338,7 @@ static void send_update_response(conf_t *conf, const zone_t *zone, struct knot_r
 	}
 	if (net_is_stream(req->fd)) {
-		conf_val_t *val = &conf->cache.srv_tcp_reply_timeout;
-		int timeout = conf_int(val) * 1000;
+		int timeout = 1000 * conf->cache.srv_tcp_reply_timeout;
 		net_dns_tcp_send(req->fd, req->resp->wire, req->resp->size,
 		                 timeout);
 	} else {
......
@@ -294,8 +294,7 @@ static int zone_query_request(knot_pkt_t *query, const conf_remote_t *remote,
 	}
 	/* Send the queries and process responses. */
-	conf_val_t *val = &param->conf->cache.srv_tcp_reply_timeout;
-	int timeout = conf_int(val) * 1000;
+	int timeout = 1000 * param->conf->cache.srv_tcp_reply_timeout;
 	ret = knot_requestor_exec(&re, req, timeout);
 	/* Cleanup. */
......
@@ -114,8 +114,7 @@ static int tcp_handle(tcp_context_t *tcp, int fd,
 	/* Timeout. */
 	rcu_read_lock();
-	conf_val_t *val = &conf()->cache.srv_tcp_reply_timeout;
-	int timeout = conf_int(val) * 1000;
+	int timeout = 1000 * conf()->cache.srv_tcp_reply_timeout;
 	rcu_read_unlock();
 	/* Receive data. */
@@ -178,8 +177,7 @@ int tcp_accept(int fd)
 #ifdef SO_RCVTIMEO
 	struct timeval tv;
 	rcu_read_lock();
-	conf_val_t *val = &conf()->cache.srv_tcp_idle_timeout;
-	tv.tv_sec = conf_int(val);
+	tv.tv_sec = conf()->cache.srv_tcp_idle_timeout;
 	rcu_read_unlock();
 	tv.tv_usec = 0;
 	if (setsockopt(incoming, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0) {
@@ -207,8 +205,8 @@ static int tcp_event_accept(tcp_context_t *tcp, unsigned i)
 	/* Update watchdog timer. */
 	rcu_read_lock();
-	conf_val_t *val = &conf()->cache.srv_tcp_hshake_timeout;
-	fdset_set_watchdog(&tcp->set, next_id, conf_int(val));
+	int timeout = conf()->cache.srv_tcp_hshake_timeout;
+	fdset_set_watchdog(&tcp->set, next_id, timeout);
 	rcu_read_unlock();
 	return KNOT_EOK;
@@ -228,8 +226,8 @@ static int tcp_event_serve(tcp_context_t *tcp, unsigned i)
 	if (ret == KNOT_EOK) {
 		/* Update socket activity timer. */
 		rcu_read_lock();
-		conf_val_t *val = &conf()->cache.srv_tcp_idle_timeout;
-		fdset_set_watchdog(&tcp->set, i, conf_int(val));
+		int timeout = conf()->cache.srv_tcp_idle_timeout;
+		fdset_set_watchdog(&tcp->set, i, timeout);
 		rcu_read_unlock();
 	}
@@ -248,8 +246,8 @@ static int tcp_wait_for_events(tcp_context_t *tcp)
 	if (!is_throttled) {
 		/* Configuration limit, infer maximal pool size. */
 		rcu_read_lock();
-		conf_val_t *val = &conf()->cache.srv_max_tcp_clients;
-		unsigned max_per_set = MAX(conf_int(val) / conf_tcp_threads(conf()), 1);
+		int clients = conf()->cache.srv_max_tcp_clients;
+		unsigned max_per_set = MAX(clients / conf_tcp_threads(conf()), 1);
 		rcu_read_unlock();
 		/* Subtract master sockets check limits. */
 		is_throttled = (set->n - tcp->client_threshold) >= max_per_set;
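Note: the rcu_read_lock()/rcu_read_unlock() pairs around these reads remain, since conf() returns an RCU-protected configuration object that may be swapped on reload; the cached integer still has to be read inside the read-side critical section, and only the conf_int() conversion disappears from the locked region.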
......