Commit c3a754dd authored by Petr Špaček's avatar Petr Špaček

Merge branch 'kr_cache_gc' into 'master'

cache garbage collector

Closes #257

See merge request !817
parents 06d01bf0 f9a6781c
......@@ -14,14 +14,16 @@ Improvements
- http module: also send intermediate TLS certificate to clients,
if available and luaossl >= 20181207 (!819)
- systemd: basic watchdog is now available and turned on by default (#275)
- experimental cache garbage collector daemon is available (#257)
Bugfixes
--------
- TCP to upstream: don't send wrong message length (unlikely, !816)
- http module: fix problems around maintenance of ephemeral certs (!819)
- http module: also send intermediate TLS certificate to clients,
if available and luaossl >= 20181207 (!819)
- send EDNS with SERVFAILs, e.g. on validation failures (#180, !827)
Knot Resolver 4.0.0 (2019-04-18)
================================
......
/* Copyright (C) 2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*!
* \brief Simple write-once allocation-optimal dynamic array.
*
* Include it into your .c file
*
* prefix - identifier prefix, e.g. ptr -> struct ptr_dynarray, ptr_dynarray_add(), ...
* ntype - data type to be stored. Let it be a number, pointer or small struct
* initial_capacity - how many data items will be allocated on the stack and copied with assignment
*
* prefix_dynarray_add() - add a data item
* prefix_dynarray_fix() - call EVERY TIME the array is copied from some already invalid stack
* prefix_dynarray_free() - call EVERY TIME you dismiss all copies of the array
*
*/
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#pragma once
#define DYNARRAY_VISIBILITY_STATIC static
#define DYNARRAY_VISIBILITY_PUBLIC
#define DYNARRAY_VISIBILITY_LIBRARY __public__
/*!
 * \brief Declares the dynarray type and its public API for one element type.
 *
 * Expands to `struct prefix_dynarray` (typedef'd `prefix_dynarray_t`) plus
 * prototypes for prefix_dynarray_arr/add/free.  The struct keeps a small
 * inline buffer `init[initial_capacity]` so short arrays need no heap
 * allocation; `_arr` points to a heap buffer once the array outgrows it.
 * `capacity`/`size` are signed: a negative capacity marks an allocation
 * failure (see dynarray_define()).
 */
#define dynarray_declare(prefix, ntype, visibility, initial_capacity) \
	typedef struct prefix ## _dynarray { \
		ssize_t capacity; \
		ssize_t size; \
		ntype *(*arr)(struct prefix ## _dynarray *dynarray); \
		ntype init[initial_capacity]; \
		ntype *_arr; \
	} prefix ## _dynarray_t; \
	\
	visibility ntype *prefix ## _dynarray_arr(prefix ## _dynarray_t *dynarray); \
	visibility void prefix ## _dynarray_add(prefix ## _dynarray_t *dynarray, \
	                                        ntype const *to_add); \
	visibility void prefix ## _dynarray_free(prefix ## _dynarray_t *dynarray);
/*!
 * \brief Iterate over all stored items: `ptr` walks from the first element
 * to one past the last (classic begin/end pointer loop).
 */
#define dynarray_foreach(prefix, ntype, ptr, array) \
	for (ntype *ptr = prefix ## _dynarray_arr(&(array)); \
	     ptr < prefix ## _dynarray_arr(&(array)) + (array).size; ptr++)
/*!
 * \brief Defines the dynarray functions declared by dynarray_declare().
 *
 * Signedness note: `capacity` is ssize_t and is set to -1 after an
 * allocation failure.  Every comparison against the inline-buffer element
 * count therefore casts the (unsigned) sizeof expression to ssize_t;
 * without the cast, -1 would be promoted to SIZE_MAX, the error state
 * would be misread as "heap-allocated", and prefix##_dynarray_free()
 * would double-free (or free an uninitialized) `_arr`.
 */
#define dynarray_define(prefix, ntype, visibility) \
	\
	/* Free the heap buffer, if the array has outgrown the inline storage. \
	 * Also clears _arr so a repeated free cannot double-free it. */ \
	static void prefix ## _dynarray_free__(struct prefix ## _dynarray *dynarray) \
	{ \
		if (dynarray->capacity > (ssize_t)(sizeof(dynarray->init) / sizeof(*dynarray->init))) { \
			free(dynarray->_arr); \
			dynarray->_arr = NULL; \
		} \
	} \
	\
	/* Return a pointer to the current storage (inline or heap). */ \
	__attribute__((unused)) \
	visibility ntype *prefix ## _dynarray_arr(struct prefix ## _dynarray *dynarray) \
	{ \
		assert(dynarray->size <= dynarray->capacity); \
		return (dynarray->capacity <= (ssize_t)(sizeof(dynarray->init) / sizeof(*dynarray->init)) ? \
		        dynarray->init : dynarray->_arr); \
	} \
	\
	static ntype *prefix ## _dynarray_arr_init__(struct prefix ## _dynarray *dynarray) \
	{ \
		assert(dynarray->capacity == (ssize_t)(sizeof(dynarray->init) / sizeof(*dynarray->init))); \
		return dynarray->init; \
	} \
	\
	static ntype *prefix ## _dynarray_arr_arr__(struct prefix ## _dynarray *dynarray) \
	{ \
		assert(dynarray->capacity > (ssize_t)(sizeof(dynarray->init) / sizeof(*dynarray->init))); \
		return dynarray->_arr; \
	} \
	\
	/* Append *to_add, growing (2n+1) onto the heap when needed. \
	 * On allocation failure the array enters the error state \
	 * (capacity == size == -1) and silently drops further items. */ \
	__attribute__((unused)) \
	visibility void prefix ## _dynarray_add(struct prefix ## _dynarray *dynarray, \
	                                        ntype const *to_add) \
	{ \
		if (dynarray->capacity < 0) { \
			return; \
		} \
		if (dynarray->capacity == 0) { \
			dynarray->capacity = sizeof(dynarray->init) / sizeof(*dynarray->init); \
			dynarray->arr = prefix ## _dynarray_arr_init__; \
		} \
		if (dynarray->size >= dynarray->capacity) { \
			ssize_t new_capacity = dynarray->capacity * 2 + 1; \
			ntype *new_arr = calloc(new_capacity, sizeof(ntype)); \
			if (new_arr == NULL) { \
				prefix ## _dynarray_free__(dynarray); \
				dynarray->capacity = dynarray->size = -1; \
				return; \
			} \
			if (dynarray->capacity > 0) { \
				memcpy(new_arr, prefix ## _dynarray_arr(dynarray), \
				       dynarray->capacity * sizeof(ntype)); \
			} \
			prefix ## _dynarray_free__(dynarray); \
			dynarray->_arr = new_arr; \
			dynarray->capacity = new_capacity; \
			dynarray->arr = prefix ## _dynarray_arr_arr__; \
		} \
		prefix ## _dynarray_arr(dynarray)[dynarray->size++] = *to_add; \
	} \
	\
	/* Release all storage and reset the array to the empty state. */ \
	__attribute__((unused)) \
	visibility void prefix ## _dynarray_free(struct prefix ## _dynarray *dynarray) \
	{ \
		prefix ## _dynarray_free__(dynarray); \
		memset(dynarray, 0, sizeof(*dynarray)); \
	}
......@@ -613,6 +613,30 @@ For more details, see ``kresd.systemd(7)``.
.. note:: On recent Linux supporting ``SO_REUSEPORT`` (since 3.9, backported to RHEL 2.6.32) it is also able to bind to the same endpoint and distribute the load between the forked processes. If your OS doesn't support it, use only one daemon process.
Cache Garbage Collector
=======================
.. warning:: Garbage collector is experimental and subject to change in future
releases. This feature isn't currently turned on by default.
By default, kresd uses the available cache until it's full. When more space is
required, the entire cache is dropped. To avoid starting over with an empty
cache, a separate garbage collector daemon is available to periodically trim
the cache instead.
The cache garbage collector daemon (``kres_cache_gc``) monitors the cache usage
and attempts to free up space when a threshold is reached. To spawn the daemon
and configure it to run every 10 seconds (the interval is given in milliseconds), use:
.. code-block:: bash
$ kres_cache_gc -c /var/cache/knot-resolver -d 10000
It's also possible to run this under systemd. However, a dedicated systemd unit
is not currently part of the upstream package. See `message#167`_ on our
mailing list for an example of such a unit file.
Using CLI tools
===============
......@@ -665,3 +689,4 @@ Code reference
.. _`real process managers`: http://blog.crocodoc.com/post/48703468992/process-managers-the-good-the-bad-and-the-ugly
.. _`socket activation`: http://0pointer.de/blog/projects/socket-activation.html
.. _`dnsproxy module`: https://www.knot-dns.cz/docs/2.7/html/modules.html#dnsproxy-tiny-dns-proxy
.. _`message#167`: https://lists.nic.cz/pipermail/knot-resolver-users/2019/000167.html
......@@ -30,3 +30,4 @@ usr/lib/knot-resolver/kres_modules/view.lua
usr/lib/knot-resolver/kres_modules/workarounds.lua
usr/sbin/kresc
usr/sbin/kresd
usr/sbin/kres_cache_gc
......@@ -251,6 +251,7 @@ getent passwd knot-resolver >/dev/null || useradd -r -g knot-resolver -d %{_sysc
%attr(750,knot-resolver,knot-resolver) %dir %{_localstatedir}/cache/%{name}
%{_sbindir}/kresd
%{_sbindir}/kresc
%{_sbindir}/kres_cache_gc
%{_libdir}/libkres.so.*
%dir %{_libdir}/knot-resolver
%{_libdir}/knot-resolver/*.so
......
......@@ -201,7 +201,7 @@ int kr_cache_clear(struct kr_cache *cache)
}
/* When going stricter, BEWARE of breaking entry_h_consistent_NSEC() */
struct entry_h * entry_h_consistent(knot_db_val_t data, uint16_t type)
struct entry_h * entry_h_consistent_E(knot_db_val_t data, uint16_t type)
{
(void) type; /* unused, for now */
if (!data.data) return NULL;
......@@ -599,7 +599,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
eh->rank = 0;
assert(false);
}
assert(entry_h_consistent(val_new_entry, rr->type));
assert(entry_h_consistent_E(val_new_entry, rr->type));
#if 0 /* Occasionally useful when debugging some kinds of changes. */
{
......@@ -802,7 +802,7 @@ static int peek_exact_real(struct kr_cache *cache, const knot_dname_t *name, uin
if (!ret) ret = entry_h_seek(&val, type);
if (ret) return kr_error(ret);
const struct entry_h *eh = entry_h_consistent(val, type);
const struct entry_h *eh = entry_h_consistent_E(val, type);
if (!eh || eh->is_packet) {
// TODO: no packets, but better get rid of whole kr_cache_peek_exact().
return kr_error(ENOENT);
......
......@@ -234,7 +234,7 @@ int entry_h_splice(
}
/* val is on the entry, in either case (or error) */
if (!ret) {
eh_orig = entry_h_consistent(val, type);
eh_orig = entry_h_consistent_E(val, type);
}
} else {
/* We want to fully overwrite the entry, so don't even read it. */
......
......@@ -63,7 +63,8 @@ struct entry_apex;
/** Check basic consistency of entry_h for 'E' entries, not looking into ->data.
* (for is_packet the length of data is checked)
*/
struct entry_h * entry_h_consistent(knot_db_val_t data, uint16_t type);
KR_EXPORT
struct entry_h * entry_h_consistent_E(knot_db_val_t data, uint16_t type);
struct entry_apex * entry_apex_consistent(knot_db_val_t val);
......@@ -71,12 +72,22 @@ struct entry_apex * entry_apex_consistent(knot_db_val_t val);
/** Consistency check for a cache value holding an NSEC/NSEC3 entry.
 * Extends the exact-entry ('E') checks: the entry must additionally
 * not be a packet and must not carry the opt-out flag.
 * @return pointer into `data` on success, NULL otherwise. */
static inline struct entry_h * entry_h_consistent_NSEC(knot_db_val_t data)
{
	/* ATM it's enough to just extend the checks for exact entries. */
	const struct entry_h *eh = entry_h_consistent_E(data, KNOT_RRTYPE_NSEC);
	bool ok = eh != NULL;
	ok = ok && !eh->is_packet && !eh->has_optout;
	return ok ? /*const-cast*/(struct entry_h *)eh : NULL;
}
/** Type-dispatching consistency check: NSEC and NSEC3 entries get the
 * stricter NSEC checks, everything else the plain 'E'-entry checks.
 * @return pointer into `data` on success, NULL otherwise. */
static inline struct entry_h * entry_h_consistent(knot_db_val_t data, uint16_t type)
{
	switch (type) {
	case KNOT_RRTYPE_NSEC:
	case KNOT_RRTYPE_NSEC3:
		return entry_h_consistent_NSEC(data);
	default:
		return entry_h_consistent_E(data, type);
	}
}
/* nsec_p* - NSEC* chain parameters */
......@@ -226,7 +237,7 @@ int entry_h_splice(
const struct kr_query *qry, struct kr_cache *cache, uint32_t timestamp);
/** Parse an entry_apex into individual items. @return error code. */
int entry_list_parse(const knot_db_val_t val, entry_list_t list);
KR_EXPORT int entry_list_parse(const knot_db_val_t val, entry_list_t list);
static inline size_t to_even(size_t n)
{
......
......@@ -258,7 +258,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
knot_db_val_t val = { NULL, 0 };
ret = cache_op(cache, read, &key, &val, 1);
const struct entry_h *eh;
if (ret || !(eh = entry_h_consistent(val, KNOT_RRTYPE_SOA))) {
if (ret || !(eh = entry_h_consistent_E(val, KNOT_RRTYPE_SOA))) {
assert(ret); /* only want to catch `eh` failures */
VERBOSE_MSG(qry, "=> SOA missed\n");
return ctx->state;
......@@ -472,7 +472,7 @@ static int found_exact_hit(kr_layer_t *ctx, knot_pkt_t *pkt, knot_db_val_t val,
int ret = entry_h_seek(&val, qry->stype);
if (ret) return ret;
const struct entry_h *eh = entry_h_consistent(val, qry->stype);
const struct entry_h *eh = entry_h_consistent_E(val, qry->stype);
if (!eh) {
assert(false);
return kr_error(ENOENT);
......@@ -532,7 +532,7 @@ static int try_wild(struct key *k, struct answer *ans, const knot_dname_t *clenc
return ret;
}
/* Check if the record is OK. */
const struct entry_h *eh = entry_h_consistent(val, type);
const struct entry_h *eh = entry_h_consistent_E(val, type);
if (!eh) {
assert(false);
return kr_error(ret);
......@@ -697,7 +697,7 @@ static int check_NS_entry(struct key *k, const knot_db_val_t entry, const int i,
} else {
type = EL2RRTYPE(i);
/* Find the entry for the type, check positivity, TTL */
const struct entry_h *eh = entry_h_consistent(entry, type);
const struct entry_h *eh = entry_h_consistent_E(entry, type);
if (!eh) {
VERBOSE_MSG(qry, "=> EH not consistent\n");
assert(false);
......
......@@ -155,9 +155,9 @@ subdir('contrib')
subdir('lib')
## Remaining code
subdir('client')
subdir('daemon')
subdir('modules')
subdir('utils')
if get_option('bench') == 'enabled'
subdir('bench')
endif
......@@ -243,6 +243,7 @@ run_target(
s_managed_ta = managed_ta ? 'enabled' : 'disabled'
s_systemd_socket = libsystemd.found() ? 'enabled' : 'disabled'
s_build_client = build_client ? 'enabled' : 'disabled'
s_build_utils = build_utils ? 'enabled' : 'disabled'
s_build_dnstap = build_dnstap ? 'enabled' : 'disabled'
s_build_unit_tests = build_unit_tests ? 'enabled' : 'disabled'
s_build_config_tests = build_config_tests ? 'enabled' : 'disabled'
......@@ -270,6 +271,7 @@ message('''
optional components
client: @0@'''.format(s_build_client) + '''
utils: @0@'''.format(s_build_utils) + '''
dnstap: @0@'''.format(s_build_dnstap) + '''
unit_tests: @0@'''.format(s_build_unit_tests) + '''
config_tests: @0@'''.format(s_build_config_tests) + '''
......
......@@ -102,6 +102,18 @@ option(
description: 'build kresc client binary',
)
option(
'utils',
type: 'combo',
choices: [
'auto',
'enabled',
'disabled',
],
value: 'auto',
description: 'build kres utilities',
)
option(
'dnstap',
type: 'combo',
......
#include "categories.h"
#include <libknot/libknot.h>
#include "lib/utils.h"
/* Is this RR type part of DNS infrastructure (delegations, keys,
 * addresses)?  Such records are rated as more valuable to keep. */
static bool rrtype_is_infrastructure(uint16_t r)
{
	return r == KNOT_RRTYPE_NS
	    || r == KNOT_RRTYPE_DS
	    || r == KNOT_RRTYPE_DNSKEY
	    || r == KNOT_RRTYPE_A
	    || r == KNOT_RRTYPE_AAAA;
}
/* Cheap pseudo-random value in [0, to).  Not cryptographic — per the
 * comment below, unpredictability isn't needed here; modulo bias is
 * negligible for the tiny ranges used. */
static int get_random(int to)
{
	// We don't need these to be really unpredictable,
	// but this should be cheap enough not to be noticeable.
	return kr_rand_bytes(1) % to;
}
// TODO this is just an example, make this more clever
/* Assign a GC category to one cache record; higher category = evicted
 * sooner.  Heuristics: records closer to the root and infrastructure
 * RR types score lower (kept longer); big and already-expired entries
 * are penalized.  A small random component spreads eviction order.
 * Invalid (inconsistent) records get the worst category. */
category_t kr_gc_categorize(gc_record_info_t * info)
{
	category_t res;
	if (!info->valid)
		return CATEGORIES - 1;
	switch (info->no_labels) {
	case 0: /* root zone */
		res = 5;
		break;
	case 1: /* TLD */
		res = 10;
		break;
	default: /* SLD and below */
		res = (rrtype_is_infrastructure(info->rrtype) ? 15 : 20);
		if (info->entry_size > 300)
			/* Penalty for big answers */
			res += 30;
		break;
	}
	if (info->expires_in <= 0) {
		res += 40;
	}
	return res + get_random(5);
}
#pragma once
#include "kr_cache_gc.h"
/* GC category of a record: lower value = more valuable = kept longer. */
typedef uint8_t category_t;
#define CATEGORIES 100 // number of categories
/* Classify one cache record into a category based on its metadata. */
category_t kr_gc_categorize(gc_record_info_t * info);
// #define DEBUG 1
#include "db.h"
#include <lib/cache/impl.h>
//#include <lib/defines.h>
#include <ctype.h> //DEBUG
#include <time.h>
#include <sys/stat.h>
/* Mirror of libknot's internal LMDB backend struct (leading members only).
 * NOTE(review): this layout mirroring is fragile — it must stay in sync
 * with the libknot version linked against; verify on libknot upgrades. */
struct libknot_lmdb_env {
	bool shared;
	unsigned dbi;
	void *env;
	knot_mm_t *pool;
};

/* Mirror of the resolver's struct lmdb_env (lib/cache/cdb_lmdb.c),
 * leading members only — same fragility caveat as above. */
struct kres_lmdb_env {
	size_t mapsize;
	unsigned dbi;
	void *env;
	// sub-struct txn omitted
};
static knot_db_t *knot_db_t_kres2libknot(const knot_db_t * db)
{
const struct kres_lmdb_env *kres_db = db; // this is struct lmdb_env as in resolver/cdb_lmdb.c
struct libknot_lmdb_env *libknot_db = malloc(sizeof(*libknot_db));
if (libknot_db != NULL) {
libknot_db->shared = false;
libknot_db->pool = NULL;
libknot_db->env = kres_db->env;
libknot_db->dbi = kres_db->dbi;
}
return libknot_db;
}
/** Open the resolver LMDB cache at `cache_path` and also create a
 * libknot-compatible view of the same database.
 *
 * @param cache_path  directory that must contain data.mdb
 * @param kres_db     out: opened resolver cache handle
 * @param libknot_db  out: malloc'd libknot handle sharing the same LMDB env;
 *                    release both with kr_gc_cache_close()
 * @return 0 on success, negative errno-style code on failure
 */
int kr_gc_cache_open(const char *cache_path, struct kr_cache *kres_db,
		     knot_db_t ** libknot_db)
{
	char cache_data[strlen(cache_path) + 10]; /* +10 covers "/data.mdb" + NUL */
	snprintf(cache_data, sizeof(cache_data), "%s/data.mdb", cache_path);
	struct stat st = { 0 };
	/* Use S_ISDIR(): st_mode's file-type field is not a bitmask, so
	 * `st.st_mode & S_IFDIR` would also accept e.g. sockets, whose
	 * S_IFSOCK value is a bit-superset of S_IFDIR. */
	if (stat(cache_path, &st) || !S_ISDIR(st.st_mode) || stat(cache_data, &st)) {
		printf("Error: %s does not exist or is not a LMDB.\n", cache_path);
		return -ENOENT;
	}
	size_t cache_size = st.st_size; /* size of data.mdb from the second stat() */
	struct kr_cdb_opts opts = { cache_path, cache_size };
	int ret = kr_cache_open(kres_db, NULL, &opts, NULL);
	if (ret || kres_db->db == NULL) {
		printf("Error opening Resolver cache (%s).\n", kr_strerror(ret));
		return -EINVAL;
	}
	*libknot_db = knot_db_t_kres2libknot(kres_db->db);
	if (*libknot_db == NULL) {
		printf("Out of memory.\n");
		/* NOTE(review): kres_db stays open on this path; callers treat
		 * it as fatal, but closing it here might be cleaner — confirm. */
		return -ENOMEM;
	}
	return 0;
}
/** Release what kr_gc_cache_open() created: free the malloc'd libknot
 * wrapper and close the resolver cache. */
void kr_gc_cache_close(struct kr_cache *kres_db, knot_db_t * knot_db)
{
	free(knot_db);
	kr_cache_close(kres_db);
}
/** Classify a cache key and return a pointer to its RR type,
 * or NULL if the key is not consistent with CACHE_KEY_DEF.
 * For NSEC/NSEC3 keys a pointer to a static constant is returned;
 * for 'E' keys the pointer aims into the key data itself. */
const uint16_t *kr_gc_key_consistent(knot_db_val_t key)
{
	const static uint16_t NSEC1 = KNOT_RRTYPE_NSEC;
	const static uint16_t NSEC3 = KNOT_RRTYPE_NSEC3;
	const uint8_t *kd = key.data;
	ssize_t i;
	/* CACHE_KEY_DEF */
	/* Too-short keys cannot be consistent; without this guard the loop
	 * below would read kd[0]/kd[1] out of bounds for len < 2. */
	if (key.len < 2)
		return NULL;
	if (kd[0] == '\0') {
		/* Beware: root zone is special and starts with
		 * a single \0 followed by type sign */
		i = 1;
	} else {
		/* find the first double zero in the key */
		for (i = 2; kd[i - 1] || kd[i - 2]; ++i) {
			if (i >= key.len)
				return NULL;
		}
	}
	/* The type sign must lie within the key; a key ending exactly in
	 * "\0\0" would otherwise be read one byte past its end. */
	if (i >= key.len)
		return NULL;
	// the next character can be used for classification
	switch (kd[i]) {
	case 'E':
		if (i + 1 + sizeof(uint16_t) > key.len) {
			assert(!EINVAL);
			return NULL;
		}
		return (uint16_t *) & kd[i + 1];
	case '1':
		return &NSEC1;
	case '3':
		return &NSEC3;
	default:
		return NULL;
	}
}
/// expects that key is consistent! CACHE_KEY_DEF
/* Count the DNS name labels at the start of the key (names are stored
 * as length-free zero-terminated label sequences here).
 * NOTE(review): the outer `*p` read relies on the key containing the
 * terminating zero within bounds — guaranteed only for keys that passed
 * kr_gc_key_consistent(); confirm all callers check that first.
 * Returns 0 if the inner bounds check trips. */
static uint8_t entry_labels(knot_db_val_t * key, uint16_t rrtype)
{
	uint8_t lab = 0, *p = key->data;
	while (*p != 0) {
		while (*p++ != 0) {
			if (p - (uint8_t *) key->data >= key->len) {
				return 0;
			}
		}
		lab++;
	}
	if (rrtype == KNOT_RRTYPE_NSEC3) {
		// We don't know the number of labels so easily,
		// but let's classify everything as directly
		// below the zone apex (that's most common).
		++lab;
	}
	return lab;
}
#ifdef DEBUG
/** Print `len` bytes of `str` as a quoted string; non-printable bytes
 * are rendered as `xx` hex escapes.  Debug aid only. */
void debug_printbin(const char *str, unsigned int len)
{
	putchar('"');
	for (unsigned int idx = 0; idx < len; idx++) {
		char c = str[idx];
		/* Cast to unsigned char: passing a (possibly negative) plain
		 * char to isprint() is undefined behavior (CERT STR37-C),
		 * and %x on a negative value would print sign-extended hex. */
		if (isprint((unsigned char)c))
			putchar(c);
		else
			printf("`%02x`", (unsigned char)c);
	}
	putchar('"');
}
#endif
/** Return one entry_h reference from a cache DB value. NULL if not consistent/suitable. */
static const struct entry_h *val2entry(const knot_db_val_t val, uint16_t ktype)
{
	/* Non-NS key types hold exactly one entry. */
	if (ktype != KNOT_RRTYPE_NS)
		return entry_h_consistent(val, ktype);
	/* Otherwise we have a multi-purpose entry.
	 * Well, for now we simply choose the most suitable entry;
	 * the only realistic collision is DNAME in apex where we'll prefer NS. */
	entry_list_t el;
	if (entry_list_parse(val, el))
		return NULL;
	/* Skip the NSEC* slots at the front of the list and return the
	 * first non-empty exact-type entry. */
	for (int i = ENTRY_APEX_NSECS_CNT; i < EL_LENGTH; ++i) {
		if (el[i].len)
			return entry_h_consistent(el[i], EL2RRTYPE(i));
	}
	/* Only NSEC* meta-data inside. */
	return NULL;
}
/** Walk the whole cache DB in a read-only transaction, invoking `callback`
 * for every record with collected gc_record_info_t metadata.
 * The "VERS" DB-metadata key is skipped.  Records failing consistency
 * checks are still reported, with info.valid == false.
 * @return KNOT_EOK, or the first error encountered (callback errors
 *         abort the iteration). */
int kr_gc_cache_iter(knot_db_t * knot_db, kr_gc_iter_callback callback, void *ctx)
{
#ifdef DEBUG
	unsigned int counter_iter = 0;
	unsigned int counter_gc_consistent = 0;
	unsigned int counter_kr_consistent = 0;
#endif
	knot_db_txn_t txn = { 0 };
	knot_db_iter_t *it = NULL;
	const knot_db_api_t *api = knot_db_lmdb_api();
	gc_record_info_t info = { 0 };
	int64_t now = time(NULL);
	int ret = api->txn_begin(knot_db, &txn, KNOT_DB_RDONLY);
	if (ret != KNOT_EOK) {
		printf("Error starting DB transaction (%s).\n", knot_strerror(ret));
		return ret;
	}
	it = api->iter_begin(&txn, KNOT_DB_FIRST);
	if (it == NULL) {
		/* fixed typo: was "iterationg" */
		printf("Error iterating database.\n");
		api->txn_abort(&txn);
		return KNOT_ERROR;
	}
	while (it != NULL) {
		knot_db_val_t key = { 0 }, val = { 0 };
		ret = api->iter_key(it, &key);
		if (key.len == 4 && memcmp("VERS", key.data, 4) == 0) {
			/* skip DB metadata */
			goto skip;
		}
		if (ret == KNOT_EOK) {
			ret = api->iter_val(it, &val);
		}
#ifdef DEBUG
		counter_iter++;
#endif
		info.entry_size = key.len + val.len;
		info.valid = false;
		/* Key consistency first; only then try to parse the value. */
		const uint16_t *entry_type =
		    ret == KNOT_EOK ? kr_gc_key_consistent(key) : NULL;
		const struct entry_h *entry = NULL;
		if (entry_type != NULL) {
#ifdef DEBUG
			counter_gc_consistent++;
#endif
			entry = val2entry(val, *entry_type);
		}
		/* TODO: perhaps improve some details around here:
		 * - xNAME have .rrtype NS
		 * - DNAME hidden on NS name will not be considered here
		 * - if zone has NSEC* meta-data but no NS, it will be seen
		 *   here as kr_inconsistent */
		if (entry != NULL) {
			info.valid = true;
			info.rrtype = *entry_type;
			info.expires_in = entry->time + entry->ttl - now;
			info.no_labels = entry_labels(&key, *entry_type);
		}
#ifdef DEBUG
		counter_kr_consistent += info.valid;
		printf("GC %sconsistent, KR %sconsistent, size %zu, key len %zu: ",
		       entry_type ? "" : "in", entry ? "" : "IN", (key.len + val.len),
		       key.len);
		debug_printbin(key.data, key.len);
		printf("\n");
#endif
		ret = callback(&key, &info, ctx);
		if (ret != KNOT_EOK) {
			printf("Error iterating database (%s).\n", knot_strerror(ret));
			api->iter_finish(it);
			api->txn_abort(&txn);
			return ret;
		}
skip:
		it = api->iter_next(it);
	}
	api->txn_abort(&txn);
#ifdef DEBUG
	printf("DEBUG: iterated %u items, gc consistent %u, kr consistent %u\n",
	       counter_iter, counter_gc_consistent, counter_kr_consistent);
#endif
	return KNOT_EOK;
}
#pragma once
#include <lib/cache/api.h>
#include <libknot/libknot.h>
#include "kr_cache_gc.h"
/** Open the resolver cache and a shared libknot view of it.
 * @return 0, or negative errno-style code. */
int kr_gc_cache_open(const char *cache_path, struct kr_cache *kres_db,
		     knot_db_t ** libknot_db);
/** Release both handles obtained from kr_gc_cache_open(). */
void kr_gc_cache_close(struct kr_cache *kres_db, knot_db_t * knot_db);

/** Per-record callback for kr_gc_cache_iter(); non-KNOT_EOK aborts iteration. */
typedef int (*kr_gc_iter_callback)(const knot_db_val_t * key, gc_record_info_t * info,
				   void *ctx);

/** Iterate the whole cache, calling `callback` for each record. */
int kr_gc_cache_iter(knot_db_t * knot_db, kr_gc_iter_callback callback, void *ctx);

/** Return the RR type of a cache key, or NULL if the key is inconsistent. */
const uint16_t *kr_gc_key_consistent(knot_db_val_t key);
// standard includes
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <time.h>
// libknot includes
#include <libknot/libknot.h>
// resolver includes
#include <contrib/dynarray.h>
#include <lib/cache/api.h>
#include <lib/cache/impl.h>
#include <lib/defines.h>
#include "kr_cache_gc.h"
#include "categories.h"
#include "db.h"
// section: timer
// TODO replace/move to contrib
typedef struct timespec gc_timer_t;
static gc_timer_t gc_timer_internal = { 0 };

/** Start (or restart) a timer; NULL selects the shared internal timer. */
static void gc_timer_start(gc_timer_t * t)
{
	gc_timer_t *target = (t == NULL) ? &gc_timer_internal : t;
	(void)clock_gettime(CLOCK_MONOTONIC, target);
}
/** Fractional seconds elapsed since gc_timer_start() on the same timer;
 * NULL selects the shared internal timer. */
static double gc_timer_end(gc_timer_t * t)
{
	gc_timer_t *start = (t == NULL) ? &gc_timer_internal : t;
	gc_timer_t now = { 0 };
	(void)clock_gettime(CLOCK_MONOTONIC, &now);
	const double secs = (double)now.tv_sec - (double)start->tv_sec;
	const double frac = ((double)now.tv_nsec - (double)start->tv_nsec) / 1e9;
	return secs + frac;
}
static unsigned long gc_timer_usecs(gc_timer_t * t)
{
gc_timer_t *start = t == NULL ? &gc_timer_internal : t;
gc_timer_t end = { 0 };
(void)clock_gettime(CLOCK_MONOTONIC, &end);
return ((end.tv_sec - start->tv_sec) * 1000000UL +
(end.tv_nsec - start->tv_nsec) / 1000UL);