Commit 677fa779 authored by Daniel Kahn Gillmor

Fix minor spelling and grammar nits.

parent 32118aab
@@ -239,7 +239,7 @@ Improvements
------------
- major feature: support for forwarding with validation (#112).
The old policy.FORWARD action now does that; the previous non-validating
-mode is still avaliable as policy.STUB except that also uses caching (#122).
+mode is still available as policy.STUB except that also uses caching (#122).
- command line: specify ports via @ but still support # for compatibility
- policy: recognize 100.64.0.0/10 as local addresses
- layer/iterate: *do* retry repeatedly if REFUSED, as we can't yet easily
@@ -420,8 +420,8 @@ Bugfixes:
- Free TCP buffer on cancelled connection.
- Fix crash in hints module on empty hints file, and fix non-lowercase hints.
-Miscelaneous:
--------------
+Miscellaneous:
+--------------
- It now requires knot >= 2.3.1 to link successfully.
- The API+ABI for modules changed slightly.
- New LRU implementation.
......
@@ -440,7 +440,7 @@ static int cdb_clear(knot_db_t *db)
return kr_error(errno);
}
close(ret);
-/* We aquired lockfile. Now find whether *.mdb are what we have open now. */
+/* We acquired lockfile. Now find whether *.mdb are what we have open now. */
struct stat old_stat, new_stat;
if (fstat(fd, &new_stat) || stat(mdb_datafile, &old_stat)) {
ret = errno;
@@ -536,7 +536,7 @@ static int cdb_writev(knot_db_t *db, const knot_db_val_t *key, knot_db_val_t *va
/* This is LMDB specific optimisation,
* if caller specifies value with NULL data and non-zero length,
* LMDB will preallocate the entry for caller and leave write
-* transaction open, caller is responsible for syncing thus comitting transaction.
+* transaction open, caller is responsible for syncing thus committing transaction.
*/
unsigned mdb_flags = 0;
if (val[i].len > 0 && val[i].data == NULL) {
......
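The comment in the hunk above refers to LMDB's reserve-style write: a value with NULL data and a non-zero length makes LMDB allocate the space inside the database and hand back a pointer for the caller to fill before the transaction is committed. A minimal standalone sketch of that pattern with plain LMDB calls (environment/dbi setup assumed, error handling trimmed; an illustration, not code from this repository):

#include <lmdb.h>
#include <string.h>

static int put_reserved(MDB_env *env, MDB_dbi dbi,
                        const char *key_str, const char *payload, size_t len)
{
	MDB_txn *txn = NULL;
	int ret = mdb_txn_begin(env, NULL, 0, &txn);
	if (ret != MDB_SUCCESS)
		return ret;

	MDB_val key = { .mv_size = strlen(key_str), .mv_data = (void *)key_str };
	MDB_val val = { .mv_size = len, .mv_data = NULL }; /* NULL data, non-zero length */

	/* MDB_RESERVE: LMDB allocates len bytes in place and points
	 * val.mv_data at them; nothing is copied yet. */
	ret = mdb_put(txn, dbi, &key, &val, MDB_RESERVE);
	if (ret != MDB_SUCCESS) {
		mdb_txn_abort(txn);
		return ret;
	}

	/* The caller fills the reserved space, then commits (syncs) the write. */
	memcpy(val.mv_data, payload, len);
	return mdb_txn_commit(txn);
}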
@@ -98,7 +98,7 @@ knot_db_val_t key_exact_type_maypkt(struct key *k, uint16_t type);
/** There may be multiple entries within, so rewind `val` to the one we want.
*
-* ATM there are multiple types only for the NS ktype - it also accomodates xNAMEs.
+* ATM there are multiple types only for the NS ktype - it also accommodates xNAMEs.
* \note `val->len` represents the bound of the whole list, not of a single entry.
* \note in case of ENOENT, `val` is still rewound to the beginning of the next entry.
* \return error code
......
@@ -467,7 +467,7 @@ int kr_nsec_existence_denial(const knot_pkt_t *pkt, knot_section_t section_id,
/* denial of existence proved accordignly to 4035 5.4 -
* NSEC proving either rrset non-existance or
* qtype non-existance has been found,
-* and no wildcard expansion occured.
+* and no wildcard expansion occurred.
*/
return kr_ok();
} else if (kr_nsec_rrset_noexist(flags)) {
......
@@ -665,7 +665,7 @@ int kr_nsec3_no_data(const knot_pkt_t *pkt, knot_section_t section_id,
* Also satisfies ERRATA 3441 8.5 (QTYPE != DS), 3rd paragraph.
* - (wildcard) empty nonterminal
* derived from unsecure delegation.
-* Denial of existance can not be proven.
+* Denial of existence can not be proven.
* Set error code to proceed unsecure.
*/
ret = kr_error(DNSSEC_OUT_OF_RANGE);
......
@@ -204,7 +204,7 @@ struct lru_item;
#define LRU_TRACKED ((64 - sizeof(size_t) * LRU_ASSOC) / 4 - 1)
struct lru_group {
-uint16_t counts[LRU_TRACKED+1]; /*!< Occurence counters; the last one is special. */
+uint16_t counts[LRU_TRACKED+1]; /*!< Occurrence counters; the last one is special. */
uint16_t hashes[LRU_TRACKED+1]; /*!< Top halves of hashes; the last one is unused. */
struct lru_item *items[LRU_ASSOC]; /*!< The full items. */
} CACHE_ALIGNED;
......
@@ -673,7 +673,7 @@ static int check_signer(kr_layer_t *ctx, knot_pkt_t *pkt)
if (ctx->state == KR_STATE_YIELD) {
/* Already yielded for revalidation.
* It means that trust chain is OK and
-* transition to INSECURE hasn't occured.
+* transition to INSECURE hasn't occurred.
* Let the validation logic ask about RRSIG. */
return KR_STATE_DONE;
}
@@ -867,7 +867,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
ctx->state = KR_STATE_DONE;
} else if (ctx->state == KR_STATE_YIELD) {
/* Transition to unsecure state
-was occured during revalidation.
+occurred during revalidation.
if state remains YIELD, answer will not be cached.
Let cache layers to work. */
ctx->state = KR_STATE_DONE;
......
@@ -271,7 +271,7 @@ static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name
(cut_found.key == NULL)) {
/* No DNSKEY was found for cached cut.
* If no glue were fetched for this cut,
-* we have got circular dependancy - must fetch A\AAAA
+* we have got circular dependency - must fetch A\AAAA
* from authoritative, but we have no key to verify it.
* TODO - try to refetch cut only if no glue were fetched */
kr_zonecut_deinit(&cut_found);
@@ -334,7 +334,7 @@ static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param)
qry->flags.NO_THROTTLE = true; /* Pick even bad SBELT servers */
return kr_error(EAGAIN);
}
-/* No IPv4 nor IPv6, flag server as unuseable. */
+/* No IPv4 nor IPv6, flag server as unusable. */
VERBOSE_MSG(qry, "=> unresolvable NS address, bailing out\n");
qry->ns.reputation |= KR_NS_NOIP4 | KR_NS_NOIP6;
kr_nsrep_update_rep(&qry->ns, qry->ns.reputation, ctx->cache_rep);
@@ -850,7 +850,7 @@ static void update_nslist_rtt(struct kr_context *ctx, struct kr_query *qry, cons
static void update_nslist_score(struct kr_request *request, struct kr_query *qry, const struct sockaddr *src, knot_pkt_t *packet)
{
struct kr_context *ctx = request->ctx;
-/* On sucessful answer, update preference list RTT and penalise timer */
+/* On successful answer, update preference list RTT and penalise timer */
if (request->state != KR_STATE_FAIL) {
/* Update RTT information for preference list */
update_nslist_rtt(ctx, qry, src);
......
@@ -6,7 +6,7 @@ Detect discontinuous jumps in the system time
This module detect discontinuous jumps in the system time when resolver
is running. It clears cache when a significant backward time jumps occurs.
-Time jumps are usualy created by NTP time change or by admin intervention.
+Time jumps are usually created by NTP time change or by admin intervention.
These change can affect cache records as they store timestamp and TTL in real
time.
@@ -14,7 +14,7 @@ If you want to preserve cache during time travel you should disable
this module by ``modules.unload('detect_time_jump')``.
Due to the way monotonic system time works on typical systems,
-suspend-resume cycles will be perceived as a foward time jumps,
+suspend-resume cycles will be perceived as forward time jumps,
but this direction of shift does not have the risk of using records
beyond their intended TTL, so forward jumps do not cause erasing the cache.
@@ -6,7 +6,7 @@ mod.threshold = 10 * min
local event_id = nil
-- Get time of last cache clear. Compute difference between realtime
-- and monotonic time. Compute difference of actual realtime and monotonic
-- time. In ideal case these differences should be almost same.
-- If they differ more than mod.threshold value then clear cache.
local function check_time()
......
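The README and comment above describe the detection idea: track the offset between the realtime and the monotonic clock, and treat a sudden shrink of that offset as a backward time jump (forward shifts, e.g. after suspend-resume, are ignored). A rough C sketch of that offset check using POSIX clock_gettime, not the module's actual Lua code; the threshold value and the cache-clearing reaction are left to the caller:

#include <stdbool.h>
#include <time.h>

/* Offset between wall-clock and monotonic time, in seconds. */
static double clock_offset(void)
{
	struct timespec real, mono;
	clock_gettime(CLOCK_REALTIME, &real);
	clock_gettime(CLOCK_MONOTONIC, &mono);
	return (double)(real.tv_sec - mono.tv_sec)
	     + (double)(real.tv_nsec - mono.tv_nsec) / 1e9;
}

/* True if realtime moved backwards by more than `threshold` seconds
 * since the previous call; the caller would then clear the cache. */
static bool backward_time_jump(double *last_offset, double threshold)
{
	double now = clock_offset();
	bool jumped = (*last_offset - now) > threshold; /* offset shrank => backward jump */
	*last_offset = now;
	return jumped;
}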