Commit a16455b6 authored by Petr Špaček

merge tcp and edns2019 tests into one set of tools

Tools later in the pipeline now require an argument specifying
the type of scan (= set of tests) which will be performed.

Names of tools were generalized and some names of output files were
changed; see the updated docs.
parent 44ce5992
......@@ -2,6 +2,6 @@
*.pickle
__pycache__
ednscomp.input
ednscompresult-*
edns2019compresult-*
zone
zone.nodnssec
EDNS compliance scanner for DNS zones
=====================================
DNS compliance scanner for DNS zones
====================================
This repo contains a set of scripts to scan all delegated domains from a single
zone file for EDNS compliance problems and to evaluate practical impact of
EDNS Flag Day 2019 (see https://dnsflagday.net/) on particular zone.
zone file for EDNS and DNS-over-TCP compliance problems and to evaluate
the practical impact of the DNS Flag Days 2019 and 2020
(see https://dnsflagday.net/) on a particular zone.
Testing methodology is described in file doc/methodology.rst.
Testing methodology is described in files doc/methodology_edns2019.rst
and doc/methodology_tcp.rst.
Before you start, please follow the instructions in doc/prerequisites.rst;
it contains important information.
......
......@@ -8,13 +8,11 @@ import sys
import dns.name
import domain2ipset
import ednsevalzone
import ednscomp2pickle
import tcpcomp2pickle
import evalzone
import genednscomp
import nsname2ipset
import testedns
import testtcp
import report2pickle
import rungenreport
import zone2pickle
def main():
......@@ -85,65 +83,28 @@ def main():
remaining,
remaining/total * 100)
domain2ipset.save(domain_nsset, netstats, domain_ipset)
if args.scan_type == 'edns2019':
summary = scan_edns2019(domain_nsset, nsname_ipsets, domain_ipset)
elif args.scan_type == 'tcp':
summary = scan_tcp(domain_nsset, nsname_ipsets, domain_ipset)
print(summary.text)
def scan_edns2019(domain_nsset, nsname_ipsets, domain_ipset):
logging.info('generating input data for genreport tool')
with open('ednscomp.input', 'w') as ednscomp_input:
ednscomp_input.writelines(genednscomp.generate(nsname_ipsets, domain_ipset))
logging.info('executing EDNS tests')
testedns.repeat_genreport(10)
ednscompresults = glob.glob('ednscompresult-*')
if not ednscompresults:
logging.critical('error: no ednscompresult-* files from previous step found, exiting')
sys.exit(2)
logging.info('processing genreport output in EDNS strict mode')
nsstats_strict = ednscomp2pickle.collect_server_stats(ednscomp2pickle.eval_edns_strict, ednscompresults)
ednscomp2pickle.save(nsstats_strict, 'strict')
logging.info('processing genreport output in EDNS permissive mode')
nsstats_permissive = ednscomp2pickle.collect_server_stats(ednscomp2pickle.eval_edns_permissive, ednscompresults)
ednscomp2pickle.save(nsstats_permissive, 'permissive')
logging.info('generating input data for genreport tool')
with open('ednscomp.input', 'w') as ednscomp_input:
ednscomp_input.writelines(genednscomp.generate(nsname_ipsets, domain_ipset))
summary, results_strict, results_permissive = ednsevalzone.evaluate(nsstats_strict, nsstats_permissive, domain_nsset, nsname_ipsets, domain_ipset)
ednsevalzone.save_pickle(results_strict, 'strict')
ednsevalzone.save_pickle(results_permissive, 'permissive')
ednsevalzone.save_summary(summary)
return summary
logging.info('executing {} tests'.format(args.scan_type))
rungenreport.run(10, args.scan_type)
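# each genreport pass stores its results in a file named <scan_type>compresult-<timestamp>; collect them all below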
globname = '{}compresult-*'.format(args.scan_type)
ednscompresults = glob.glob(globname)
if not ednscompresults:
logging.critical('error: no {} files from previous step found, exiting'.format(globname))
sys.exit(2)
def scan_tcp(domain_nsset, nsname_ipsets, domain_ipset):
logging.info('generating input data for genreport tool')
with open('ednscomp.input', 'w') as ednscomp_input:
ednscomp_input.writelines(genednscomp.generate(nsname_ipsets, domain_ipset))
nsstats_strict, nsstats_permissive = report2pickle.main(args.scan_type, ednscompresults)
logging.info('executing TCP tests')
testtcp.repeat_genreport(10)
tcpcompresults = glob.glob('tcpcompresult-*')
if not tcpcompresults:
logging.critical('error: no tcpcompresult-* files from previous step found, exiting')
sys.exit(2)
logging.info('processing genreport output in TCP-mandatory mode (small UDP buffer sizes)')
nsstats_strict = tcpcomp2pickle.collect_server_stats(tcpcomp2pickle.eval_tcp_strict, tcpcompresults)
tcpcomp2pickle.save(nsstats_strict, 'strict')
logging.info('processing genreport output in TCP-not-mandatory mode (big UDP buffer sizes)')
nsstats_permissive = tcpcomp2pickle.collect_server_stats(tcpcomp2pickle.eval_tcp_permissive, tcpcompresults)
tcpcomp2pickle.save(nsstats_permissive, 'permissive')
summary, results_strict, results_permissive = ednsevalzone.evaluate(nsstats_strict, nsstats_permissive, domain_nsset, nsname_ipsets, domain_ipset)
ednsevalzone.save_pickle(results_strict, 'strict')
ednsevalzone.save_pickle(results_permissive, 'permissive')
ednsevalzone.save_summary(summary)
return summary
summary, results_strict, results_permissive = evalzone.evaluate(nsstats_strict, nsstats_permissive, domain_nsset, nsname_ipsets, domain_ipset)
evalzone.save_pickle(results_strict, args.scan_type, 'strict')
evalzone.save_pickle(results_permissive, args.scan_type, 'permissive')
evalzone.save_summary(summary, args.scan_type)
print(summary.text)
if __name__ == "__main__":
testedns.check_env()
rungenreport.check_env()
main()
#!/usr/bin/bash
set -o errexit -o xtrace
test -f zone || wget -O zone https://www.internic.net/domain/in-addr.arpa
ldns-read-zone -zc -E SOA -E NS -E A -E AAAA zone > zone.normalized
allinone.py tcp zone.normalized in-addr.arpa
printresults.py tcp new
printresults.py tcp new --ns
printresults.py tcp all permissive dead --ns
......@@ -7,9 +7,9 @@ zone2pickle.py zone.normalized in-addr.arpa
nsname2ipset.py
domain2ipset.py
genednscomp.py > ednscomp.input
testedns.py 2
ednscomp2pickle.py ednscompresult-*
ednsevalzone.py
printresults.py new
printresults.py new --ns
printresults.py all permissive dead --ns
rungenreport.py edns2019 2
report2pickle.py edns2019 edns2019compresult-*
evalzone.py edns2019
printresults.py edns2019 new
printresults.py edns2019 new --ns
printresults.py edns2019 all permissive dead --ns
......@@ -7,9 +7,9 @@ zone2pickle.py zone.normalized in-addr.arpa
nsname2ipset.py
domain2ipset.py
genednscomp.py > ednscomp.input
testtcp.py 2
tcpcomp2pickle.py tcpcompresult-*
ednsevalzone.py
printresults.py new
printresults.py new --ns
printresults.py all permissive dead --ns
rungenreport.py tcp 2
report2pickle.py tcp tcpcompresult-*
evalzone.py tcp
printresults.py tcp new
printresults.py tcp new --ns
printresults.py tcp all permissive dead --ns
......@@ -8,7 +8,7 @@ import pickle
from typing import Dict, Set
import dns.name
from ednsevalzone import AnIPAddress
from evalzone import AnIPAddress
def load_nsname2ipset() -> Dict[dns.name.Name, Set[AnIPAddress]]:
"""raises FileNotFoundError"""
......
......@@ -56,12 +56,12 @@ Algorithm
6. Evaluation in the "permissive" = state before the DNS Flag Day 2019:
- IP addresses which pass only the basic test 'dns' but fail other tests
with 'timeout' will eventually work and are categorized as 'high_latency'.
with 'timeout' will eventually work and are categorized as 'half_dead'.
- IP addresses which do not pass even 'dns' test are categorized as 'dead'.
7. Evaluation in the "strict" mode = state after the DNS Flag Day 2019:
- IP addresses which fail any DNS or EDNS 0 test with 'timeout'
are categorized as 'dead'. In strict mode there is no 'high_latency'
are categorized as 'dead'. In strict mode there is no 'half_dead'
caused by EDNS non-compliance.
8. Results for individual IP addresses are combined to overall result for each
......@@ -71,7 +71,7 @@ Algorithm
(IP evaluation depends on mode, see above)
- remaining domains with at least one un-resolvable NS IP address
+ remaining domains with at least one NS IP address 'dead'
are 'high_latency' (resolvers must retry queries)
are 'half_dead' (resolvers must retry queries)
- remaining domains have their results set to the worst result
from their respective NS set
(e.g. 2 IP addresses 'ok' + 2 'compatible' => 'compatible')
......
Methodology
===========
This section roughly describes the algorithm used to categorize domains.
Please note that the categorization depends on "mode", i.e. results differ
for the situation before the DNS Flag Day 2020 and after it. See below.
Assumptions
-----------
Beware that the algorithm is optimized using the following assumption:
TCP support on a given IP address does not depend on the domain name
used for the test as long as the IP address is authoritative
for the domain.
E.g. if two zones example.com. and example.net. are hosted at
the same IP address 192.0.2.1, it is expected that the IP address
exhibits the same behavior whether tests are done with example.com.
or example.net.
This assumption allows us to test each IP address just N times
instead of N*(number of domains hosted on that IP address).
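For illustration only, the deduplication enabled by this assumption could look
roughly like the following simplified Python sketch (assuming, as the variable
names in allinone.py suggest, that domain_nsset maps each domain to its NS
names and nsname_ipsets maps each NS name to its IP addresses)::

  # collect every authoritative NS IP address exactly once
  ips_to_test = set()
  for domain, nsnames in domain_nsset.items():   # domain -> set of NS names
      for nsname in nsnames:
          ips_to_test.update(nsname_ipsets.get(nsname, set()))
  # genreport is then run N times against ips_to_test, i.e. N tests per IP
  # instead of N * (number of domains hosted on that IP)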
Algorithm
---------
This tool does zone data pre-processing and then uses the "genreport" tool
written by ISC to perform technical checks. The following set of genreport
TCP test results is considered to be a fatal failure for a domain:
timeout, failed, reset, connection-refused, eof, malformed, mismatch,
rcode15, servfail, refused, nxdomain.
Please see the source code of ISC genreport for more details about each result.
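For reference, genreport prints one result line per domain and IP address;
the two examples below are copied verbatim from comments in the parsing code::

  hichina.com. @2400:3200:2000:59::1 (ns2.hichina.com.): tcp=failed do=timeout signed=timeout ednstcp=failed
  hichina.com. @106.11.211.54 (ns2.hichina.com.): tcp=connection-refused do=ok signed=ok ednstcp=connection-refused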
1. Each delegation (NS record) in the zone file is converted to a mapping
domain => set of NS names => set of IP addresses
using glue data from the DNS zone + a local resolver for names which do not
have glue in the zone.
2. Each individual name server IP address is tested to check if the NS
responds authoritatively for the given domain. An IP is considered "dead"
if it does not respond at all to the DNS query "domain. NS"
or if it is not authoritative for the given domain.
3. Each NS IP address which is authoritative for at least one domain
is then tested for TCP compliance using the genreport tool by ISC.
Each IP address is tested once during one pass, i.e. an NS which is
authoritative for 300k domains in the zone will be tested only once.
4. TCP tests are repeated multiple times to eliminate the effect
of random network glitches on the overall result.
Multiple runs of an individual test for a single IP are combined using
simple majority, which should eliminate random network failures.
E.g. if the genreport test 'ednstcp' times out once and passes ('ok') 9 times,
only the 'ok' result is used (see the sketch after this list).
5. For each IP address its individual test results from genreport
are combined to get the overall state of that particular IP address:
- if all tests are 'ok' -> overall result is 'ok'
- if no result is fatal (see the list above) -> overall result is 'compatible'
(it does not support all features but at least it does not break when
queried over TCP)
- further categorization depends on "mode", see below.
6. Evaluation in the "permissive" mode = state before the DNS Flag Day 2020:
- IP addresses which pass only the basic test 'do' but produce fatal
TCP test failures are not resolvable today by some subset of clients
and are categorized as 'half_dead'.
- IP addresses which do not pass even 'do' test are categorized as 'dead'.
7. Evaluation in the "strict" mode = state after the DNS Flag Day 2020:
- IP addresses which exhibit any fatal TCP test failure are categorized
as 'dead'. In strict mode there is no 'half_dead' caused
by DNS-over-TCP non-compliance because after the DNS flag day 2020
majority of clients will depend on working TCP
(at least for big answers).
8. Results for individual IP addresses are combined into an overall result
for each domain delegated in the zone file:
- domains without any working authoritative NS are 'dead'
- remaining domains with all NS IP addresses 'dead' are 'dead'
(IP evaluation depends on mode, see above)
- remaining domains with at least one un-resolvable NS IP address
+ remaining domains with at least one NS IP address 'dead'
are 'half_dead' because resolvers on networks where IP fragmentation
does not work will have problems resolving these domains.
- remaining domains have their result set to the worst result
from their respective NS set
(e.g. 2 IP addresses 'ok' + 2 'compatible' => 'compatible')
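To make steps 4 and 8 concrete, here is a rough sketch of the combination
logic in simplified Python (hypothetical helper names; the actual
implementation in report2pickle.py and evalzone.py differs in details)::

  import collections

  # step 4: majority vote over repeated runs of one genreport test for one IP
  def majority(results_per_run):
      # e.g. ['ok', 'timeout', 'ok', 'ok'] -> 'ok'
      return collections.Counter(results_per_run).most_common(1)[0][0]

  # step 8: combine per-IP results into one result per domain
  # (categories ordered from best to worst; un-resolvable NS handling omitted)
  ORDER = ['ok', 'compatible', 'half_dead', 'dead']

  def domain_result(ip_results):
      if not ip_results or all(r == 'dead' for r in ip_results):
          return 'dead'                        # no working authoritative NS
      if 'dead' in ip_results:
          return 'half_dead'                   # at least one NS IP is dead
      return max(ip_results, key=ORDER.index)  # worst result in the NS set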
Limitations
-----------
1. This toolchain tests DNS-over-TCP compliance only on DNS delegations
in the given zone and does not evaluate any other data.
For example, the DNS domain `example.com.` might contain this CNAME record:
`www.example.com. CNAME broken.cdn.test.`
If the tested zone file contains the delegation `example.com. NS`,
the result will show only the state of `example.com.`'s DNS servers
but will not reflect the state of the target CDN, which might be a source
of compliance problems. As a result, the domain `example.com.`
could be categorized as `ok` but an application running on `www.example.com.`
might be unavailable because of a dependency on a broken CDN.
2. Anycast routing limits what can be tested from a single vantage point.
It is technically possible for authoritatives to use different implementations
in different anycast domains.
3. Of course, when evaluating impact it needs to be taken into account that
not all domains are equally important for users.
......@@ -16,6 +16,9 @@ Preparation
0. Beware! Processing a huge zone file requires several gigabytes
of memory and it might take tens of minutes
to convert the data from text to binary. Use a beefy machine.
E.g. the net. zone requires a machine with 16 GB of memory.
Do not even try the com. zone, it will blow up your machine
- the tooling is not optimized enough.
1. All the tools work with data in the current working directory.
Make sure it is writable and has enough free space (comparable
......@@ -33,8 +36,8 @@ Preparation
Copy your zone file into /home/test before you proceed.)
3. Canonicalize the zone file and strip out unnecessary data
to speed up further processing. Do not skip this step, missing canonicalization
might cause problems down the road::
to speed up further processing. Do not skip this step,
MISSING CANONICALIZATION WILL CAUSE PROBLEMS down the road::
$ ldns-read-zone -zc -E SOA -E NS -E A -E AAAA input_zone > zone.nodnssec
......@@ -43,11 +46,11 @@ Running scan
------------
Usage::
$ allinone.py edns2019 <canonicalized zone file> <zone origin>
$ allinone.py tcp <canonicalized zone file> <zone origin>
Example::
$ allinone.py edns2019 zone.nodnssec example.net.
$ allinone.py tcp zone.nodnssec example.net.
Once the zone is loaded into memory the script will print informational
messages about progress. Make a coffee or let it run overnight ...
......@@ -55,31 +58,33 @@ messages about progress. Make a coffee or let it run overnight ...
Reading results
---------------
First of all remember to read file doc/methodology.rst.
First of all remember to read file doc/methodology_tcp.rst.
Statistical results are stored in files summary.csv and summary.txt.
Statistical results are stored in files summary_tcp.csv and summary_tcp.txt.
Example summary.txt::
Example summary_tcp.txt::
Mode | Permissive (<= 2018) | Strict (2019+)
-------------+-----------------------+----------------------
Ok | 191 82.68 % | 191 82.68 %
Compatible | 0 0.00 % | 0 0.00 %
High latency | 39 16.88 % | 38 16.45 %
Dead | 1 0.43 % | 2 0.87 %
Mode | Before flag day | After flag day
---------------+-----------------------+----------------------
Ok | 191 82.68 % | 191 82.68 %
Compatible | 0 0.00 % | 0 0.00 %
Partially dead | 39 16.88 % | 38 16.45 %
Dead | 1 0.43 % | 2 0.87 %
This table indicates that 1 domain is already dead and that 1 other domain
will die after the EDNS flag day.
will die after the TCP flag day. 191 domains are 100 % compliant,
and the remaining 38 domains have problems unrelated to the DNS flag day
on a subset of their name servers.
To get list of domains which will die after the 2019 DNS flag day run::
To get a list of domains which will die after the DNS flag day, run::
$ printresults.py new
strict dead 48.in-addr.arpa. ; EDNS behavior consistent for all servers
$ printresults.py tcp new
strict dead 48.in-addr.arpa. ; behavior consistent for all servers
To get a list of domains which are dead already (even before the flag day)
along with their NS names, run::
$ printresults.py all permissive dead --ns
$ printresults.py tcp all permissive dead --ns
permissive dead 55.in-addr.arpa. ns01.army.mil. ; no working NS is authoritative for this domain
permissive dead 55.in-addr.arpa. ns02.army.mil. ; no working NS is authoritative for this domain
permissive dead 55.in-addr.arpa. ns03.army.mil. ; no working NS is authoritative for this domain
......@@ -94,8 +99,6 @@ to get statistical results for the whole zone.
Steps which require communication across the network should be
run multiple times to smooth out network glitches like timeouts etc.
(This repetition is normally done by allinone script but individual tools
do not automate repetition.)
With all this in mind you can use the following script.
Please read the comments below and report bugs to the CZ.NIC Gitlab:
......@@ -109,7 +112,7 @@ wget -O zone 'https://www.internic.net/domain/in-addr.arpa'
# canonicalize the zone
# and strip DNSSEC records to speed up processing
ldns-read-zone -E SOA -E NS -E A -E AAAA zone > zone.nodnssec
ldns-read-zone -zc -E SOA -E NS -E A -E AAAA input_zone > zone.nodnssec
# transform zonefile into Python objects
# NOTE: change "<example.origin.>" to zone origin, e.g. "cz."
......@@ -129,32 +132,32 @@ nsname2ipset.py
domain2ipset.py
# (see stats at the very end of output)
# generate input for EDNS compliance test suite
# generate input for compliance test suite
genednscomp.py > ednscomp.input
# run EDNS compliance test suite
# run TCP compliance test suite
# the script runs genreport binary in a loop
# it is recommended to collect at least 10 full runs to eliminate network noise
# (feel free to terminate the script with SIGTERM)
# result of each run is stored in file ednscompresult-<timestamp>
# Hint: You can run testedns.py in parallel, possibly on multiple machines
PATH=$PATH:<path to genreport tool> testedns.py
# (monitor number of ednscompresult- files and terminate as necessary;
# result of each run is stored in file tcpcompresult-<timestamp>
# Hint: You can execute rungenreport.py in parallel, possibly on multiple machines
PATH=$PATH:<path to genreport tool> rungenreport.py tcp
# (monitor number of tcpcompresult- files and terminate as necessary;
# the script will do 10 full scans to eliminate random network failures)
# merge all text results from EDNS test suite into Python objects
ednscomp2pickle.py ednscompresult-*
# merge all text results from TCP test suite into Python objects
report2pickle.py tcp tcpcompresult-*
# process EDNS stats for given zone
ednsevalzone.py
# process stats for given zone
evalzone.py tcp
# output includes statistical results for whole zone file
# print list of domains which are going to break in 2019
# print list of domains which are going to break in 2020
# i.e. list of domains which are classified as "partially dead"
# in the permissive mode but are "dead" in strict mode
printresults.py new
printresults.py tcp new
# alternatively print dead domains + list of their NSes
# some of the NSes might be broken for other reasons than EDNS,
# some of the NSes might be broken for reasons other than TCP,
# e.g. some might not be authoritative for the domain in question etc.
printresults.py new --ns
printresults.py tcp new --ns
......@@ -19,7 +19,7 @@ import weakref
import dns.message
import dns.query
from ednsevalzone import AnIPAddress
from evalzone import AnIPAddress
class IP_state(enum.Enum):
timeout = 0
......
#!/usr/bin/python3
import collections
import ipaddress
import logging
import pickle
import re
import sys
from typing import Counter, Dict, List, Tuple
from ednsevalzone import EDNSResult, AnIPAddress
# zsstmesto.cz. @89.187.140.136 (01.dns.services.dmdox.com.): dns=ok edns=ok edns1=noerror,badversion,soa edns@512=ok ednsopt=ok edns1opt=noerror,badversion,soa do=ok ednsflags=ok optlist=ok,nsid signed=ok ednstcp=ok
# seznam.cz. @2a02:598:4444::4 (ams.seznam.cz.): dns=ok edns=ok,nsid edns1=noerror,badversion,soa,nsid edns@512=ok ednsopt=ok,nsid edns1opt=noerror,badversion,soa,nsid do=ok ednsflags=ok,nsid optlist=ok,nsid signed=ok ednstcp=ok
def parse_nsip_line(line: str) -> Tuple[AnIPAddress, Dict[str, List[str]]]:
"""parse one line from ednscomp log"""
matches = re.match('^[^ ]*\\. @(?P<ip>[^ ]+) \\([^)]+\\): (?P<results>dns=.*)$', line)
if not matches:
raise ValueError('line "{}" does not have expected format, skipping'.format(line))
tests_list = matches.group('results').split()
try:
tests_results = {test.split('=')[0]:
test.split('=')[1].split(',')
for test in tests_list}
except IndexError:
raise ValueError('skipping nonsense test results "{}"'.format(line))
return ipaddress.ip_address(matches.group("ip")), tests_results
def eval_edns_strict(edns0_results: Dict[str, List[str]]) -> EDNSResult:
"""
Evaluate EDNS0 query timeout impact after 2019 DNS flag day.
Timeouts as result of EDNS 0 non-compliance will not cause retry.
"""
return EDNSResult.dead
def eval_edns_permissive(edns0_results: Dict[str, List[str]]) -> EDNSResult:
"""
Evaluate EDNS0 query timeout impact before 2019 DNS flag day.
Timeouts as result of EDNS 0 non-compliance will trigger retry without EDNS.
"""
if 'ok' in edns0_results['dns']:
return EDNSResult.high_latency # plain DNS works but EDNS forces retries
else:
return EDNSResult.dead
def eval_edns(tests_results: Dict[str, List[str]], timeout_evaluator) -> EDNSResult:
"""
Combine individual tests into overall result for a single IP address.
"""
if all('ok' in results for results in tests_results.values()):
return EDNSResult.ok
# ignore EDNS1, it is not affected by 2019 DNS flag day
edns0_results = {testname: values
for testname, values in tests_results.items()
if 'edns1' not in testname}
# if EDNS0 queries do not time out it is kind of "compatible" with DNS 2019 flag day
if all('timeout' not in results for results in edns0_results.values()):
return EDNSResult.compatible
else: # impact of timeouts is different before and after the flag day
return timeout_evaluator(edns0_results)
def collect_server_stats(eval_edns_func, edns_infns: str) -> Dict[AnIPAddress, Counter[EDNSResult]]:
"""
Combine results from all files with ednscomp output and summarize stats
"""
server_stats = {} # type: Dict[AnIPAddress, Counter[EDNSResult]]
i = 1
for infilename in edns_infns:
logging.info('processed file no. {}, file name "{}"'.format(i, infilename))
with open(infilename) as infile:
for line in infile:
line = line.strip()
try:
ip, edns_results = parse_nsip_line(line)
combined_result = eval_edns(edns_results, eval_edns_func)
server = server_stats.setdefault(ip, collections.Counter())
server[combined_result] += 1
except ValueError as ex:
logging.warning('%s', ex)
#raise
i += 1
return server_stats
def save(nsstats, criteria: str) -> None:
"""
param criteria: name of criteria - strict / permissive
"""
filename = 'ednsstats_{}.pickle'.format(criteria)
logging.info('saving EDNS results into {}'.format(filename))
pickle.dump(nsstats, open(filename, 'wb'))
def main(infiles):
"""
infiles - names of files with output from ISC genreport
"""
logging.info('processing input in EDNS strict mode')
nsstats_strict = collect_server_stats(eval_edns_strict, infiles)
save(nsstats_strict, 'strict')
logging.info('processing input in EDNS permissive mode')
nsstats_permissive = collect_server_stats(eval_edns_permissive, infiles)
save(nsstats_permissive, 'permissive')
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
if len(sys.argv) < 2:
print('Usage: {} ednscomp_output1 [ednscomp_output2] ...'.format(sys.argv[0]))
sys.exit(1)
main(sys.argv[1:])
......@@ -9,7 +9,7 @@ from typing import Dict, Set
import dns.name
import dataapi
from ednsevalzone import AnIPAddress
from evalzone import AnIPAddress
def gen_ip_to_nsname(nsname2ipset: Dict[dns.name.Name, Set[AnIPAddress]]) -> Dict[AnIPAddress, dns.name.Name]:
"""
......
......@@ -10,7 +10,7 @@ import dns.name
import dns.rdatatype
import dns.resolver
from ednsevalzone import AnIPAddress
from evalzone import AnIPAddress
def yield_ns_name(nsnames, mapping):
......
......@@ -12,9 +12,9 @@ from typing import Dict, Optional, Set
import dns.name
from ednsevalzone import EDNSResult
from evalzone import Result
def print_domain(mode: str, result: EDNSResult, domain: dns.name.Name,
def print_domain(mode: str, result: Result, domain: dns.name.Name,
nsset: Optional[Set[dns.name.Name]], reason) \
-> None:
if not nsset:
......@@ -26,11 +26,11 @@ def print_domain(mode: str, result: EDNSResult, domain: dns.name.Name,
def new_domains(permissive, strict,
args, domain2ns: Dict[dns.name.Name, Set[dns.name.Name]]) -> None:
logging.info('computing domains with NS which are going to stop working')
edns_broken_domains = set(strict[EDNSResult.dead].keys()) \
- set(permissive[EDNSResult.dead].keys())
edns_broken_domains = set(strict[Result.dead].keys()) \
- set(permissive[Result.dead].keys())
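# domains which are 'dead' only in strict mode are the ones that newly break after the flag day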
for domain in edns_broken_domains:
nsset = args.ns and domain2ns[domain]
print_domain('strict', EDNSResult.dead, domain, nsset, strict[EDNSResult.dead][domain])
print_domain('strict', Result.dead, domain, nsset, strict[Result.dead][domain])
def print_all(permissive, strict, args, domain2ns) -> None:
if not args.mode:
......@@ -40,10 +40,10 @@ def print_all(permissive, strict, args, domain2ns) -> None:
modes = {args.mode: locals()[args.mode]}
if not args.result:
args.result = EDNSResult
args.result = Result
else:
# workaround for non-functional argparse choices=Enum
args.result = (getattr(EDNSResult, args.result), )
args.result = (getattr(Result, args.result), )
for modename, modedata in modes.items():
for result in args.result:
......@@ -59,13 +59,14 @@ def main():
print test result for each domain in format:
<mode> <result> <domain name> [ns name] ; commentary
''')
parser.add_argument('scan_type', choices=['edns2019', 'tcp'], help='set of tests')
subparsers = parser.add_subparsers(dest='cmd')
allcmd = subparsers.add_parser('all', help='list results for all domains (see list --help)')
allcmd.add_argument('mode', nargs='?', choices=['permissive', 'strict'],
help='limit listing to specified mode')
# workaround for non-functional argparse choices=Enum
allcmd.add_argument('result', nargs='?', choices=EDNSResult.__members__,
allcmd.add_argument('result', nargs='?', choices=Result.__members__,
help='limit listing to specified result category')
allcmd.add_argument('--ns', action='store_true', default=False,
help='print NS for each domain')
......@@ -84,10 +85,10 @@ print test result for each domain in format:
# this can be optimized but it is probably not worth the effort
logging.info('loading permissive mode results')
with open('results_permissive.pickle', 'rb') as pickle_bin:
with open('results_{}_permissive.pickle'.format(args.scan_type), 'rb') as pickle_bin:
permissive = pickle.load(pickle_bin)
logging.info('loading strict mode results')
with open('results_strict.pickle', 'rb') as pickle_bin:
with open('results_{}_strict.pickle'.format(args.scan_type), 'rb') as pickle_bin:
strict = pickle.load(pickle_bin)
if args.ns:
......
#!/usr/bin/python3
import argparse
import collections
import ipaddress
import logging
......@@ -8,7 +9,7 @@ import re
import sys
from typing import Counter, Dict, FrozenSet, List, Tuple
from ednsevalzone import EDNSResult, AnIPAddress
from evalzone import Result, AnIPAddress
# hichina.com. @2400:3200:2000:59::1 (ns2.hichina.com.): tcp=failed do=timeout signed=timeout ednstcp=failed
# hichina.com. @106.11.211.54 (ns2.hichina.com.): tcp=connection-refused do=ok signed=ok ednstcp=connection-refused
......@@ -17,7 +18,7 @@ from ednsevalzone import EDNSResult, AnIPAddress
def parse_nsip_line(line: str) -> Tuple[AnIPAddress, Dict[str, FrozenSet[str]]]:
"""parse one line from genreport log"""
matches = re.match('^[^ ]*\\. @(?P<ip>[^ ]+) \\([^)]+\\): (?P<results>tcp=.*)$', line)
matches = re.match('^[^ ]*\\. @(?P<ip>[^ ]+) \\([^)]+\\): (?P<results>.*=.*)$', line)
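# relaxed pattern: accept result lines from any scan type (edns2019 lines start with dns=, tcp lines with tcp=)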
if not matches:
raise ValueError('line "{}" does not have expected format, skipping'.format(line))
......@@ -31,44 +32,78 @@ def parse_nsip_line(line: str) -> Tuple[AnIPAddress, Dict[str, FrozenSet[str]]]:
tests_results.pop('signed', None) # ignore DNSSEC things
return ipaddress.ip_address(matches.group("ip")), tests_results
def eval_tcp_strict(tcp_results: Dict[str, List[str]]) -> EDNSResult:
def eval_tcp_strict(tcp_results: Dict[str, List[str]]) -> Result:
"""
Evaluate impact of TCP failures on clients with small EDNS buffer size.
Failure will prevent clients with small buffer size from retrieving data.
"""
return EDNSResult.dead
return Result.dead
def eval_tcp_permissive(tcp_results: Dict[str, List[str]]) -> EDNSResult:
def eval_tcp_permissive(tcp_results: Dict[str, List[str]]) -> Result:
"""
Evaluate impact of TCP failures on clients with big EDNS buffer size.
Timeouts force retries elsewhere and add high latency.
Timeouts force retries elsewhere and add high latency
but queries usually get through.
"""
if 'ok' in tcp_results['do']:
return EDNSResult.high_latency # EDNS over UDP works but TCP has issues
return Result.half_dead # EDNS over UDP works but TCP has issues
else:
return EDNSResult.dead
return Result.dead
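# the result codes listed below are treated as fatal TCP failures (see doc/methodology_tcp.rst)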
hard_breakage_types = frozenset(['timeout', 'failed', 'reset', 'connection-refused', 'eof',
tcp_hard_breakage_types = frozenset(['timeout', 'failed', 'reset', 'connection-refused', 'eof',
'malformed', 'mismatch', 'rcode15', 'servfail', 'refused', 'nxdomain'])
def eval_tcp(tests_results: Dict[str, FrozenSet[str]], timeout_evaluator) -> EDNSResult:
def eval_tcp(tests_results: Dict[str, FrozenSet[str]], timeout_evaluator) -> Result:
"""
Combine individual tests into overall result for a single IP address.
"""
if all('ok' in results for results in tests_results.values()):
return EDNSResult.ok
return Result.ok
# it is not 100% compliant but answers in a reasonable way
# -> it is kind of "compatible" but does not get highest grade
if (all(hard_breakage_types.intersection(results) == 0
if (all(tcp_hard_breakage_types.isdisjoint(results)
for results in tests_results.values())):
return EDNSResult.compatible
return Result.compatible
else: # impact of not working TCP is different before and after the flag day
return timeout_evaluator(tests_results)
def collect_server_stats(eval_tcp_func, edns_infns: str) -> Dict[AnIPAddress, Counter[EDNSResult]]:
def eval_edns_strict(edns0_results: Dict[str, List[str]]) -> Result: