Commit f7bd1155 authored by Edvard Rejthar's avatar Edvard Rejthar

package format, installation steps

parent e24ff7f7
.idea
__pycache__
*.pem
\ No newline at end of file
......@@ -9,7 +9,7 @@ PROFILE_COUNT=21
apt install software-properties-common
add-apt-repository "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) main universe restricted multiverse"
apt update
apt install firefox python3 mariadb-server
apt install firefox python3 mariadb-server xvfb
pip3 install xvfbwrapper pymysql peewee jinja2 pyyaml bs4 pygments pillow requests
# current dir
......@@ -19,7 +19,7 @@ cd $DIR
# mariadb setup
systemctl start mariadb.service
mysql -u root < mdmaug-installation.sql # populate db
mysql -uroot -e "CREATE USER 'mdmaug'@'localhost' IDENTIFIED BY 'fidFDSs676'; GRANT ALL PRIVILEGES ON mdmaug. * TO 'mdmaug'@'%';" # new user
mysql -uroot -e "CREATE USER 'mdmaug'@'localhost' IDENTIFIED BY 'fidFDSs676'; GRANT ALL PRIVILEGES ON mdmaug. * TO 'mdmaug'@'localhost';" # new user
# adding user the server will be run under
useradd -m -d $DESTINATION mdmaug
......@@ -41,8 +41,11 @@ do
fi
done
# adopt all files to the new user
chown mdmaug:mdmaug -R $DESTINATION
# make the new user able to use the display (needed on Ubuntu 17.10 at least)
xhost +local:mdmaug
......@@ -4,10 +4,13 @@ Scans a website for a sign of a parasite hosts or commands.
## Installation
1. ```git clone git@gitlab.labs.nic.cz:csirt/mdmaug.git /tmp/mdmaug```
2. edit mdmaug/lib/config.py
3. you should generate certificate `openssl req -new -x509 -keyout cert-mdmaug.pem -out cert-mdmaug.pem -days 365 -nodes` to `mdmaug/cert-mdmaug.pem`
4. ```/tmp/mdmaug/INSTALL```
1. Download ```git clone git@gitlab.labs.nic.cz:csirt/mdmaug.git /tmp/mdmaug```
2. Edit mdmaug/lib/config.py
3. You should generate certificate `openssl req -new -x509 -keyout cert-mdmaug.pem -out cert-mdmaug.pem -days 365 -nodes` to `mdmaug/cert-mdmaug.pem`
4. Perform installation: ```/tmp/mdmaug/INSTALL```
5. Everything should be located in `/opt/mdmaug`.
6. Launch under newly created `mdmaug` user: `su - mdmaug -c 'python3 -m mdmaug'`
7. Connect in the browser at: https://localhost:8000
### Notes
......@@ -15,6 +18,7 @@ Scans a website for a sign of a parasite hosts or commands.
* Certificate error: Make sure that the browser doesn't block the MDM-Augmented server if used from MDM.
* If you want a different number of profiles than 21, change INSTALL + config.py + profiles.ini
* You may put ```03 1,7,13,19 * * * ~/mdmaug-launch``` in ```crontab -e``` of user mdmaug.
* We are using Python3.6+
## What is done to Firefox profiles?
......
/__pycache__/
/lib/__pycache__/
/lib/analysis/__pycache__/
/lib/analysis/parser/__pycache__/
/nbproject/
/templates/__pycache__/
cert-mdmaug.pem
#!/usr/bin/env python3
"""MDMaug entry point: starts a threaded HTTPS server inside a virtual X display."""
import logging
import os
import ssl
import threading

logging.basicConfig(level=logging.DEBUG, format="%(message)s")

from http.server import HTTPServer
from xvfbwrapper import Xvfb

from .lib.config import Config
from .lib.controller.server import Server
from .lib.controller.api import Api

# assure the logging dir exists before anything tries to write into it
if not os.path.exists(Config.LOG_DIR):
    os.makedirs(Config.LOG_DIR)

# server setup: clear the profile queue and kill leftover browsers first
Api.reset()
address = '0.0.0.0'
httpd = HTTPServer((address, Config.APP_PORT), Server)
httpd.socket = ssl.wrap_socket(httpd.socket,
                               server_side=True,
                               # together private + cert, http://stackoverflow.com/questions/19705785/python-3-https-webserver
                               certfile=Config.DIR + 'cert-mdmaug.pem',
                               ssl_version=ssl.PROTOCOL_TLSv1)

# run the browsers in a headless virtual display
display = Xvfb()
display.start()
try:
    print(f'Listening at https://{address}:{Config.APP_PORT}')
    # one serving thread per browser profile
    for _ in range(Config.profileCount):
        threading.Thread(target=httpd.serve_forever).start()
except (KeyboardInterrupt, SystemExit):
    display.stop()
'''
XX TO BE DELETED:
How to debug mysql:
conn = pymysql.connect(host='localhost', user='root', passwd='lopuch', db='mdmaug', charset='utf8')
cur = conn.cursor()
......@@ -55,4 +57,4 @@ quit()
#from urllib.parse import parse_qs
#from urllib.parse import urlparse
#quit()
'''
\ No newline at end of file
'''
/mnt/mdmaug/home/mdmaug/.mozilla/extensions/{ec8030f7-c20a-464f-9b0e-13a3a9e97384}/mdmaug@jetpack/resources/mdmaug/
\ No newline at end of file
import threading
import os
import logging
from glob import glob
import os
import threading
from peewee import MySQLDatabase
class Config:
    """Global application configuration (constants plus the shared DB handle)."""
    profileCount = 21  # number of Firefox profiles; they must be created manually, named 0,1,...
    browser = 'firefox'  # iceweasel, firefox – which browser gets launched
    configFile = '/opt/mdmaug/.cache/mdmaug-scans/_tmp/queue.cache'  # RAM disk was too small: '/tmp/mdm/queue.cache'
    APP_PORT = 8000
    APP_DOMAIN = 'https://217.31.202.41:' + str(APP_PORT)  # csirt.csirt.office.nic.cz
    LOG_DIR = "/opt/mdmaug/.cache/mdmaug-scans/_tmp/"
    CACHE_DIR = "/opt/mdmaug/.cache/mdmaug-scans/"
    DIR = os.path.dirname(os.path.realpath(__file__)) + "/../"  # project root (one level above lib/)
    # fixed: `myDB: None` was a bare annotation which never created the attribute;
    # it must be an assignment so `Config.myDB` exists before connect() is called
    myDB = None
    lock = threading.RLock()  # shared lock for cross-thread sections — XX needs a test (original note)
    THUMBNAIL_SIZE = 640, 640
    MAX_WHOIS_DOMAIN_THREADS = 10  # at most 10 threads at once, it overflowed once (each domain spawns as many threads as it has IPs, usually just a few)
    MAX_BROWSER_RUN_TIME = 25  # maximum time for a browser to run
    MAX_BROWSER_EXPIRATION = 15  # seconds that we wait before killing the browser (waiting for the files to be written)

    @staticmethod
    def connect():
        """(Re)connect the module-wide MySQL handle.

        XX works around peewee.OperationalError 2006 "MySQL server has gone away
        (Broken pipe)" after a 7 h timeout. XX oddly the connection cannot live in
        DbModel.connect — selects would then end up on NoneType (original notes).
        """
        logging.debug("Connecting to DB.")
        Config.myDB = MySQLDatabase("mdmaug", host='localhost', port=3306, user="mdmaug",
                                    passwd="fidFDSs676")  # XX removed: threadlocals=False
Config.connect()
import json
import subprocess
import logging
from lib.config import Config
from lib.controller.scan_controller import ScanController
from lib.model.dbp import Status, Export, Turris, Whitelist
from lib.analysis.parser.traffic_log_parser import TrafficLogParser
import subprocess
from peewee import IntegrityError
from ...templates.crawl_view import CrawlView
from .scan_controller import ScanController
from ..config import Config
from ..model.dbp import Turris, Whitelist
from ..parser.traffic_log_parser import TrafficLogParser
class Api:
    """Dispatch a parsed API request dict to the matching controller action."""
    website = ""  # http://site.cz
    websiteDomain = ""  # site.cz

    def __init__(self, path=None):
        # path kept optional for backward compatibility: the new server calls Api()
        # with no arguments and passes the parsed request to run() instead
        self.path = path

    def run(self, request):
        """ Accept command.
        :type request: dict from URL request. /api/analyze=cache/http://example.com → {"api": True, "analyze": "cache", "page": "http://example.com"}
        """
        if "analyze" in request:
            # map the cache keyword to a maximum cache age: days, True = any age, None = fresh scan
            crawl = ScanController().launch(request["page"],
                                            {"cached": 1, "weekcache": 7, "oldcache": True, True: None}[request["analyze"]])
            if request["api"] == "json":
                return CrawlView.output_json(crawl)
            else:
                return CrawlView.output_html(crawl)
        elif "decide" in request:  # XX deprecated?
            return self.get_undecided()
        elif "nicify" in request:
            return TrafficLogParser.getStylesheet() + TrafficLogParser.nicifyFile(request["page"])
        elif "vote" in request:  # /api/vote/block/example.org/10.0.0.1
            return Turris.vote(request["vote"], request["page"])
        elif "whitelist" in request:  # XXX not implemented yet
            """url = path.split("/", 3)
            if len(url) > 3:
                self._setWebsite(url[2])  # osetrit self.website, ze je URL, a nikoli shell
                logging.debug("XXX nejsem si jist, zda url je spravne na url[2]")  # XXX
                logging.debug(url)  # XXX
                quit()  # XXX
            logging.debug(self.website)
            logging.debug(self.websiteDomain)
            return self.whitelist()"""
            return "Implement first if needed."
        elif "reset" in request:
            self.reset()
            return "reset"

    @staticmethod
    def reset():
        """Clear the browser-profile queue and kill any frozen browser processes."""
        logging.debug("resetting running browsers")
        with open(Config.configFile, 'w') as f:  # clear the queue
            json.dump({}, f)
        subprocess.call(["pkill", Config.browser])  # kill frozen browsers

    # adds the 2nd-level domain to the whitelist
    def whitelist(self):
        logging.debug("whitelistuju")
        try:
            Whitelist.insert(domain=self.websiteDomain).execute()
        except IntegrityError:
            pass  # already inserted

    @staticmethod
    def get_undecided():
        # XXX not implemented yet — should produce a table of all undecided domains since the last export (original note)
        logging.debug("XXX jeste jsem neudelal - ma vylezt tabulka vsech nerozhodlych domen od posledniho exportu")
        pass
import json
import datetime
import time
import json
import logging
import os
import subprocess
import time
import traceback
import logging
from glob import glob
from random import randint
from lib.config import Config
from lib.domains import Domains
from lib.model.crawl import Crawl
from templates.crawl_view import CrawlView
from lib.parser.traffic_log_parser import TrafficLogParser
from lib.parser.nspr_log_parser import NsprLogParser
from lib.parser.metadata_parser import MetadataParser
from lib.parser.screenshot_parser import ScreenshotParser
from lib.parser.spy_parser import SpyParser
class ScanController:
from ..config import Config
from ..domains import Domains
from ..model.crawl import Crawl
from ..parser.metadata_parser import MetadataParser
from ..parser.nspr_log_parser import NsprLogParser
from ..parser.screenshot_parser import ScreenshotParser
from ..parser.spy_parser import SpyParser
from ..parser.traffic_log_parser import TrafficLogParser
from ...templates.crawl_view import CrawlView
class ScanController:
FF_INFO_FILE = "cache.dir"
CRAWL_FILE = "crawlSave.yaml"
profile = "-1" #bookovany profile firefoxu
profile = "-1" # bookovany profile firefoxu
queueFF = {}
#cacheDir = None
#logDir = None
# cacheDir = None
# logDir = None
##
# @param cached Pokud chceme zobrazit cachovanou verzi analyzy, dejme True. Pokud dame int, je to maximalni stari (ve dnech). Kdyz se nenalezne, zanazyzuje se znovu.
def launch(self, url, cached = None):
url = url.split("/", 3)
if len(url) <= 3:
return "Wrong url"
else:
url = url[3]
def launch(self, url, cached=None):
if cached:
# """ Pokud je k dispozici analyza, vratit ji """
dir = Config.CACHE_DIR + Domains.domain2dir(url) + "/"
if os.path.isdir(dir):
snapdirs = [str(dir + subdir) for subdir in os.listdir(dir) # adresare vsech moznych snapshotu
if os.path.isdir(str(dir + subdir)) and os.path.isfile(dir+subdir + "/"+ScanController.CRAWL_FILE)]
snapdirs = [str(dir + subdir) for subdir in os.listdir(dir) # adresare vsech moznych snapshotu
if os.path.isdir(str(dir + subdir)) and os.path.isfile(dir + subdir + "/" + ScanController.CRAWL_FILE)]
if snapdirs:
cacheDir = max(snapdirs, key = os.path.getmtime)+ "/" # nejnovejsi dir analyzy
if type(cached) != int or os.path.getmtime(cacheDir) > time.time()-3600*24*cached: # maximalni stari analyzy
cache_dir = max(snapdirs, key=os.path.getmtime) + "/" # nejnovejsi dir analyzy
if type(cached) != int or os.path.getmtime(
cache_dir) > time.time() - 3600 * 24 * cached: # maximalni stari analyzy
try:
logging.debug("returning")
return CrawlView.outputHtml(Crawl.loadFromFile(cacheDir + ScanController.CRAWL_FILE)) #"crawlSave.tmp"
logging.debug(f"Returning a previous crawl from: {cache_dir + ScanController.CRAWL_FILE}")
crawl = Crawl.load_from_file(cache_dir + ScanController.CRAWL_FILE)
return crawl
except ValueError:
pass
logging.debug("({-1}) Cachovana analyza nenalezena")
# provest novou analyzu
if self.queue(): # /api/analyze/web - zaradi web do fronty
print ("({}) start crawl".format(self.profile))
if self.queue(): # /api/analyze/web - zaradi web do fronty
print("({}) start crawl".format(self.profile))
self.url = Domains.assureUrl(url)
try:
crawl = self.analyze()
......@@ -66,38 +64,42 @@ class ScanController:
return ("PROFILE EXCEPTION ({}) {} See logs, i.e. mdmaug/nohup.out. ".format(self.profile, e))
logging.debug("SAVE")
crawl.saveToFile(crawl.cacheDir + ScanController.CRAWL_FILE) # ulozit vysledky hledani
return CrawlView.outputHtml(crawl)
crawl.save_to_file(crawl.cacheDir + ScanController.CRAWL_FILE) # ulozit vysledky hledani
return crawl
else:# analyza se nepodarilo si zabookovat FF profil
else: # analyza se nepodarilo si zabookovat FF profil
logging.debug("no free slots")
result = "failed - no free slots. <a href='" + Config.APP_DOMAIN + "/reset'>Reset</a>" # volny profil jsme nenasli
result = "failed - no free slots. <a href='" + Config.APP_DOMAIN + "/reset'>Reset</a>" # volny profil jsme nenasli
return "<div id='analysis-results'>{}</div>".format(result)
def analyze(self):
# spustit firefox pod profilem
print ("({}) browser launch".format(self.profile))
print("({}) browser launch".format(self.profile))
logDir, cacheDir = self.assureDirs() #pripravit log a cache adresar
logDir, cacheDir = self.assureDirs() # pripravit log a cache adresar
logfile = logDir + "log{}.log".format(self.profile)
#max_time = 3 # XXX
#,nsSocketTransport:5,nsStreamPump:5,nsHostResolver:5
logging.debug("({}) FF -P {} -no-remote {}".format(self.profile,self.profile,self.url))
command = "export NSPR_LOG_MODULES=timestamp,nsHttp:5 ; export NSPR_LOG_FILE={} ; {} -P {} -no-remote '{}'".format(logfile, Config.browser, self.profile , "http://localhost/redirect/" + self.url) # http://localhost/redirect/ gets stripped by the extension
# max_time = 3 # XXX
# ,nsSocketTransport:5,nsStreamPump:5,nsHostResolver:5
logging.debug("({}) FF -P {} -no-remote {}".format(self.profile, self.profile, self.url))
command = "export NSPR_LOG_MODULES=timestamp,nsHttp:5 ; export NSPR_LOG_FILE={} ; {} -P {} -no-remote '{}'".format(logfile,
Config.browser,
self.profile,
"http://localhost/redirect/" + self.url) # http://localhost/redirect/ gets stripped by the extension
# terminate Config.browser if hes not able to (everything has to be in single command because there is no heritance of $! amongst subprocesses)
command += " & echo $!;ii=0; while [ -n \"`ps -p $! | grep {}`\" ];do echo \"({}) running\" ;ii=$((ii+1)); if [ $ii -gt {} ]; then echo '({}) kill';kill $!; break;fi; sleep 1; done".format(Config.browser, self.profile , Config.MAX_BROWSER_RUN_TIME,self.profile) #(pokud bezi proces $! (posledni backgroudovany process), spi 1 s)
#> /dev/null
command += " & echo $!;ii=0; while [ -n \"`ps -p $! | grep {}`\" ];do echo \"({}) running\" ;ii=$((ii+1)); if [ $ii -gt {} ]; then echo '({}) kill';kill $!; break;fi; sleep 1; done".format(
Config.browser, self.profile, Config.MAX_BROWSER_RUN_TIME,
self.profile) # (pokud bezi proces $! (posledni backgroudovany process), spi 1 s)
# > /dev/null
logging.debug(command)
subprocess.call([command], shell=True)
logging.debug("({}) stopped!".format(self.profile))
# shromazdit informace z analyz
crawl = Crawl(host = self.url, logDir = logDir, cacheDir = cacheDir)
crawl = Crawl(host=self.url, log_dir=logDir, cache_dir=cacheDir)
expiration = 0
while os.path.isfile(logfile) == False: # i po zavreni FF nekdy trva, nez se soubor zapise
while not os.path.isfile(logfile): # i po zavreni FF nekdy trva, nez se soubor zapise
expiration += 1
logging.debug("({}) waiting to close...".format(self.profile))
if expiration > Config.MAX_BROWSER_EXPIRATION:
......@@ -105,18 +107,15 @@ class ScanController:
raise FileNotFoundError("time is run - browser expired")
time.sleep(1)
NsprLogParser(logfile, crawl)
self.unbookProfile() # uvolnit browser profil
TrafficLogParser(crawl) # obohatit crawl vysledky o analyzu z browseru
self.unbookProfile() # uvolnit browser profil
TrafficLogParser(crawl) # obohatit crawl vysledky o analyzu z browseru
SpyParser(crawl)
MetadataParser(crawl, Domains.url2domain(self.url)) # cekame na whois servery - az po uvolneni profilu
MetadataParser(crawl, Domains.url2domain(self.url)) # cekame na whois servery - az po uvolneni profilu
ScreenshotParser(crawl)
print ("({}) thread parsers ends".format(self.profile))
print("({}) thread parsers ends".format(self.profile))
return crawl
def _getCacheDirStamp():
# pro archiv logu pouzit timestamp: #return "current"
return datetime.datetime.fromtimestamp(time.time()).strftime('%y%m%d%H%M%S')
......@@ -129,27 +128,22 @@ class ScanController:
os.remove(file)
return dirName
def assureDirs(self):
    """Create the log and cache directories if they do not exist yet.

    Cache e.g.: /home/mdmaug/.cache/mdmaug-scans/example.com/150507151215/ –
    the XPI logs get generated there.

    :return: tuple (logDir, cacheDir), both with a trailing slash.
    """
    logDir = ScanController._assureDir(Config.LOG_DIR + str(self.profile) + "-log/")
    cacheDir = ScanController._assureDir(
        Config.CACHE_DIR + Domains.domain2dir(self.url) + "/" + ScanController._getCacheDirStamp() + "/")
    # hint for FF: leave a pointer to cacheDir inside logDir
    # (FF can locate logDir by the profile name, but not cacheDir)
    with open(logDir + ScanController.FF_INFO_FILE, "w") as f:
        f.write(cacheDir)
    return logDir, cacheDir
def _loadProfileQueue(self):
#load queue from config file
# load queue from config file
try:
with open(Config.configFile, 'r') as f:
self.queueFF = json.load(f)
......@@ -159,16 +153,17 @@ class ScanController:
self.queueFF = {}
def bookProfile(self):
    """Book the Firefox profile: mark it as loading and persist the queue to disk."""
    self.queueFF[self.profile] = "loading"
    with open(Config.configFile, 'w') as f:
        json.dump(self.queueFF, f)
def unbookProfile(self):
def dump():
with open(Config.configFile, 'w') as f:
json.dump(self.queueFF, f)
#logging.debug("UNKBOOK")
with open(Config.configFile, 'w') as f:
json.dump(self.queueFF, f)
# logging.debug("UNKBOOK")
try:
self.queueFF.pop(self.profile)
dump()
......@@ -177,33 +172,33 @@ class ScanController:
logging.debug(self.queueFF)
raise
except OSError:
logging.debug("({}) OS Error - interferuje s pustenym FF, ktere zere prilis pameti. Zkusime pockat.".format(self.profile))
time.sleep(10) # XX jestli funkcionalitu zachovat, dat sem pocitadlo, at je na konzoli videt akce
logging.debug(
"({}) OS Error - interferuje s pustenym FF, ktere zere prilis pameti. Zkusime pockat.".format(self.profile))
time.sleep(10) # XX jestli funkcionalitu zachovat, dat sem pocitadlo, at je na konzoli videt akce
try:
dump()
except OSError:
logging.debug("({}) System se nezotavil.".format(self.profile))
return "Memory may be exhausted. See mdmaug-server/scan_controller.py for details." # FF sezral vsechnu pamet asi. Stranka je problematicka. UrlQuery podle me taky selze.
#logging.debug("UNKBOOKED")
return "Memory may be exhausted. See mdmaug-server/scan_controller.py for details." # FF sezral vsechnu pamet asi. Stranka je problematicka. UrlQuery podle me taky selze.
# logging.debug("UNKBOOKED")
def queue(self):
    """ Reads from queue.cache what profile is available and books it.

    :return: True when a free Firefox profile was booked (self.profile >= 0).
    """
    self._loadProfileQueue()
    self.profile = -1
    for _ in range(4):  # wait for a free slot a few times before giving up
        for i in range(Config.profileCount):
            if self.queueFF.get(str(i)) is None:  # profile i is free
                self.profile = i
                self.bookProfile()
                break
        if self.profile == -1:
            logging.debug("(-1) FULL, let's wait few secs")
            time.sleep(randint(5, 10))  # wait a few seconds
        else:
            break  # we found a free slot, let's proceed
    logging.debug(self.queueFF)
    # did we manage to book a FF profile?
    return self.profile > -1
from http.server import SimpleHTTPRequestHandler
from jinja2 import Environment
from jinja2 import FileSystemLoader
from lib.config import Config
from lib.controller.api import Api
from lib.model.dbp import DbModel
from lib.model.dbp import Export
import logging
import mimetypes
import os
from http.server import SimpleHTTPRequestHandler
from jinja2 import Environment
from jinja2 import FileSystemLoader
from ..config import Config
from ..controller.api import Api
from ..model.dbp import DbModel
from ..model.dbp import Export
# Jinja2 environment rendering templates from the project's templates/ directory.
env = Environment()
env.loader = FileSystemLoader(Config.DIR + "templates/")
class Server(SimpleHTTPRequestHandler):
def favicon(self):
    # Serve favicon.ico (expected in the current working directory) as image/x-icon.
    with open('favicon.ico', 'rb') as f:
        self.output(f.read(), "image/x-icon")
def render_template(self, filename, **kwargs):
    """Render the given Jinja2 template with **kwargs and send it as the response body."""
    self.output(env.get_template(filename).render(kwargs))
def output(self, contents, contentType="text/html"):
def output(self, contents, content_type="text/html"):
self.send_response(200)
self.send_header("Content-type", contentType)
self.send_header("Content-type", content_type)
self.end_headers()
try:
self.wfile.write(contents)
......@@ -34,26 +37,61 @@ class Server(SimpleHTTPRequestHandler):
self.render_template("homepage.html")
def static_file(self, url):
    """Serve a static file, picking 'rb'/'r' mode via a binary-content heuristic."""
    # NOTE(review): the probe reads /usr/bin/python (always a binary), not `url` —
    # so mode is effectively always 'rb'. Looks like a bug; kept as-is, confirm intent.
    # `with` fixes the original's leaked file handle from a bare open().read().
    with open('/usr/bin/python', 'rb') as probe:
        sample = probe.read(1024)
    # bytes outside the textual range (plus common control chars) indicate binary content
    is_binary = bool(sample.translate(None,
                                      bytearray([7, 8, 9, 10, 12, 13, 27]) + bytearray(range(0x20, 0x100))))
    mode = 'rb' if is_binary else 'r'
    with open(url, mode) as f:
        self.output(f.read(), content_type=mimetypes.guess_type(url))
def do_GET(self):
path = self.path.split("/")
"""
Routing table:
/ → homepage
/existing-file → return the static file from /static
/(destination=example.com/)api... → if set, the output will be HTML5-postMessaged to other tab at the destination (with https protocol)
/api(=json)/ → output might be either in JSON, or else in HTML
/api/analyze(=...)/URI
/api/vote/...
/api/reset
/export/(days) → CSV of last X days
"""
_, path = self.path.split("/", 1)
path, *_ = path.split("?", 1)
logging.debug("Request: {}".format(path[1]))
if path[1] == "":
if path == "":
return self.homepage()
elif os.path.isfile(Config.DIR + "static/" + path[1]): #faviconka, nebo jiny existujici soubor
return self.static_file(Config.DIR + "static/" + path[1])
elif os.path.isfile(Config.DIR + "static/" + path): # favicon or any other existing file
return self.static_file(Config.DIR + "static/" + path)
DbModel.assureConnection()
if path[1] == "api": # /api/analyze/web
cmd = path[2]
api = Api(self.path)
# send everything up, we are in an iframe
self.render_template("_message.html", contents=api.run(cmd), cmd=cmd, url=self.path, destination="https://mdm.nic.cz/")
elif path[1] == "export": # /export/{days} - csv za poslednich 7 dni
# parse the request url into a friendly dictionary
request = {"page": ""}
page = False
for l in self.path.split("/")[1:]:
if not page:
c, *d = l.split("=", 1)
if c in ["http:", "https:"]:
page = True
else:
request[c] = d[0] if len(d) else True
continue
request["page"] += l + "/"
if request["page"]: # strip last slash
request["page"] = request["page"][:-1]
logging.debug("Request: {}".format(request))
if "api" in request: # /api/analyze/web
output = Api().run(request)
if "destination" in request:
# send everything up, we are in an iframe
self.render_template("_message.html", contents=output, cmd=request, url=self.path,
destination=f"https://{request['destination']}/")
else:
self.output(output)