...
 
Commits (24)
FROM debian:stable FROM debian:unstable
ENV HOME=/root ENV HOME=/root
...@@ -7,9 +7,10 @@ RUN \ ...@@ -7,9 +7,10 @@ RUN \
apt-get -y upgrade && \ apt-get -y upgrade && \
apt-get -y install --no-install-recommends \ apt-get -y install --no-install-recommends \
busybox ca-certificates curl git \ busybox ca-certificates curl git \
make pkg-config gcc \ make cmake pkg-config gcc \
check cppcheck lua-check valgrind \ check cppcheck lua-check valgrind \
libcurl4-openssl-dev libevent-dev libssl-dev \ libcurl4-openssl-dev libevent-dev libssl-dev liburiparser-dev \
libb64-dev uthash-dev \
lua5.1 liblua5.1-0-dev \ lua5.1 liblua5.1-0-dev \
asciidoc lcov markdown libcommon-sense-perl \ asciidoc lcov markdown libcommon-sense-perl \
wget procps && \ wget procps && \
......
...@@ -11,7 +11,7 @@ __pycache__ ...@@ -11,7 +11,7 @@ __pycache__
# The desired results # The desired results
*.html *.html
*.so *.so
/src/opkg-trans/opkg-trans /src/pkgtransaction/pkgtransaction
/src/pkgupdate/pkgupdate /src/pkgupdate/pkgupdate
/src/migrator/pkgmigrate /src/migrator/pkgmigrate
# Some stuff for debugging # Some stuff for debugging
......
...@@ -4,3 +4,6 @@ ...@@ -4,3 +4,6 @@
[submodule "tests/lunit-launch/lunit"] [submodule "tests/lunit-launch/lunit"]
path = tests/lunit-launch/lunit path = tests/lunit-launch/lunit
url = http://repo.or.cz/lunit.git url = http://repo.or.cz/lunit.git
[submodule "tests/usign"]
path = tests/usign
url = git://git.openwrt.org/project/usign.git
UPDATER_VERSION := $(shell (git describe --match 'v*' --dirty || echo 'unknown') | sed -e 's/^v//') UPDATER_VERSION := $(shell (git describe --match 'v*' --dirty || echo 'unknown') | sed -e 's/^v//')
LUA_NAME := $(shell for lua in lua5.1 lua-5.1 lua51 lua ; do if pkg-config $$lua ; then echo $$lua ; break ; fi ; done) LUA_NAME := $(shell for lua in lua5.1 lua-5.1 lua51 lua ; do if pkg-config $$lua ; then echo $$lua ; break ; fi ; done)
VALGRIND:=IN_VALGRIND=1 valgrind --leak-check=full --show-leak-kinds=all --track-fds=yes --trace-children=no --child-silent-after-fork=yes --error-exitcode=1 --track-origins=yes VALGRIND:=IN_VALGRIND=1 valgrind --leak-check=full --show-leak-kinds=definite,indirect,possible --track-fds=yes --trace-children=no --child-silent-after-fork=yes --error-exitcode=1 --track-origins=yes
# For picosat, it otherwise needs some headers not available on musl for a feature we don't need. And we need trace enabled. # For picosat, it otherwise needs some headers not available on musl for a feature we don't need. And we need trace enabled.
EXTRA_DEFINES := NGETRUSAGE TRACE UPDATER_VERSION='"$(UPDATER_VERSION)"' EXTRA_DEFINES := NGETRUSAGE TRACE UPDATER_VERSION='"$(UPDATER_VERSION)"'
ifdef BUSYBOX_EXEC ifdef BUSYBOX_EXEC
......
...@@ -7,10 +7,12 @@ configuration scripts. ...@@ -7,10 +7,12 @@ configuration scripts.
Dependencies Dependencies
------------ ------------
Binary dependencies: Binary dependencies:
* C compiler (gcc preferred) * C compiler (gcc preferred) with C11 support
* Lua 5.1 * Lua 5.1
* libcurl * libcurl
* libevent2 * libevent2
* libb64
* uthash
Runtime dependencies: Runtime dependencies:
* usign (for signatures validation) * usign (for signatures validation)
......
This diff is collapsed.
...@@ -25,11 +25,18 @@ libupdater_MODULES := \ ...@@ -25,11 +25,18 @@ libupdater_MODULES := \
embed_types \ embed_types \
events \ events \
subprocess \ subprocess \
download \
uri \
uri_lua \
journal \ journal \
locks \ locks \
picosat \ picosat \
util \ util \
logging syscnf \
multiwrite \
logging \
pkgsorter
ifdef COV ifdef COV
libupdater_MODULES += lcoverage.embed libupdater_MODULES += lcoverage.embed
endif endif
...@@ -39,9 +46,9 @@ endif ...@@ -39,9 +46,9 @@ endif
libupdater_MODULES_3RDPARTY := picosat-965/picosat libupdater_MODULES_3RDPARTY := picosat-965/picosat
libupdater_PKG_CONFIGS := $(LUA_NAME) libevent libcurl libcrypto libupdater_PKG_CONFIGS := $(LUA_NAME) libevent libcurl libcrypto liburiparser
# Workaround, lua.pc doesn't containd -ldl, even when it uses dlopen # Workaround, lua.pc doesn't containd -ldl, even when it uses dlopen
libupdater_SO_LIBS += dl libupdater_SO_LIBS += dl b64
LIB_DOCS := \ LIB_DOCS := \
journal \ journal \
......
...@@ -77,10 +77,6 @@ static const char *opt_help[COT_LAST] = { ...@@ -77,10 +77,6 @@ static const char *opt_help[COT_LAST] = {
"--exclude=<name> Exclude this from output.\n", "--exclude=<name> Exclude this from output.\n",
[COT_USIGN] = [COT_USIGN] =
"--usign=<path> Path to usign tool used to verify packages signature. In default /usr/bin/usign.\n", "--usign=<path> Path to usign tool used to verify packages signature. In default /usr/bin/usign.\n",
[COT_MODEL] =
"--model=<model> Set/override target system model (e.g. Turris Omnia)\n",
[COT_BOARD] =
"--board=<board> Set/override target system board (e.g. rtrom01)\n",
[COT_NO_REPLAN] = [COT_NO_REPLAN] =
"--no-replan Don't replan. Install everyting at once. Use this if updater you are running isn't from packages it installs.\n", "--no-replan Don't replan. Install everyting at once. Use this if updater you are running isn't from packages it installs.\n",
[COT_NO_IMMEDIATE_REBOOT] = [COT_NO_IMMEDIATE_REBOOT] =
...@@ -102,8 +98,6 @@ enum option_val { ...@@ -102,8 +98,6 @@ enum option_val {
OPT_TASK_LOG_VAL, OPT_TASK_LOG_VAL,
OPT_EXCLUDE, OPT_EXCLUDE,
OPT_USIGN, OPT_USIGN,
OPT_MODEL,
OPT_BOARD,
OPT_NO_REPLAN, OPT_NO_REPLAN,
OPT_NO_IMMEDIATE_REBOOT, OPT_NO_IMMEDIATE_REBOOT,
OPT_OUT_OF_ROOT, OPT_OUT_OF_ROOT,
...@@ -127,8 +121,6 @@ static const struct option opt_long[] = { ...@@ -127,8 +121,6 @@ static const struct option opt_long[] = {
{ .name = "task-log", .has_arg = required_argument, .val = OPT_TASK_LOG_VAL }, { .name = "task-log", .has_arg = required_argument, .val = OPT_TASK_LOG_VAL },
{ .name = "exclude", .has_arg = required_argument, .val = OPT_EXCLUDE }, { .name = "exclude", .has_arg = required_argument, .val = OPT_EXCLUDE },
{ .name = "usign", .has_arg = required_argument, .val = OPT_USIGN }, { .name = "usign", .has_arg = required_argument, .val = OPT_USIGN },
{ .name = "model", .has_arg = required_argument, .val = OPT_MODEL },
{ .name = "board", .has_arg = required_argument, .val = OPT_BOARD },
{ .name = "no-replan", .has_arg = no_argument, .val = OPT_NO_REPLAN }, { .name = "no-replan", .has_arg = no_argument, .val = OPT_NO_REPLAN },
{ .name = "no-immediate-reboot", .has_arg = no_argument, .val = OPT_NO_IMMEDIATE_REBOOT }, { .name = "no-immediate-reboot", .has_arg = no_argument, .val = OPT_NO_IMMEDIATE_REBOOT },
{ .name = "out-of-root", .has_arg = no_argument, .val = OPT_OUT_OF_ROOT }, { .name = "out-of-root", .has_arg = no_argument, .val = OPT_OUT_OF_ROOT },
...@@ -154,8 +146,6 @@ static const struct simple_opt { ...@@ -154,8 +146,6 @@ static const struct simple_opt {
[OPT_TASK_LOG_VAL] = { COT_TASK_LOG, true, true }, [OPT_TASK_LOG_VAL] = { COT_TASK_LOG, true, true },
[OPT_EXCLUDE] = { COT_EXCLUDE, true, true }, [OPT_EXCLUDE] = { COT_EXCLUDE, true, true },
[OPT_USIGN] = { COT_USIGN, true, true }, [OPT_USIGN] = { COT_USIGN, true, true },
[OPT_MODEL] = { COT_MODEL, true, true },
[OPT_BOARD] = { COT_BOARD, true, true },
[OPT_NO_REPLAN] = { COT_NO_REPLAN, false, true }, [OPT_NO_REPLAN] = { COT_NO_REPLAN, false, true },
[OPT_NO_IMMEDIATE_REBOOT] = { COT_NO_IMMEDIATE_REBOOT, false, true }, [OPT_NO_IMMEDIATE_REBOOT] = { COT_NO_IMMEDIATE_REBOOT, false, true },
[OPT_OUT_OF_ROOT] = { COT_OUT_OF_ROOT, false, false }, [OPT_OUT_OF_ROOT] = { COT_OUT_OF_ROOT, false, false },
...@@ -276,8 +266,6 @@ struct cmd_op *cmd_args_parse(int argc, char *argv[], const enum cmd_op_type acc ...@@ -276,8 +266,6 @@ struct cmd_op *cmd_args_parse(int argc, char *argv[], const enum cmd_op_type acc
case COT_APPROVE: case COT_APPROVE:
case COT_EXCLUDE: case COT_EXCLUDE:
case COT_USIGN: case COT_USIGN:
case COT_MODEL:
case COT_BOARD:
case COT_NO_REPLAN: case COT_NO_REPLAN:
case COT_TASK_LOG: { case COT_TASK_LOG: {
struct cmd_op tmp = result[i]; struct cmd_op tmp = result[i];
......
...@@ -68,10 +68,6 @@ enum cmd_op_type { ...@@ -68,10 +68,6 @@ enum cmd_op_type {
COT_EXCLUDE, COT_EXCLUDE,
// Path to usign tool // Path to usign tool
COT_USIGN, COT_USIGN,
// Target model specification
COT_MODEL,
// Target board specification
COT_BOARD,
// Don't replan (do whole install at once) // Don't replan (do whole install at once)
COT_NO_REPLAN, COT_NO_REPLAN,
// Don't immediatelly reboot system // Don't immediatelly reboot system
......
...@@ -35,10 +35,11 @@ local mkdir = mkdir ...@@ -35,10 +35,11 @@ local mkdir = mkdir
local stat = stat local stat = stat
local events_wait = events_wait local events_wait = events_wait
local run_util = run_util local run_util = run_util
local uri = require "uri"
module "utils" module "utils"
-- luacheck: globals lines2set map set2arr arr2set cleanup_dirs dir_ensure mkdirp read_file write_file clone shallow_copy table_merge arr_append exception multi_index private filter_best strip table_overlay randstr arr_prune arr_inv file_exists -- luacheck: globals lines2set map set2arr arr2set cleanup_dirs dir_ensure mkdirp read_file write_file clone shallow_copy table_merge arr_append exception multi_index private filter_best strip table_overlay table_wrap randstr arr_prune arr_inv file_exists uri_syste_cas uri_no_crl uri_config uri_content
--[[ --[[
Convert provided text into set of lines. Doesn't care about the order. Convert provided text into set of lines. Doesn't care about the order.
...@@ -357,6 +358,18 @@ function table_overlay(table) ...@@ -357,6 +358,18 @@ function table_overlay(table)
}) })
end end
--[[
Always return a table. A table argument passes through unchanged; any
other value (including nil or false) becomes the single element of a
freshly created table.
]]
function table_wrap(table)
	-- The and/or idiom is safe here even for falsy inputs, because the
	-- condition is the type check, not the value itself.
	return type(table) == "table" and table or {table}
end
--[[ --[[
Check whether file exists Check whether file exists
]] ]]
...@@ -370,4 +383,51 @@ function file_exists(name) ...@@ -370,4 +383,51 @@ function file_exists(name)
end end
end end
--[[
Apply a table of configuration options to the given uri object.

This is the bridge between the old approach of plain Lua configuration
tables and the newer approach of inherited settings on uri objects.
For the full list of supported fields see the language documentation,
section Verification. Any field not present in the table is ignored
(the corresponding uri setting is left unchanged).
]]
function uri_config(uriobj, config)
	-- TODO and how about veri?
	if config.ca ~= nil then
		uriobj:set_ssl_verify(config.ca)
		uriobj:add_ca(nil) -- reset any previously configured CAs first
		-- Iterate the VALUES of the wrapped array; iterating with
		-- `for ca in pairs(...)` would yield the integer keys instead
		-- and hand bogus indices to add_ca.
		for _, ca in ipairs(table_wrap(config.ca)) do
			uriobj:add_ca(ca)
		end
	end
	if config.crl ~= nil then
		uriobj:add_crl(nil) -- reset any previously configured CRLs first
		for _, crl in ipairs(table_wrap(config.crl)) do
			uriobj:add_crl(crl)
		end
	end
	if config.ocsp ~= nil then
		uriobj:set_ocsp(config.ocsp)
	end
	if config.pubkey ~= nil then
		uriobj:add_pubkey(nil) -- reset any previously configured keys first
		for _, pubkey in ipairs(table_wrap(config.pubkey)) do
			uriobj:add_pubkey(pubkey)
		end
	end
	if config.sig ~= nil then
		uriobj:set_sig(config.sig)
	end
end
-- Fetch the content of the given URI.
-- Returns the downloaded content as the first value and the uri object as
-- the second one (that object can serve as a parent for further uris).
function uri_content(struri, parent, config)
	local handle = uri.new():to_buffer(struri, parent)
	uri_config(handle, config)
	-- TODO finish error and others?
	return handle:finish(), handle
end
return _M return _M
--[[
Copyright 2018, CZ.NIC z.s.p.o. (http://www.nic.cz/)
This file is part of the turris updater.
Updater is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Updater is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Updater. If not, see <http://www.gnu.org/licenses/>.
]]--
local os = os
local utils = require "utils"
local getcwd = getcwd
local DIE = DIE
module "syscnf"
-- Variables accessed from outside of this module
-- luacheck: globals root_dir status_file info_dir pkg_download_dir pkg_unpacked_dir dir_opkg_collided target_model target_board
-- Functions that we want to access from outside of this module
-- luacheck: globals set_root_dir set_target
-- Path suffixes appended to the root directory by set_root_dir (defined
-- later in this module) to form the absolute locations used by updater.
local status_file_suffix = "usr/lib/opkg/status" -- opkg database of installed packages
local info_dir_suffix = "usr/lib/opkg/info/" -- unpacked control files of packages
local pkg_unpacked_dir_suffix = "usr/share/updater/unpacked/" -- packages unpacked before installation
local pkg_download_dir_suffix = "usr/share/updater/download/" -- downloaded package archives
local dir_opkg_collided_suffix = "usr/share/updater/collided/" -- backup of files not owned by any package
--[[
Canonize a path to an absolute path. An already absolute path is returned
unchanged. A path starting with "~/" has the tilde replaced by the content
of the HOME environment variable. Any other (relative) path is prefixed
with the current working directory.
]]
local function path2abspath(path)
	if path:match("^/") then
		return path
	elseif path:match("^~/") then
		-- Drop the leading "~" and keep the "/..." remainder, so the
		-- result is "$HOME/foo" rather than "$HOME/~/foo".
		return os.getenv('HOME') .. path:sub(2)
	else
		return getcwd() .. "/" .. path
	end
end
--[[
Point all the configurable directories inside the provided directory.
This effectively declares that the whole target system is mounted under
the given prefix. Without an argument everything is rooted at "/".
]]
function set_root_dir(dir)
	local prefix = "/"
	if dir then
		-- Normalize: make it absolute, add a trailing slash and squash
		-- any duplicated slashes.
		prefix = (path2abspath(dir) .. "/"):gsub("/+", "/")
	end
	-- A root directory
	root_dir = prefix
	-- The file with status of installed packages
	status_file = prefix .. status_file_suffix
	-- The directory where unpacked control files of the packages live
	info_dir = prefix .. info_dir_suffix
	-- A directory to which we download packages
	pkg_download_dir = prefix .. pkg_download_dir_suffix
	-- A directory where unpacked packages live
	pkg_unpacked_dir = prefix .. pkg_unpacked_dir_suffix
	-- Directory where we move files and directories that weren't part of any package.
	dir_opkg_collided = prefix .. dir_opkg_collided_suffix
end
--[[
Set the variables target_model and target_board.

Either value may be specified explicitly; a value passed as nil is
auto-detected from the files in the /tmp/sysinfo directory. If a value is
neither given nor detectable, this function terminates execution with an
error.
]]
function set_target(model, board)
	-- Name of the target model (ex: Turris Omnia)
	target_model = model or utils.strip(utils.read_file('/tmp/sysinfo/model'))
	-- Name of the target board (ex: rtrom01)
	target_board = board or utils.strip(utils.read_file('/tmp/sysinfo/board_name'))
	if not target_model or not target_board then
		-- NOTE(review): this change set removes the --model/--board command
		-- line options elsewhere — confirm the hint below is still accurate.
		DIE("Auto detection of target model or board failed. You can specify them " ..
			"explicitly using --model and --board arguments.")
	end
end
...@@ -763,13 +763,13 @@ function steal_configs(current_status, installed_confs, configs) ...@@ -763,13 +763,13 @@ function steal_configs(current_status, installed_confs, configs)
end end
--[[ --[[
Move anything on given path to dir_opkg_collided. This backups and removes original files. Move anything on given path to opkg_collided_dir. This backups and removes original files.
When keep is set to true, file is copied instead of moved. When keep is set to true, file is copied instead of moved.
]] ]]
function user_path_move(path, keep) function user_path_move(path, keep)
-- At first create same parent directory relative to dir_opkg_collided -- At first create same parent directory relative to opkg_collided_dir
local fpath = "" local fpath = ""
for dir in (syscnf.dir_opkg_collided .. path):gsub("[^/]*/?$", ""):gmatch("[^/]+") do for dir in (syscnf.opkg_collided_dir .. path):gsub("[^/]*/?$", ""):gmatch("[^/]+") do
local randex = "" local randex = ""
while not utils.dir_ensure(fpath .. "/" .. dir .. randex) do while not utils.dir_ensure(fpath .. "/" .. dir .. randex) do
-- If there is file with same name, then append some random extension -- If there is file with same name, then append some random extension
......
...@@ -33,14 +33,11 @@ local tostring = tostring ...@@ -33,14 +33,11 @@ local tostring = tostring
local error = error local error = error
local WARN = WARN local WARN = WARN
local ERROR = ERROR local ERROR = ERROR
local run_command = run_command
local events_wait = events_wait
local get_updater_version = get_updater_version local get_updater_version = get_updater_version
local utils = require "utils" local utils = require "utils"
local backend = require "backend" local backend = require "backend"
local requests = require "requests" local requests = require "requests"
local syscnf = require "syscnf" local syscnf = require "syscnf"
local uri = require "uri"
local uci_ok, uci = pcall(require, "uci") local uci_ok, uci = pcall(require, "uci")
module "sandbox" module "sandbox"
...@@ -54,7 +51,9 @@ local updater_features = utils.arr2set({ ...@@ -54,7 +51,9 @@ local updater_features = utils.arr2set({
'conflicts', 'conflicts',
'abi_change', 'abi_change',
'abi_change_deep', 'abi_change_deep',
'replan_string' 'replan_string',
'relative_uri',
'no_returns'
}) })
-- Available functions and "constants" from global environment -- Available functions and "constants" from global environment
...@@ -93,8 +92,8 @@ local local_available_funcs = { ...@@ -93,8 +92,8 @@ local local_available_funcs = {
local rest_additional_funcs = { local rest_additional_funcs = {
{"version_match", backend.version_match}, {"version_match", backend.version_match},
{"version_cmp", backend.version_cmp}, {"version_cmp", backend.version_cmp},
{"system_cas", uri.system_cas}, {"system_cas", true},
{"no_crl", uri.no_crl} {"no_crl", false}
} }
state_vars = nil state_vars = nil
...@@ -120,12 +119,11 @@ function load_state_vars() ...@@ -120,12 +119,11 @@ function load_state_vars()
]] ]]
state_vars = { state_vars = {
root_dir = syscnf.root_dir, root_dir = syscnf.root_dir,
model = syscnf.target_model,
board_name = syscnf.target_board,
turris_version = utils.strip(utils.read_file('/etc/turris-version')),
self_version = get_updater_version(), self_version = get_updater_version(),
language_version = 1, language_version = 1,
features = updater_features, features = updater_features,
os_release = syscnf.os_release(),
host_os_release = syscnf.host_os_release(),
--[[ --[[
In case we fail to read that file (it is not there), we match against In case we fail to read that file (it is not there), we match against
an empty string, which produces nil ‒ the element won't be in there. an empty string, which produces nil ‒ the element won't be in there.
...@@ -147,11 +145,6 @@ function load_state_vars() ...@@ -147,11 +145,6 @@ function load_state_vars()
end end
end) end)
} }
events_wait(run_command(function (ecode, _, stdout, _)
if ecode == 0 then
state_vars.serial = utils.strip(stdout)
end
end, nil, nil, -1, -1, '/usr/bin/atsha204cmd', 'serial-number'))
end end
...@@ -404,8 +397,7 @@ function run_sandboxed(chunk, name, sec_level, parent, context_merge, context_mo ...@@ -404,8 +397,7 @@ function run_sandboxed(chunk, name, sec_level, parent, context_merge, context_mo
end end
local context = new(sec_level, parent) local context = new(sec_level, parent)
utils.table_merge(context, context_merge or {}) utils.table_merge(context, context_merge or {})
context_mod = context_mod or function () end if context_mod then context_mod(context) end
context_mod(context)
local func = setfenv(chunk, context.env) local func = setfenv(chunk, context.env)
local ok, err = pcall(func) local ok, err = pcall(func)
if ok then if ok then
......
This diff is collapsed.
...@@ -25,7 +25,6 @@ local pcall = pcall ...@@ -25,7 +25,6 @@ local pcall = pcall
local next = next local next = next
local type = type local type = type
local assert = assert local assert = assert
local unpack = unpack
local table = table local table = table
local string = string local string = string
local events_wait = events_wait local events_wait = events_wait
...@@ -36,112 +35,84 @@ local ERROR = ERROR ...@@ -36,112 +35,84 @@ local ERROR = ERROR
local utils = require "utils" local utils = require "utils"
local backend = require "backend" local backend = require "backend"
local requests = require "requests" local requests = require "requests"
local uri = require "uri"
module "postprocess" module "postprocess"
-- luacheck: globals get_repos deps_canon conflicts_canon available_packages pkg_aggregate run sort_candidates -- luacheck: globals get_repos deps_canon conflicts_canon available_packages pkg_aggregate run sort_candidates
function get_repos() local function repo_parse(repo)
DBG("Getting repos") repo.tp = 'parsed-repository'
--[[ repo.content = {}
The repository index downloads are already in progress since local name = repo.name .. "/" .. repo.index_uri:uri()
the repository objects have been created. We now register -- Get index
callback for the arrival of data. This might happen right local index = repo.index_uri:finish() -- TODO error?
away or later on. Anyway, after we wait, all the indices if index:sub(1, 2) == string.char(0x1F, 0x8B) then -- copressed index
have been downloaded. DBG("Decompressing index " .. name)
local extr = run_util(function (ecode, _, stdout, stderr)
When we get each index, we detect if the data is gzipped if ecode ~= 0 then
or not. If it is not, the repository is parsed right away. error(utils.exception('repo broken', "Couldn't decompress " .. name .. ": " .. stderr))
If it is, extraction is run in the background and parsing
is scheduled for once it finishes. Eventually, we wait for
all the extractions to finish, and at that point everything
is parsed.
]]
local uris = {} -- The uris we wait for to be downloaded
local extract_events = {} -- The extractions we wait for
local errors = {} -- Collect errors as we go
local fatal = false -- Are any of them a reason to abort?
--[[
We don't care about the order in which we register the callbacks
(which may be different from the order in which they are called
anyway).
]]
for _, repo in pairs(requests.known_repositories_all) do
repo.tp = 'parsed-repository'
repo.content = {}
for subrepo, index_uri in pairs(utils.private(repo).index_uri) do
local name = repo.name .. "/" .. index_uri.uri
table.insert(uris, index_uri)
local function broken(why, extra)
ERROR("Index " .. name .. " is broken (" .. why .. "): " .. tostring(extra))
extra.why = why
extra.repo = name
repo.content[subrepo] = extra
table.insert(errors, extra)
fatal = fatal or not utils.arr2set(repo.ignore or {})[why]
end
local function parse(content)
DBG("Parsing index " .. name)
local ok, list = pcall(backend.repo_parse, content)
if ok then
for _, pkg in pairs(list) do
-- Compute the URI of each package (but don't download it yet, so don't create the uri object)
pkg.uri_raw = repo.repo_uri .. subrepo .. '/' .. pkg.Filename
pkg.repo = repo
end
repo.content[subrepo] = {
tp = "pkg-list",
list = list
}
else
broken('syntax', utils.exception('repo broken', "Couldn't parse the index of " .. name .. ": " .. tostring(list)))
end
end
local function decompressed(ecode, _, stdout, stderr)
DBG("Decompression of " .. name .. " done")
if ecode == 0 then
parse(stdout)
else
broken('syntax', utils.exception('repo broken', "Couldn't decompress " .. name .. ": " .. stderr))
end end
index = stdout
end end
local function downloaded(ok, answer) , nil, index, -1, -1, 'gzip', '-dc')
DBG("Received repository index " .. name) events_wait(extr)
if not ok then end
-- Couldn't download -- Parse index
-- TODO: Once we have validation, this could also mean the integrity is broken, not download DBG("Parsing index " .. name)
broken('missing', answer) local ok, list = pcall(backend.repo_parse, index)
elseif answer:sub(1, 2) == string.char(0x1F, 0x8B) then if not ok then
-- It starts with gzip magic - we want to decompress it local msg = "Couldn't parse the index of " .. name .. ": " .. tostring(list)
DBG("Index " .. name .. " is compressed, decompressing") if not repo.optional then
table.insert(extract_events, run_util(decompressed, nil, answer, -1, -1, 'gzip', '-dc')) error(utils.exception('syntax', msg))
else end
parse(answer) WARN(msg)
end -- TODO we might want to ignore this repository in its fulles instead of this
end
for _, pkg in pairs(list) do
-- Compute the URI of each package (but don't download it yet, so don't create the uri object)
pkg.uri_raw = repo.repo_uri .. '/' .. pkg.Filename
pkg.repo = repo
end
repo.content = list
end
local function repos_failed_download(uri_fail)
-- Locate failed repository and check if we can continue
for _, repo in pairs(requests.known_repositories) do
if uri_fail == repo.index_uri then
local message = "Download failed for repository index " ..
repo.name .. " (" .. repo.index_uri:uri() .. "): " ..
tostring(repo.index_uri:download_error())
if not repo.optional then
error(utils.exception('repo missing', message))
end end
index_uri:cback(downloaded) WARN(message)
repo.tp = 'failed-repository'
break
end end
--[[
We no longer need to keep the uris in there, we
wait for them here and after all is done, we want
the contents to be garbage collected.
]]
utils.private(repo).index_uri = nil
end end
-- Make sure everything is downloaded end
uri.wait(unpack(uris))
-- And extracted function get_repos()
events_wait(unpack(extract_events)) DBG("Downloading repositories indexes")
-- Process any errors -- Run download
local multi = utils.exception('multiple', "Multiple exceptions (" .. #errors .. ")") while true do
multi.errors = errors local uri_fail = requests.repositories_uri_master:download()
if fatal then if uri_fail then
error(multi) repos_failed_download(uri_fail)
elseif next(errors) then else
return multi break
else end
return nil end
-- Collect indexes and parse them
for _, repo in pairs(requests.known_repositories) do
if repo.tp == 'repository' then -- ignore failed repositories
local ok, err = pcall(repo_parse, repo)
if not ok then
-- TODO is this fatal?
error(err)
end
end
end end
end end
...@@ -332,21 +303,20 @@ to form single package object. ...@@ -332,21 +303,20 @@ to form single package object.
]] ]]
function pkg_aggregate() function pkg_aggregate()
DBG("Aggregating packages together") DBG("Aggregating packages together")
for _, repo in pairs(requests.known_repositories_all) do for _, repo in pairs(requests.known_repositories) do
for _, cont in pairs(repo.content) do if repo.tp == "parsed-repository" then
if type(cont) == 'table' and cont.tp == 'pkg-list' then -- TODO this content design is invalid as there can be multiple packages of same name in same repository with different versions
for name, candidate in pairs(cont.list) do for name, candidate in pairs(repo.content) do
if not available_packages[name] then if not available_packages[name] then
available_packages[name] = {candidates = {}, modifiers = {}} available_packages[name] = {candidates = {}, modifiers = {}}
end end
table.insert(available_packages[name].candidates, candidate) table.insert(available_packages[name].candidates, candidate)
if candidate.Provides then -- Add this candidate to package it provides if candidate.Provides then -- Add this candidate to package it provides
for p in candidate.Provides:gmatch("[^, ]+") do for p in candidate.Provides:gmatch("[^, ]+") do
if not available_packages[p] then if not available_packages[p] then
available_packages[p] = {candidates = {}, modifiers = {}} available_packages[p] = {candidates = {}, modifiers = {}}
end
table.insert(available_packages[p].candidates, candidate)
end end
table.insert(available_packages[p].candidates, candidate)
end end
end end
end end
...@@ -467,10 +437,7 @@ function pkg_aggregate() ...@@ -467,10 +437,7 @@ function pkg_aggregate()
end end
function run() function run()
local repo_errors = get_repos() get_repos()
if repo_errors then
WARN("Not all repositories are available")
end
pkg_aggregate() pkg_aggregate()
end end
......
...@@ -261,7 +261,7 @@ local function sat_build(sat, pkgs, requests) ...@@ -261,7 +261,7 @@ local function sat_build(sat, pkgs, requests)
} }
-- Go trough requests and add them to SAT -- Go trough requests and add them to SAT
for _, req in ipairs(requests) do for _, req in ipairs(requests) do
if not pkgs[req.package.name] and not utils.arr2set(req.ignore or {})["missing"] then if not pkgs[req.package.name] and not req.optional then
error(utils.exception('inconsistent', "Requested package " .. req.package.name .. " doesn't exists.")) error(utils.exception('inconsistent', "Requested package " .. req.package.name .. " doesn't exists."))
end end
local req_var = sat:var() local req_var = sat:var()
...@@ -388,7 +388,7 @@ local function build_plan(pkgs, requests, sat, satmap) ...@@ -388,7 +388,7 @@ local function build_plan(pkgs, requests, sat, satmap)
inwstack[name] = #wstack + 1 -- Signal that we are working on this package group. inwstack[name] = #wstack + 1 -- Signal that we are working on this package group.
table.insert(wstack, name) table.insert(wstack, name)
for _, p in pkg_dep_iterate(utils.multi_index(pkg, 'modifier', 'deps') or {}) do -- plan package group dependencies for _, p in pkg_dep_iterate(utils.multi_index(pkg, 'modifier', 'deps') or {}) do -- plan package group dependencies
pkg_plan(p, ignore_missing or utils.arr2set(utils.multi_index(pkg, 'modifier', 'ignore') or {})["deps"], false, "Package " .. name .. " requires package") pkg_plan(p, ignore_missing or utils.multi_index(pkg, 'modifier', 'optional'), false, "Package " .. name .. " requires package")
end end
if not next(candidates) then return end -- We have no candidate, but we passed previous check because it's virtual if not next(candidates) then return end -- We have no candidate, but we passed previous check because it's virtual
local r = {} local r = {}
...@@ -401,7 +401,7 @@ local function build_plan(pkgs, requests, sat, satmap) ...@@ -401,7 +401,7 @@ local function build_plan(pkgs, requests, sat, satmap)
else else
no_pkg_candidate = false no_pkg_candidate = false
for _, p in pkg_dep_iterate(utils.multi_index(candidate, 'deps') or {}) do for _, p in pkg_dep_iterate(utils.multi_index(candidate, 'deps') or {}) do
pkg_plan(p, ignore_missing or utils.arr2set(utils.multi_index(pkg, 'modifier', 'ignore') or {})["deps"], false, "Package " .. name .. " requires package") pkg_plan(p, ignore_missing or utils.multi_index(pkg, 'modifier', 'optional'), false, "Package " .. name .. " requires package")
end end
end end
end end
...@@ -434,7 +434,7 @@ local function build_plan(pkgs, requests, sat, satmap) ...@@ -434,7 +434,7 @@ local function build_plan(pkgs, requests, sat, satmap)
for _, req in pairs(requests) do for _, req in pairs(requests) do
if sat[satmap.req2sat[req]] then -- Plan only if we can satisfy given request if sat[satmap.req2sat[req]] then -- Plan only if we can satisfy given request
if req.tp == "install" then -- And if it is install request, uninstall requests are resolved by not being planned. if req.tp == "install" then -- And if it is install request, uninstall requests are resolved by not being planned.
local pln = pkg_plan(req.package, false, utils.arr2set(req.ignore or {})["missing"], 'Requested package') local pln = pkg_plan(req.package, false, req.optional, 'Requested package')
-- Note that if pln is nil than we ignored missing package. We have to compute with that here -- Note that if pln is nil than we ignored missing package. We have to compute with that here
if pln then if pln then
if req.reinstall then if req.reinstall then
......
...@@ -20,18 +20,17 @@ along with Updater. If not, see <http://www.gnu.org/licenses/>. ...@@ -20,18 +20,17 @@ along with Updater. If not, see <http://www.gnu.org/licenses/>.
local next = next local next = next
local error = error local error = error
local ipairs = ipairs local ipairs = ipairs
local pcall = pcall
local table = table local table = table
local WARN = WARN local WARN = WARN
local INFO = INFO local INFO = INFO
local DIE = DIE local DIE = DIE
local md5 = md5
local sha256 = sha256 local sha256 = sha256
local reexec = reexec local reexec = reexec
local LS_CONF = LS_CONF local LS_CONF = LS_CONF
local LS_PLAN = LS_PLAN local LS_PLAN = LS_PLAN
local LS_DOWN = LS_DOWN local LS_DOWN = LS_DOWN
local update_state = update_state local update_state = update_state
local log_event = log_event
local utils = require "utils" local utils = require "utils"
local syscnf = require "syscnf" local syscnf = require "syscnf"
local sandbox = require "sandbox" local sandbox = require "sandbox"
...@@ -56,16 +55,13 @@ end ...@@ -56,16 +55,13 @@ end
local function required_pkgs(entrypoint) local function required_pkgs(entrypoint)
-- Get the top-level script -- Get the top-level script
local tlc = sandbox.new('Full') local entry_chunk, entry_uri = utils.uri_content(entrypoint, nil, {})
local ep_uri = uri(tlc, entrypoint) local merge = {
local ok, tls = ep_uri:get() -- Note: See requests.script for usage of this value
if not ok then error(tls) end ["parent_script_uri"] = entry_uri
}
update_state(LS_CONF) update_state(LS_CONF)
--[[ local err = sandbox.run_sandboxed(entry_chunk, entrypoint, 'Full', nil, merge)
Run the top level script with full privileges.
The script shall be part of updater anyway.
]]
local err = sandbox.run_sandboxed(tls, "", 'Full')
if err and err.tp == 'error' then error(err) end if err and err.tp == 'error' then error(err) end
update_state(LS_PLAN) update_state(LS_PLAN)
-- Go through all the requirements and decide what we need -- Go through all the requirements and decide what we need
...@@ -103,28 +99,24 @@ function tasks_to_transaction() ...@@ -103,28 +99,24 @@ function tasks_to_transaction()
INFO("Downloading packages") INFO("Downloading packages")
update_state(LS_DOWN) update_state(LS_DOWN)
-- Start packages download -- Start packages download
local uri_master = uri:new()
for _, task in ipairs(tasks) do for _, task in ipairs(tasks) do
if task.action == "require" then if task.action == "require" then
-- Strip sig verification off, packages from repos don't have their own .sig task.file = syscnf.pkg_download_dir .. task.name .. '-' .. task.package.Version .. '.ipk'
-- files, but they are checked by hashes in the (already checked) index. task.real_uri = uri_master:to_file(task.package.Filename, task.file, task.package.repo.index_uri)
local veriopts = utils.shallow_copy(task.package.repo) task.real_uri:add_pubkey() -- do not verify signatures (there are none)
local veri = veriopts.verification or utils.private(task.package.repo).context.verification or 'both' -- TODO on failure: log_event('D', task.name .. " " .. task.package.Version)
if veri == 'both' then
veriopts.verification = 'cert'
elseif veri == 'sig' then
veriopts.verification = 'none'
end
task.real_uri = uri(utils.private(task.package.repo).context, task.package.uri_raw, veriopts)
task.real_uri:cback(function()
log_event('D', task.name .. " " .. task.package.Version)
end)
end end
end end
uri_master:download() -- TODO what if error?
-- Now push all data into the transaction -- Now push all data into the transaction
utils.mkdirp(syscnf.pkg_download_dir)
for _, task in ipairs(tasks) do for _, task in ipairs(tasks) do
if task.action == "require" then if task.action == "require" then
local ok, data = task.real_uri:get() local ok, err = pcall(function() task.real_uri:finish() end)
if not ok then error(data) end if not ok then error(err) end
-- TODO check hash
--[[
if task.package.MD5Sum then if task.package.MD5Sum then
local sum = md5(data) local sum = md5(data)
if sum ~= task.package.MD5Sum then if sum ~= task.package.MD5Sum then
...@@ -137,9 +129,8 @@ function tasks_to_transaction() ...@@ -137,9 +129,8 @@ function tasks_to_transaction()
error(utils.exception("corruption", "The sha256 sum of " .. task.name .. " does not match")) error(utils.exception("corruption", "The sha256 sum of " .. task.name .. " does not match"))
end end
end end
local fpath = syscnf.pkg_download_dir .. task.name .. '-' .. task.package.Version .. '.ipk' ]]
utils.write_file(fpath, data) transaction.queue_install_downloaded(task.file, task.name, task.package.Version, task.modifier)
transaction.queue_install_downloaded(fpath, task.name, task.package.Version, task.modifier)
elseif task.action == "remove" then elseif task.action == "remove" then
transaction.queue_remove(task.name) transaction.queue_remove(task.name)
else else
......
This diff is collapsed.
/*
 * Copyright 2018, CZ.NIC z.s.p.o. (http://www.nic.cz/)
 *
 * This file is part of the turris updater.
 *
 * Updater is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Updater is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Updater. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef UPDATER_DOWNLOAD_H
#define UPDATER_DOWNLOAD_H
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <event2/event.h>
#include <curl/curl.h>
#include "logging.h"
// Forward declaration of a single registered download (defined below)
struct download_i;
// Download manager object: drives all registered download instances
// through a libevent event loop feeding a curl-multi handle.
struct downloader {
	struct event_base *ebase; // libevent base
	CURLM *cmulti; // curl multi instance
	struct event *ctimer; // timer event used by curl to schedule its work
	struct download_i **instances; // registered download instances
	size_t i_size, i_allocated; // number of instances and allocated capacity
	int pending; // number of instances not yet finished downloading
	struct download_i *failed; // latest failed instance (used internally)
};
// Download options (additional options configuring security and more)
struct download_opts {
	long timeout; // total download timeout, including all retries
	long connect_timeout; // timeout for a single connection attempt
	int retries; // number of full download retries
	bool follow_redirect; // whether HTTP 3xx redirects should be followed
	bool ssl_verify; // whether the SSL certificate should be verified
	bool ocsp; // whether OCSP should be used for certificate verification
	const char *cacert_file; // path to custom CA certificate bundle
	const char *capath; // path to directory containing CA certificates
	const char *crl_file; // path to custom CA CRL
};
// Where a download instance stores its received data
enum download_output_type {
	DOWN_OUT_T_FILE,
	DOWN_OUT_T_BUFFER
};
// Download instance. Identifier of a single download.
struct download_i {
	bool done; // whether the download has finished
	bool success; // whether the download succeeded; only valid once done is true
	char error[CURL_ERROR_SIZE]; // error message if the download fails
	int retries; // number of retries remaining
	struct downloader *downloader; // parent downloader
	enum download_output_type out_t; // which output variant this instance uses
	union {
		struct {
			int fd; // file descriptor
			char *fpath; // path to output file
		} *file; // used when writing to a file (DOWN_OUT_T_FILE)
		struct {
			uint8_t *data; // buffer for output data
			size_t size; // amount of downloaded data
		} *buff; // used when writing to a buffer (DOWN_OUT_T_BUFFER)
	} out; // output data
	CURL *curl; // curl easy session
};
// Initialize new download manager
// parallel: Number of possible parallel downloads
// Returns new instance of downloader
struct downloader *downloader_new(int parallel);
// Free given instance of downloader
void downloader_free(struct downloader*) __attribute__((nonnull));
// Run downloader and download all registered URLs
// return: NULL on success, otherwise pointer to the download instance that failed.
struct download_i *downloader_run(struct downloader*) __attribute__((nonnull));
// Remove all download instances from downloader
void downloader_flush(struct downloader*) __attribute__((nonnull));
// Set default values for download_opts
// opts: Allocated instance of download options to be set to defaults
// Note: strings in download_opts are set to NULL and previous values are NOT
// freed.
void download_opts_def(struct download_opts *opts) __attribute__((nonnull));
// Register given URL to be downloaded to a file.
// url: URL data are downloaded from
// output_path: Path where data are going to be stored (written to)
// opts: Download options (does not have to exist during instance existence)
// Returns download instance
struct download_i *download_file(struct downloader *downloader, const char *url,
		const char *output_path, const struct download_opts *opts)
	__attribute__((nonnull(1, 2, 3, 4)));
// Register given URL to be downloaded to a temporary file. The output file path
// is generated using the mkstemp function.
// url: URL data are downloaded from
// output_template: Template for path where data are going to be stored (written
//   to). Passed string has to end with XXXXXX and is modified to contain the used
//   path. This string should be freed only after the download instance is freed.
// opts: Download options (does not have to exist during instance existence)
// Returns download instance
struct download_i *download_temp_file(struct downloader *downloader,
		const char *url, char *output_template, const struct download_opts *opts)
	__attribute__((nonnull(1, 2, 3, 4)));
// Register given URL to be downloaded to an internal buffer.
// url: URL data are downloaded from
// opts: Download options (does not have to exist during instance existence)
// Returns download instance
struct download_i *download_data(struct downloader *downloader, const char *url,
		const struct download_opts *opts) __attribute__((nonnull(1, 2, 3)));
// Free download instance
void download_i_free(struct download_i*) __attribute__((nonnull));
// This is the same as download_i_free, but where download_i_free just frees the
// downloaded buffer, this passes it to the caller. The instance is freed the same
// way as in the case of download_i_free, but the data buffer has to be freed
// later by the caller.
// In other words this takes over the allocated buffer and frees the rest of the
// instance.
// This can be called only on an instance that was created by download_data.
void download_i_collect_data(struct download_i*, uint8_t **data, size_t *size);
#endif
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include "inject.h" #include "inject.h"
#include "util.h" #include "util.h"
#include "logging.h" #include "logging.h"
#include <lauxlib.h>
void inject_func_n(lua_State *L, const char *module, const struct inject_func *inject, size_t count) { void inject_func_n(lua_State *L, const char *module, const struct inject_func *inject, size_t count) {
// Inject the functions // Inject the functions
...@@ -55,3 +56,9 @@ void inject_module(lua_State *L, const char *module) { ...@@ -55,3 +56,9 @@ void inject_module(lua_State *L, const char *module) {
// Drop the _M, package, loaded // Drop the _M, package, loaded
lua_pop(L, 3); lua_pop(L, 3);
} }
void inject_metatable_self_index(lua_State *L, const char *meta) {
ASSERT(luaL_newmetatable(L, meta) == 1);
lua_pushvalue(L, -1);
lua_setfield(L, -2, "__index");
}
...@@ -37,5 +37,7 @@ void inject_str_const(lua_State *L, const char *module, const char *name, const ...@@ -37,5 +37,7 @@ void inject_str_const(lua_State *L, const char *module, const char *name, const
void inject_int_const(lua_State *L, const char *module, const char *name, const int value) __attribute__((nonnull)); void inject_int_const(lua_State *L, const char *module, const char *name, const int value) __attribute__((nonnull));
// Make the table on top of the stack a module. Drop the table from the stack. // Make the table on top of the stack a module. Drop the table from the stack.
void inject_module(lua_State *L, const char *module) __attribute__((nonnull)); void inject_module(lua_State *L, const char *module) __attribute__((nonnull));
// Create new metatable on top of stack that is self indexing (__index is a table it self)
void inject_metatable_self_index(lua_State *L, const char *meta) __attribute__((nonnull));
#endif #endif
...@@ -25,7 +25,10 @@ ...@@ -25,7 +25,10 @@
#include "journal.h" #include "journal.h"
#include "locks.h" #include "locks.h"
#include "arguments.h" #include "arguments.h"
#include "syscnf.h"
#include "uri_lua.h"
#include "picosat.h" #include "picosat.h"
#include "pkgsorter.h"
#include <lua.h> #include <lua.h>
#include <lualib.h> #include <lualib.h>
...@@ -948,6 +951,7 @@ struct { ...@@ -948,6 +951,7 @@ struct {
{ LS_FAIL, "LS_FAIL"}, { LS_FAIL, "LS_FAIL"},
{ LST_PKG_SCRIPT, "LST_PKG_SCRIPT"}, { LST_PKG_SCRIPT, "LST_PKG_SCRIPT"},
{ LST_HOOK, "LST_HOOK"}, { LST_HOOK, "LST_HOOK"},
{ LST_USIGN, "LST_USIGN"},
}; };
// Various enum values that we want to inject // Various enum values that we want to inject
...@@ -1020,7 +1024,10 @@ struct interpreter *interpreter_create(struct events *events) { ...@@ -1020,7 +1024,10 @@ struct interpreter *interpreter_create(struct events *events) {
// Some binary embedded modules // Some binary embedded modules
journal_mod_init(L); journal_mod_init(L);
locks_mod_init(L); locks_mod_init(L);
syscnf_mod_init(L);
uri_mod_init(L);
picosat_mod_init(L); picosat_mod_init(L);
pkgsorter_mod_init(L);
#ifdef COVERAGE #ifdef COVERAGE
interpreter_load_coverage(result); interpreter_load_coverage(result);
#endif #endif
......
...@@ -154,11 +154,6 @@ enum log_level log_level_get(const char *level) { ...@@ -154,11 +154,6 @@ enum log_level log_level_get(const char *level) {
return LL_UNKNOWN; return LL_UNKNOWN;
} }
static const char *type_string[] = {
[LST_PKG_SCRIPT] = "pkg-script",
[LST_HOOK] = "hook"
};
// log_subproc cookie // log_subproc cookie
struct c_log_subproc { struct c_log_subproc {
bool err; // Is this out or err bool err; // Is this out or err
...@@ -168,7 +163,7 @@ struct c_log_subproc { ...@@ -168,7 +163,7 @@ struct c_log_subproc {
static ssize_t c_log_subproc_write(void *cookie, const char *buf, size_t size) { static ssize_t c_log_subproc_write(void *cookie, const char *buf, size_t size) {
struct c_log_subproc *cls = (struct c_log_subproc*)cookie; struct c_log_subproc *cls = (struct c_log_subproc*)cookie;
size_t len = size; size_t len = size;
if (would_log(LL_INFO)) if (would_log(cls->lsp->type == LST_USIGN ? LL_DBG : LL_INFO))
len = fwrite(buf, sizeof(char), size, cls->err ? stderr : stdout); len = fwrite(buf, sizeof(char), size, cls->err ? stderr : stdout);
// This is memory buffer so there should be no problem to match system output // This is memory buffer so there should be no problem to match system output
ASSERT(fwrite(buf, sizeof(char), len, cls->lsp->buffer.f) == len); ASSERT(fwrite(buf, sizeof(char), len, cls->lsp->buffer.f) == len);
...@@ -203,7 +198,10 @@ void log_subproc_open(struct log_subproc *lsp, enum log_subproc_type type, const ...@@ -203,7 +198,10 @@ void log_subproc_open(struct log_subproc *lsp, enum log_subproc_type type, const
cls->lsp = lsp; cls->lsp = lsp;
lsp->err = fopencookie(cls, "w", fncs); lsp->err = fopencookie(cls, "w", fncs);
// Print info // Print info
INFO("%s", message); if (type == LST_USIGN)
DBG("%s", message);
else
INFO("%s", message);
} }
void log_subproc_close(struct log_subproc *lsp, char **output) { void log_subproc_close(struct log_subproc *lsp, char **output) {
......
...@@ -19,7 +19,9 @@ ...@@ -19,7 +19,9 @@
#ifndef UPDATER_LOGGING_H #ifndef UPDATER_LOGGING_H
#define UPDATER_LOGGING_H #define UPDATER_LOGGING_H
#ifndef _GNU_SOURCE
#define _GNU_SOURCE #define _GNU_SOURCE
#endif
#include <stdbool.h> #include <stdbool.h>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
...@@ -107,7 +109,8 @@ void setup_logging(enum log_level tty, enum log_level syslog); ...@@ -107,7 +109,8 @@ void setup_logging(enum log_level tty, enum log_level syslog);
// afterward. // afterward.
enum log_subproc_type { enum log_subproc_type {
LST_PKG_SCRIPT, // This is post/pre install/rm script LST_PKG_SCRIPT, // This is post/pre install/rm script
LST_HOOK // This is updater's hook LST_HOOK, // This is updater's hook
LST_USIGN // This is usign executable (this has explicitly lower logging level set to DBG)
}; };
struct log_subproc { struct log_subproc {
......
...@@ -99,17 +99,18 @@ Function `subprocess` is defined as follows: ...@@ -99,17 +99,18 @@ Function `subprocess` is defined as follows:
predefined constants are as follows: predefined constants are as follows:
- `LST_PKG_SCRIPT` Any script provided by package (pre/post inst/rm) - `LST_PKG_SCRIPT` Any script provided by package (pre/post inst/rm)
- `LST_HOOK` Hook script executed on some updater state - `LST_HOOK` Hook script executed on some updater state
- `LST_USIGN` usign binary used for signature validation
* `message` is string describing what this subprocess is to user. It's human * `message` is string describing what this subprocess is to user. It's human
readable description of executed command. readable description of executed command.
* `timeout` is time in seconds after which subprocess will be automatically * `timeout` is time in milliseconds after which subprocess will be automatically
killed. killed.
* `callback` is optional function that would be called in subprocess just before * `callback` is optional function that would be called in subprocess just before
It executes given command. If you don't want to specify it then you can pass nil It executes given command. If you don't want to specify it then you can pass nil
Or you can just drop it out (in that case command is expeted on this argument Or you can just drop it out (in that case command is expeted on this argument
Place). This functions should has no arguments and shouldn't return anything. Place). This functions should have no arguments and shouldn't return anything.
* `command` is any arbitrary number of string arguments that are passed as command * `command` is any arbitrary number of string arguments that are passed as command
and its additional arguments. and its additional arguments.
...@@ -297,7 +298,7 @@ The format of journal returned by recover is a table with records. ...@@ -297,7 +298,7 @@ The format of journal returned by recover is a table with records.
Each record contains `type` ‒ one of the types above, and `params` ‒ Each record contains `type` ‒ one of the types above, and `params` ‒
table with all the parameters stored with the record. table with all the parameters stored with the record.
Pisocat Picosat
------- -------
Picosat can be used trough module `picosat`. Here is briefly described Picosat can be used trough module `picosat`. Here is briefly described
...@@ -327,6 +328,28 @@ object with variable you are interested in. It returns true or false. ...@@ -327,6 +328,28 @@ object with variable you are interested in. It returns true or false.
It can also return nil if variable was added after `satisfiable` method It can also return nil if variable was added after `satisfiable` method
call. call.
PkgSorter
---------
This is module implementing tree sorting algorithm with priorities.
TODO document what priority for node and for edge means
You can create new instance by calling `pkgsorter.new` function.
It returns object with following methods:
node(name, priority):: Create new node with given priority.
edge(priority, from, to, reverse):: Add edge between two nodes.
Note that if there is an edge from a low-priority node to a higher-priority
one, the priority of the source node is increased to match it.
prune():: Prune edges forming cycles and return report about it.
TODO
isnode(name):: Check if we have node of given name.
iterator(root):: Returns an iterator to go through the resulting order. You
  have to call it only after `prune` has been called. As an optional argument
  you can specify a root; then, instead of looking for all roots, only the
  given one is expanded and iterated through.
Others Others
------ ------
......
/*
* Copyright 2019, CZ.NIC z.s.p.o. (http://www.nic.cz/)
*
* This file is part of the turris updater.
*
* Updater is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Updater is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Updater. If not, see <http://www.gnu.org/licenses/>.
*/
#define _GNU_SOURCE
#include "multiwrite.h"
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
void mwrite_init(struct mwrite* mw) {
memset(mw, 0, sizeof *mw);
}
// Adopt an already opened file descriptor: all subsequent mwrite_write
// calls will also write to it.
// fd: descriptor to adopt, or -1 to signal that the open itself failed
// Returns false when fd is -1 (errno was set by the failed open),
// true otherwise.
bool mwrite_add(struct mwrite *mw, int fd) {
	if (fd == -1) // open failed (errno is set by open)
		return false;
	size_t idx = mw->count++;
	// NOTE(review): realloc failure is not checked here; on OOM the
	// following store dereferences NULL. Kept as-is to preserve behavior.
	mw->fds = realloc(mw->fds, mw->count * sizeof *mw->fds);
	mw->fds[idx] = fd;
	return true;
}
// Open pathname for writing and register it as an additional output.
// flags: extra flags passed to open(2) (e.g. O_CREAT, O_APPEND)
// Returns false (with errno set by open) on failure, true otherwise.
bool mwrite_open(struct mwrite *mw, const char *pathname, int flags) {
	// Bug fix: the original called open(pathname, flags, O_WRONLY), passing
	// O_WRONLY as the *mode* argument. The mode is only consulted together
	// with O_CREAT and O_WRONLY there yields permissions 0001, while write
	// access was never actually requested. Force write access in the flags
	// and use conventional 0644 permissions for newly created files.
	int fd = open(pathname, flags | O_WRONLY, 0644);
	return mwrite_add(mw, fd);
}
// Create a unique temporary file via mkostemp and register it as an output.
// template: mkstemp-style template ending in XXXXXX, modified in place
// flags: additional flags forwarded to mkostemp
// Returns false (with errno set) if the file could not be created.
bool mwrite_mkstemp(struct mwrite *mw, char *template, int flags) {
	return mwrite_add(mw, mkostemp(template, flags));
}
// Write count bytes from buf to every registered file descriptor.
// Unlike plain write(2) this loops until all data are written, and restarts
// writes that were interrupted by a signal (EINTR).
// Returns MWRITE_R_OK on success, MWRITE_R_STD_ERROR on a system error
// (errno is set) and MWRITE_R_UNABLE_TO_WRITE when write reports zero bytes
// written (typically out of space).
// Note: on error some outputs may already contain more data than others;
// no guarantee is made about the state of all written files.
enum mwrite_result mwrite_write(struct mwrite *mw, const void *buf, size_t count) {
	for (size_t i = 0; i < mw->count; i++) {
		const char *lbuf = buf;
		size_t tow = count;
		// while (not do-while): a zero-byte request is a successful no-op
		// instead of being misreported as MWRITE_R_UNABLE_TO_WRITE.
		while (tow > 0) {
			ssize_t ret = write(mw->fds[i], lbuf, tow);
			if (ret < 0) {
				if (errno == EINTR)
					continue; // interrupted by a signal: just try again
				// Bug fix: the original condition was inverted — it retried
				// on real errors forever and bailed out on EINTR.
				return MWRITE_R_STD_ERROR;
			}
			if (ret == 0)
				return MWRITE_R_UNABLE_TO_WRITE;
			lbuf += ret; // Bug fix: advance past bytes already written
			tow -= ret;
		}
	}
	return MWRITE_R_OK;
}
// Write a NUL-terminated string (excluding the terminator) to all outputs.
// Thin convenience wrapper around mwrite_write.
enum mwrite_result mwrite_str_write(struct mwrite *mw, const char *str) {
	size_t len = strlen(str);
	return mwrite_write(mw, str, len * sizeof *str);
}
// Close every registered descriptor, release the descriptor array and
// return the handler to its freshly initialized state.
// Returns false (with errno set) when a close fails; in that case the
// remaining descriptors are left open and no recovery is attempted.
bool mwrite_close(struct mwrite *mw) {
	for (size_t i = 0; i < mw->count; i++) {
		int res = close(mw->fds[i]);
		while (res != 0 && errno == EINTR) // retry closes interrupted by signals
			res = close(mw->fds[i]);
		if (res != 0)
			return false;
	}
	free(mw->fds);
	mwrite_init(mw); // back to the post-init empty state
	return true;
}
/*
 * Copyright 2019, CZ.NIC z.s.p.o. (http://www.nic.cz/)
 *
 * This file is part of the Turris Updater.
 *
 * Updater is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Updater is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Updater. If not, see <http://www.gnu.org/licenses/>.
 */
/* This implements a way to write to multiple files at once. It is not optimal
 * in any way. It opens a file descriptor for every file and writes the data in
 * a loop. There seems to be no existing approach for opening multiple files
 * and writing to them all at once (something like having multiple files under
 * one file descriptor). If such an API or approach becomes possible then this
 * code should be dropped and all usage should be replaced with the given API.
 */
#ifndef UPDATER_MULTIWRITE_H
#define UPDATER_MULTIWRITE_H
#include <stdlib.h>
#include <stdbool.h>
// MultiWrite handler
struct mwrite {
	size_t count; // number of open descriptors in fds
	int *fds; // array of descriptors every write is replicated to
};
// Result of the mwrite_write function
enum mwrite_result {
	MWRITE_R_OK = 0, // Write was successful
	MWRITE_R_STD_ERROR, // There was a standard error (use errno)
	MWRITE_R_UNABLE_TO_WRITE, // Write is unable to proceed (zero bytes written)
};
// Handler initialization function. Please call this before any other function.
void mwrite_init(struct mwrite*);
// Open pathname for writing. All subsequent calls to mwrite_write would write
// also to this file if the open is successful.
// You can provide additional flags. These flags are the same as in case of open.
// It returns false if an error occurred (in such case errno is set), otherwise
// true is returned.
bool mwrite_open(struct mwrite*, const char *pathname, int flags);
// This is the same as mwrite_open but instead of using open it uses mkostemp
// to open the file descriptor.
bool mwrite_mkstemp(struct mwrite*, char *template, int flags);
// Write data to mwrite
// This is pretty much the same as standard write. The only difference is that
// this implementation always writes all provided data unless an error is
// detected.
// This returns MWRITE_R_OK if the write was successful. MWRITE_R_STD_ERROR is
// returned when a standard error is detected and MWRITE_R_UNABLE_TO_WRITE is
// returned if the write is unable to proceed (probably because of not enough
// space).
// Note that if an error is detected, some writes can be completed and others
// might not be. This means that on error there are no guarantees on the state
// of all written files.
enum mwrite_result mwrite_write(struct mwrite*, const void *buf, size_t count);
// Same as mwrite_write but calculates the size of the string using strlen.
enum mwrite_result mwrite_str_write(struct mwrite*, const char *str);
// Close all previously opened files. This effectively returns the handler to
// the same state as it is after the mwrite_init call.
// Returns false if an error occurred (in such case errno is set), otherwise
// true is returned. Note that on error not all file descriptors are closed and
// there is currently no recovery path. You should exit the program instead.
bool mwrite_close(struct mwrite*);
#endif
This diff is collapsed.