...
 
Commits (30)
FROM debian:stable
FROM debian:unstable
ENV HOME=/root
......@@ -7,9 +7,10 @@ RUN \
apt-get -y upgrade && \
apt-get -y install --no-install-recommends \
busybox ca-certificates curl git \
make pkg-config gcc \
make cmake pkg-config gcc \
check cppcheck lua-check valgrind \
libcurl4-openssl-dev libevent-dev libssl-dev \
libcurl4-openssl-dev libevent-dev libssl-dev liburiparser-dev \
libb64-dev uthash-dev \
lua5.1 liblua5.1-0-dev \
asciidoc lcov markdown libcommon-sense-perl \
wget procps && \
......
......@@ -11,7 +11,7 @@ __pycache__
# The desired results
*.html
*.so
/src/opkg-trans/opkg-trans
/src/pkgtransaction/pkgtransaction
/src/pkgupdate/pkgupdate
/src/migrator/pkgmigrate
# Some stuff for debugging
......
......@@ -4,3 +4,6 @@
[submodule "tests/lunit-launch/lunit"]
path = tests/lunit-launch/lunit
url = http://repo.or.cz/lunit.git
[submodule "tests/usign"]
path = tests/usign
url = git://git.openwrt.org/project/usign.git
UPDATER_VERSION := $(shell (git describe --match 'v*' --dirty || echo 'unknown') | sed -e 's/^v//')
LUA_NAME := $(shell for lua in lua5.1 lua-5.1 lua51 lua ; do if pkg-config $$lua ; then echo $$lua ; break ; fi ; done)
VALGRIND:=IN_VALGRIND=1 valgrind --leak-check=full --show-leak-kinds=all --track-fds=yes --trace-children=no --child-silent-after-fork=yes --error-exitcode=1 --track-origins=yes
VALGRIND:=IN_VALGRIND=1 valgrind --leak-check=full --show-leak-kinds=definite,indirect,possible --track-fds=yes --trace-children=no --child-silent-after-fork=yes --error-exitcode=1 --track-origins=yes
# For picosat, it otherwise needs some headers not available on musl for a feature we don't need. And we need trace enabled.
EXTRA_DEFINES := NGETRUSAGE TRACE UPDATER_VERSION='"$(UPDATER_VERSION)"'
ifdef BUSYBOX_EXEC
......
......@@ -7,10 +7,12 @@ configuration scripts.
Dependencies
------------
Binary dependencies:
* C compiler (gcc preferred)
* C compiler (gcc preferred) with C11 support
* Lua 5.1
* libcurl
* libevent2
* libb64
* uthash
Runtime dependencies:
* usign (for signature validation)
......
This diff is collapsed.
......@@ -25,10 +25,15 @@ libupdater_MODULES := \
embed_types \
events \
subprocess \
download \
uri \
uri_lua \
journal \
locks \
picosat \
util \
syscnf \
multiwrite \
logging
ifdef COV
libupdater_MODULES += lcoverage.embed
......@@ -39,9 +44,9 @@ endif
libupdater_MODULES_3RDPARTY := picosat-965/picosat
libupdater_PKG_CONFIGS := $(LUA_NAME) libevent libcurl libcrypto
libupdater_PKG_CONFIGS := $(LUA_NAME) libevent libcurl libcrypto liburiparser
# Workaround: lua.pc doesn't contain -ldl, even though it uses dlopen
libupdater_SO_LIBS += dl
libupdater_SO_LIBS += dl b64
LIB_DOCS := \
journal \
......
......@@ -77,10 +77,6 @@ static const char *opt_help[COT_LAST] = {
"--exclude=<name> Exclude this from output.\n",
[COT_USIGN] =
"--usign=<path> Path to usign tool used to verify packages signature. In default /usr/bin/usign.\n",
[COT_MODEL] =
"--model=<model> Set/override target system model (e.g. Turris Omnia)\n",
[COT_BOARD] =
"--board=<board> Set/override target system board (e.g. rtrom01)\n",
[COT_NO_REPLAN] =
"--no-replan Don't replan. Install everyting at once. Use this if updater you are running isn't from packages it installs.\n",
[COT_NO_IMMEDIATE_REBOOT] =
......@@ -102,8 +98,6 @@ enum option_val {
OPT_TASK_LOG_VAL,
OPT_EXCLUDE,
OPT_USIGN,
OPT_MODEL,
OPT_BOARD,
OPT_NO_REPLAN,
OPT_NO_IMMEDIATE_REBOOT,
OPT_OUT_OF_ROOT,
......@@ -127,8 +121,6 @@ static const struct option opt_long[] = {
{ .name = "task-log", .has_arg = required_argument, .val = OPT_TASK_LOG_VAL },
{ .name = "exclude", .has_arg = required_argument, .val = OPT_EXCLUDE },
{ .name = "usign", .has_arg = required_argument, .val = OPT_USIGN },
{ .name = "model", .has_arg = required_argument, .val = OPT_MODEL },
{ .name = "board", .has_arg = required_argument, .val = OPT_BOARD },
{ .name = "no-replan", .has_arg = no_argument, .val = OPT_NO_REPLAN },
{ .name = "no-immediate-reboot", .has_arg = no_argument, .val = OPT_NO_IMMEDIATE_REBOOT },
{ .name = "out-of-root", .has_arg = no_argument, .val = OPT_OUT_OF_ROOT },
......@@ -154,8 +146,6 @@ static const struct simple_opt {
[OPT_TASK_LOG_VAL] = { COT_TASK_LOG, true, true },
[OPT_EXCLUDE] = { COT_EXCLUDE, true, true },
[OPT_USIGN] = { COT_USIGN, true, true },
[OPT_MODEL] = { COT_MODEL, true, true },
[OPT_BOARD] = { COT_BOARD, true, true },
[OPT_NO_REPLAN] = { COT_NO_REPLAN, false, true },
[OPT_NO_IMMEDIATE_REBOOT] = { COT_NO_IMMEDIATE_REBOOT, false, true },
[OPT_OUT_OF_ROOT] = { COT_OUT_OF_ROOT, false, false },
......@@ -276,8 +266,6 @@ struct cmd_op *cmd_args_parse(int argc, char *argv[], const enum cmd_op_type acc
case COT_APPROVE:
case COT_EXCLUDE:
case COT_USIGN:
case COT_MODEL:
case COT_BOARD:
case COT_NO_REPLAN:
case COT_TASK_LOG: {
struct cmd_op tmp = result[i];
......
......@@ -68,10 +68,6 @@ enum cmd_op_type {
COT_EXCLUDE,
// Path to usign tool
COT_USIGN,
// Target model specification
COT_MODEL,
// Target board specification
COT_BOARD,
// Don't replan (do whole install at once)
COT_NO_REPLAN,
// Don't immediately reboot system
......
......@@ -35,10 +35,11 @@ local mkdir = mkdir
local stat = stat
local events_wait = events_wait
local run_util = run_util
local uri = require "uri"
module "utils"
-- luacheck: globals lines2set map set2arr arr2set cleanup_dirs dir_ensure mkdirp read_file write_file clone shallow_copy table_merge arr_append exception multi_index private filter_best strip table_overlay randstr arr_prune arr_inv file_exists
-- luacheck: globals lines2set map set2arr arr2set cleanup_dirs dir_ensure mkdirp read_file write_file clone shallow_copy table_merge arr_append exception multi_index private filter_best strip table_overlay table_wrap randstr arr_prune arr_inv file_exists uri_syste_cas uri_no_crl uri_config uri_content
--[[
Convert provided text into set of lines. Doesn't care about the order.
......@@ -357,6 +358,18 @@ function table_overlay(table)
})
end
--[[
Guarantee a table result: a table argument is passed through untouched,
anything else is wrapped into a fresh single-element array.
]]
function table_wrap(value)
	if type(value) ~= "table" then
		value = {value}
	end
	return value
end
--[[
Check whether file exists
]]
......@@ -370,4 +383,51 @@ function file_exists(name)
end
end
--[[
This function applies given table of configuration to given uri object.
This is here because we need bridge between old approach of using lua tables and
approach of inherited settings in uri object.
For full support of all fields see language documentation, section Verification.
Any field that is not set in table is ignored (configuration is not changed).
]]
function uri_config(uriobj, config)
	-- TODO and how about veri?
	if config.ca ~= nil then
		-- NOTE(review): set_ssl_verify receives config.ca verbatim (it may be a
		-- list, not a boolean) — confirm the intended truthiness semantics.
		uriobj:set_ssl_verify(config.ca)
		-- Passing nil drops any inherited CAs before adding the configured ones
		uriobj:add_ca(nil)
		-- Iterate VALUES of the wrapped list (the original iterated pairs()
		-- with a single variable, which yields the numeric keys 1, 2, …)
		for _, ca in ipairs(table_wrap(config.ca)) do
			uriobj:add_ca(ca)
		end
	end
	if config.crl ~= nil then
		uriobj:add_crl(nil) -- drop inherited CRLs
		for _, crl in ipairs(table_wrap(config.crl)) do
			uriobj:add_crl(crl)
		end
	end
	if config.ocsp ~= nil then
		uriobj:set_ocsp(config.ocsp)
	end
	if config.pubkey ~= nil then
		uriobj:add_pubkey(nil) -- drop inherited public keys
		for _, pubkey in ipairs(table_wrap(config.pubkey)) do
			uriobj:add_pubkey(pubkey)
		end
	end
	if config.sig ~= nil then
		uriobj:set_sig(config.sig)
	end
end
-- Download the content of the given URI string.
-- struri: URI to fetch; parent: optional parent uri object the new uri inherits
-- from; config: configuration table applied through uri_config.
-- Returns the downloaded content and the uri object as second value (which can
-- be used as a parent to other uris).
function uri_content(struri, parent, config)
	local master = uri.new()
	local handle = master:to_buffer(struri, parent)
	uri_config(handle, config)
	master:download()
	local content = handle:finish()
	return content, handle
end
return _M
--[[
Copyright 2018, CZ.NIC z.s.p.o. (http://www.nic.cz/)
This file is part of the turris updater.
Updater is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Updater is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Updater. If not, see <http://www.gnu.org/licenses/>.
]]--
local os = os
local utils = require "utils"
local getcwd = getcwd
local DIE = DIE
module "syscnf"
-- Variables accessed from outside of this module
-- luacheck: globals root_dir status_file info_dir pkg_download_dir pkg_unpacked_dir dir_opkg_collided target_model target_board
-- Functions that we want to access from outside of this module
-- luacheck: globals set_root_dir set_target
local status_file_suffix = "usr/lib/opkg/status"
local info_dir_suffix = "usr/lib/opkg/info/"
local pkg_unpacked_dir_suffix = "usr/share/updater/unpacked/"
local pkg_download_dir_suffix = "usr/share/updater/download/"
local dir_opkg_collided_suffix = "usr/share/updater/collided/"
--[[
Canonizes path to absolute path. It does no change in case path is already an
absolute one, otherwise it prepends the current working directory. There is also
special handling in case path starts with tilde (~): that character is replaced
with the content of the HOME environment variable.
]]
local function path2abspath(path)
	if path:match("^/") then
		-- Already absolute
		return path
	elseif path:match("^~/") then
		-- Strip the "~" before prepending $HOME. The previous code kept the
		-- "~/" prefix, producing bogus paths like "$HOME/~/foo".
		return os.getenv('HOME') .. path:sub(2)
	else
		return getcwd() .. "/" .. path
	end
end
--[[
Set all the configurable directories to be inside the provided dir.
Effectively sets that the whole system is mounted under some
prefix. With no argument everything is reset to the real root ("/").
]]
function set_root_dir(dir)
	if dir then
		-- Canonize and collapse duplicate slashes, keeping one trailing slash
		dir = (path2abspath(dir) .. "/"):gsub("/+", "/")
	else
		dir = "/"
	end
	-- A root directory
	root_dir = dir
	-- The file with status of installed packages
	status_file = dir .. status_file_suffix
	-- The directory where unpacked control files of the packages live
	info_dir = dir .. info_dir_suffix
	-- A directory to which we download packages
	pkg_download_dir = dir .. pkg_download_dir_suffix
	-- A directory where unpacked packages live
	pkg_unpacked_dir = dir .. pkg_unpacked_dir_suffix
	-- Directory where we move files and directories that weren't part of any package.
	dir_opkg_collided = dir .. dir_opkg_collided_suffix
	-- NOTE(review): backend.lua references syscnf.opkg_collided_dir while this
	-- module historically exported dir_opkg_collided. Export both spellings so
	-- either caller works — confirm which name is canonical and drop the other.
	opkg_collided_dir = dir_opkg_collided
end
--[[
Set variables target_model and target_board.
You can explicitly specify model or board or both. If not specified then
detection is performed: files from the /tmp/sysinfo directory are used.
If no model or board is specified (passed as nil) and detection fails then this
function causes error and execution termination.
]]
function set_target(model, board)
	-- Name of the target model (ex: Turris Omnia)
	target_model = model or utils.strip(utils.read_file('/tmp/sysinfo/model'))
	-- Name of the target board (ex: rtrom01)
	target_board = board or utils.strip(utils.read_file('/tmp/sysinfo/board_name'))
	if not target_model or not target_board then
		-- NOTE(review): the --model/--board CLI options appear to be removed
		-- elsewhere in this commit — update this message if they are gone.
		DIE("Auto detection of target model or board failed. You can specify them " ..
			"explicitly using --model and --board arguments.")
	end
end
......@@ -763,13 +763,13 @@ function steal_configs(current_status, installed_confs, configs)
end
--[[
Move anything on given path to dir_opkg_collided. This backups and removes original files.
Move anything on given path to opkg_collided_dir. This backups and removes original files.
When keep is set to true, file is copied instead of moved.
]]
function user_path_move(path, keep)
-- At first create same parent directory relative to dir_opkg_collided
-- At first create same parent directory relative to opkg_collided_dir
local fpath = ""
for dir in (syscnf.dir_opkg_collided .. path):gsub("[^/]*/?$", ""):gmatch("[^/]+") do
for dir in (syscnf.opkg_collided_dir .. path):gsub("[^/]*/?$", ""):gmatch("[^/]+") do
local randex = ""
while not utils.dir_ensure(fpath .. "/" .. dir .. randex) do
-- If there is file with same name, then append some random extension
......
......@@ -33,14 +33,11 @@ local tostring = tostring
local error = error
local WARN = WARN
local ERROR = ERROR
local run_command = run_command
local events_wait = events_wait
local get_updater_version = get_updater_version
local utils = require "utils"
local backend = require "backend"
local requests = require "requests"
local syscnf = require "syscnf"
local uri = require "uri"
local uci_ok, uci = pcall(require, "uci")
module "sandbox"
......@@ -54,7 +51,9 @@ local updater_features = utils.arr2set({
'conflicts',
'abi_change',
'abi_change_deep',
'replan_string'
'replan_string',
'relative_uri',
'no_returns'
})
-- Available functions and "constants" from global environment
......@@ -93,8 +92,8 @@ local local_available_funcs = {
local rest_additional_funcs = {
{"version_match", backend.version_match},
{"version_cmp", backend.version_cmp},
{"system_cas", uri.system_cas},
{"no_crl", uri.no_crl}
{"system_cas", true},
{"no_crl", false}
}
state_vars = nil
......@@ -120,12 +119,11 @@ function load_state_vars()
]]
state_vars = {
root_dir = syscnf.root_dir,
model = syscnf.target_model,
board_name = syscnf.target_board,
turris_version = utils.strip(utils.read_file('/etc/turris-version')),
self_version = get_updater_version(),
language_version = 1,
features = updater_features,
os_release = syscnf.os_release(),
host_os_release = syscnf.host_os_release(),
--[[
In case we fail to read that file (it is not there), we match against
an empty string, which produces nil ‒ the element won't be in there.
......@@ -147,11 +145,6 @@ function load_state_vars()
end
end)
}
events_wait(run_command(function (ecode, _, stdout, _)
if ecode == 0 then
state_vars.serial = utils.strip(stdout)
end
end, nil, nil, -1, -1, '/usr/bin/atsha204cmd', 'serial-number'))
end
......@@ -259,7 +252,7 @@ List the variable names here. This way we ensure they are actually set in case
they are nil. This helps in testing and also ensures some other global variable
isn't mistaken for the actual value that isn't available.
]]
for _, name in pairs({'root_dir', 'model', 'board_name', 'turris_version', 'serial', 'architectures', 'installed', 'self_version', 'language_version', 'features'}) do
for _, name in pairs({'root_dir', 'os_release', 'host_os_release', 'architectures', 'installed', 'self_version', 'language_version', 'features'}) do
funcs.Restricted[name] = {
mode = "state",
value = name
......@@ -404,8 +397,7 @@ function run_sandboxed(chunk, name, sec_level, parent, context_merge, context_mo
end
local context = new(sec_level, parent)
utils.table_merge(context, context_merge or {})
context_mod = context_mod or function () end
context_mod(context)
if context_mod then context_mod(context) end
local func = setfenv(chunk, context.env)
local ok, err = pcall(func)
if ok then
......
This diff is collapsed.
......@@ -25,7 +25,6 @@ local pcall = pcall
local next = next
local type = type
local assert = assert
local unpack = unpack
local table = table
local string = string
local events_wait = events_wait
......@@ -36,112 +35,84 @@ local ERROR = ERROR
local utils = require "utils"
local backend = require "backend"
local requests = require "requests"
local uri = require "uri"
module "postprocess"
-- luacheck: globals get_repos deps_canon conflicts_canon available_packages pkg_aggregate run sort_candidates
function get_repos()
DBG("Getting repos")
--[[
The repository index downloads are already in progress since
the repository objects have been created. We now register
callback for the arrival of data. This might happen right
away or later on. Anyway, after we wait, all the indices
have been downloaded.
When we get each index, we detect if the data is gzipped
or not. If it is not, the repository is parsed right away.
If it is, extraction is run in the background and parsing
is scheduled for once it finishes. Eventually, we wait for
all the extractions to finish, and at that point everything
is parsed.
]]
local uris = {} -- The uris we wait for to be downloaded
local extract_events = {} -- The extractions we wait for
local errors = {} -- Collect errors as we go
local fatal = false -- Are any of them a reason to abort?
--[[
We don't care about the order in which we register the callbacks
(which may be different from the order in which they are called
anyway).
]]
for _, repo in pairs(requests.known_repositories_all) do
repo.tp = 'parsed-repository'
repo.content = {}
for subrepo, index_uri in pairs(utils.private(repo).index_uri) do
local name = repo.name .. "/" .. index_uri.uri
table.insert(uris, index_uri)
local function broken(why, extra)
ERROR("Index " .. name .. " is broken (" .. why .. "): " .. tostring(extra))
extra.why = why
extra.repo = name
repo.content[subrepo] = extra
table.insert(errors, extra)
fatal = fatal or not utils.arr2set(repo.ignore or {})[why]
end
local function parse(content)
DBG("Parsing index " .. name)
local ok, list = pcall(backend.repo_parse, content)
if ok then
for _, pkg in pairs(list) do
-- Compute the URI of each package (but don't download it yet, so don't create the uri object)
pkg.uri_raw = repo.repo_uri .. subrepo .. '/' .. pkg.Filename
pkg.repo = repo
end
repo.content[subrepo] = {
tp = "pkg-list",
list = list
}
else
broken('syntax', utils.exception('repo broken', "Couldn't parse the index of " .. name .. ": " .. tostring(list)))
end
end
local function decompressed(ecode, _, stdout, stderr)
DBG("Decompression of " .. name .. " done")
if ecode == 0 then
parse(stdout)
else
broken('syntax', utils.exception('repo broken', "Couldn't decompress " .. name .. ": " .. stderr))
local function repo_parse(repo)
repo.tp = 'parsed-repository'
repo.content = {}
local name = repo.name .. "/" .. repo.index_uri:uri()
-- Get index
local index = repo.index_uri:finish() -- TODO error?
if index:sub(1, 2) == string.char(0x1F, 0x8B) then -- compressed index
DBG("Decompressing index " .. name)
local extr = run_util(function (ecode, _, stdout, stderr)
if ecode ~= 0 then
error(utils.exception('repo broken', "Couldn't decompress " .. name .. ": " .. stderr))
end
index = stdout
end
local function downloaded(ok, answer)
DBG("Received repository index " .. name)
if not ok then
-- Couldn't download
-- TODO: Once we have validation, this could also mean the integrity is broken, not download
broken('missing', answer)
elseif answer:sub(1, 2) == string.char(0x1F, 0x8B) then
-- It starts with gzip magic - we want to decompress it
DBG("Index " .. name .. " is compressed, decompressing")
table.insert(extract_events, run_util(decompressed, nil, answer, -1, -1, 'gzip', '-dc'))
else
parse(answer)
end
, nil, index, -1, -1, 'gzip', '-dc')
events_wait(extr)
end
-- Parse index
DBG("Parsing index " .. name)
local ok, list = pcall(backend.repo_parse, index)
if not ok then
local msg = "Couldn't parse the index of " .. name .. ": " .. tostring(list)
if not repo.optional then
error(utils.exception('syntax', msg))
end
WARN(msg)
-- TODO we might want to ignore this repository in its fulles instead of this
end
for _, pkg in pairs(list) do
-- Compute the URI of each package (but don't download it yet, so don't create the uri object)
pkg.uri_raw = repo.repo_uri .. '/' .. pkg.Filename
pkg.repo = repo
end
repo.content = list
end
local function repos_failed_download(uri_fail)
-- Locate failed repository and check if we can continue
for _, repo in pairs(requests.known_repositories) do
if uri_fail == repo.index_uri then
local message = "Download failed for repository index " ..
repo.name .. " (" .. repo.index_uri:uri() .. "): " ..
tostring(repo.index_uri:download_error())
if not repo.optional then
error(utils.exception('repo missing', message))
end
index_uri:cback(downloaded)
WARN(message)
repo.tp = 'failed-repository'
break
end
--[[
We no longer need to keep the uris in there, we
wait for them here and after all is done, we want
the contents to be garbage collected.
]]
utils.private(repo).index_uri = nil
end
-- Make sure everything is downloaded
uri.wait(unpack(uris))
-- And extracted
events_wait(unpack(extract_events))
-- Process any errors
local multi = utils.exception('multiple', "Multiple exceptions (" .. #errors .. ")")
multi.errors = errors
if fatal then
error(multi)
elseif next(errors) then
return multi
else
return nil
end
-- Download every repository index and parse it.
-- Failed downloads are handed to repos_failed_download, which either tolerates
-- them (optional repositories) or raises; parsing failures propagate as errors.
function get_repos()
	DBG("Downloading repositories indexes")
	-- Run download; retry the master batch until no index fails
	local failed = requests.repositories_uri_master:download()
	while failed do
		repos_failed_download(failed)
		failed = requests.repositories_uri_master:download()
	end
	-- Collect indexes and parse them
	for _, repo in pairs(requests.known_repositories) do
		if repo.tp == 'repository' then -- ignore failed repositories
			local ok, err = pcall(repo_parse, repo)
			if not ok then
				-- TODO is this fatal?
				error(err)
			end
		end
	end
end
......@@ -332,21 +303,20 @@ to form single package object.
]]
function pkg_aggregate()
DBG("Aggregating packages together")
for _, repo in pairs(requests.known_repositories_all) do
for _, cont in pairs(repo.content) do
if type(cont) == 'table' and cont.tp == 'pkg-list' then
for name, candidate in pairs(cont.list) do
if not available_packages[name] then
available_packages[name] = {candidates = {}, modifiers = {}}
end
table.insert(available_packages[name].candidates, candidate)
if candidate.Provides then -- Add this candidate to package it provides
for p in candidate.Provides:gmatch("[^, ]+") do
if not available_packages[p] then
available_packages[p] = {candidates = {}, modifiers = {}}
end
table.insert(available_packages[p].candidates, candidate)
for _, repo in pairs(requests.known_repositories) do
if repo.tp == "parsed-repository" then
-- TODO this content design is invalid as there can be multiple packages of same name in same repository with different versions
for name, candidate in pairs(repo.content) do
if not available_packages[name] then
available_packages[name] = {candidates = {}, modifiers = {}}
end
table.insert(available_packages[name].candidates, candidate)
if candidate.Provides then -- Add this candidate to package it provides
for p in candidate.Provides:gmatch("[^, ]+") do
if not available_packages[p] then
available_packages[p] = {candidates = {}, modifiers = {}}
end
table.insert(available_packages[p].candidates, candidate)
end
end
end
......@@ -467,10 +437,7 @@ function pkg_aggregate()
end
function run()
local repo_errors = get_repos()
if repo_errors then
WARN("Not all repositories are available")
end
get_repos()
pkg_aggregate()
end
......
......@@ -261,7 +261,7 @@ local function sat_build(sat, pkgs, requests)
}
-- Go trough requests and add them to SAT
for _, req in ipairs(requests) do
if not pkgs[req.package.name] and not utils.arr2set(req.ignore or {})["missing"] then
if not pkgs[req.package.name] and not req.optional then
error(utils.exception('inconsistent', "Requested package " .. req.package.name .. " doesn't exists."))
end
local req_var = sat:var()
......@@ -388,7 +388,7 @@ local function build_plan(pkgs, requests, sat, satmap)
inwstack[name] = #wstack + 1 -- Signal that we are working on this package group.
table.insert(wstack, name)
for _, p in pkg_dep_iterate(utils.multi_index(pkg, 'modifier', 'deps') or {}) do -- plan package group dependencies
pkg_plan(p, ignore_missing or utils.arr2set(utils.multi_index(pkg, 'modifier', 'ignore') or {})["deps"], false, "Package " .. name .. " requires package")
pkg_plan(p, ignore_missing or utils.multi_index(pkg, 'modifier', 'optional'), false, "Package " .. name .. " requires package")
end
if not next(candidates) then return end -- We have no candidate, but we passed previous check because it's virtual
local r = {}
......@@ -401,7 +401,7 @@ local function build_plan(pkgs, requests, sat, satmap)
else
no_pkg_candidate = false
for _, p in pkg_dep_iterate(utils.multi_index(candidate, 'deps') or {}) do
pkg_plan(p, ignore_missing or utils.arr2set(utils.multi_index(pkg, 'modifier', 'ignore') or {})["deps"], false, "Package " .. name .. " requires package")
pkg_plan(p, ignore_missing or utils.multi_index(pkg, 'modifier', 'optional'), false, "Package " .. name .. " requires package")
end
end
end
......@@ -434,7 +434,7 @@ local function build_plan(pkgs, requests, sat, satmap)
for _, req in pairs(requests) do
if sat[satmap.req2sat[req]] then -- Plan only if we can satisfy given request
if req.tp == "install" then -- And if it is install request, uninstall requests are resolved by not being planned.
local pln = pkg_plan(req.package, false, utils.arr2set(req.ignore or {})["missing"], 'Requested package')
local pln = pkg_plan(req.package, false, req.optional, 'Requested package')
-- Note that if pln is nil than we ignored missing package. We have to compute with that here
if pln then
if req.reinstall then
......
......@@ -20,18 +20,17 @@ along with Updater. If not, see <http://www.gnu.org/licenses/>.
local next = next
local error = error
local ipairs = ipairs
local pcall = pcall
local table = table
local WARN = WARN
local INFO = INFO
local DIE = DIE
local md5 = md5
local sha256 = sha256
local reexec = reexec
local LS_CONF = LS_CONF
local LS_PLAN = LS_PLAN
local LS_DOWN = LS_DOWN
local update_state = update_state
local log_event = log_event
local utils = require "utils"
local syscnf = require "syscnf"
local sandbox = require "sandbox"
......@@ -56,16 +55,13 @@ end
local function required_pkgs(entrypoint)
-- Get the top-level script
local tlc = sandbox.new('Full')
local ep_uri = uri(tlc, entrypoint)
local ok, tls = ep_uri:get()
if not ok then error(tls) end
local entry_chunk, entry_uri = utils.uri_content(entrypoint, nil, {})
local merge = {
-- Note: See requests.script for usage of this value
["parent_script_uri"] = entry_uri
}
update_state(LS_CONF)
--[[
Run the top level script with full privileges.
The script shall be part of updater anyway.
]]
local err = sandbox.run_sandboxed(tls, "", 'Full')
local err = sandbox.run_sandboxed(entry_chunk, entrypoint, 'Full', nil, merge)
if err and err.tp == 'error' then error(err) end
update_state(LS_PLAN)
-- Go through all the requirements and decide what we need
......@@ -103,28 +99,24 @@ function tasks_to_transaction()
INFO("Downloading packages")
update_state(LS_DOWN)
-- Start packages download
local uri_master = uri:new()
for _, task in ipairs(tasks) do
if task.action == "require" then
-- Strip sig verification off, packages from repos don't have their own .sig
-- files, but they are checked by hashes in the (already checked) index.
local veriopts = utils.shallow_copy(task.package.repo)
local veri = veriopts.verification or utils.private(task.package.repo).context.verification or 'both'
if veri == 'both' then
veriopts.verification = 'cert'
elseif veri == 'sig' then
veriopts.verification = 'none'
end
task.real_uri = uri(utils.private(task.package.repo).context, task.package.uri_raw, veriopts)
task.real_uri:cback(function()
log_event('D', task.name .. " " .. task.package.Version)
end)
task.file = syscnf.pkg_download_dir .. task.name .. '-' .. task.package.Version .. '.ipk'
task.real_uri = uri_master:to_file(task.package.Filename, task.file, task.package.repo.index_uri)
task.real_uri:add_pubkey() -- do not verify signatures (there are none)
-- TODO on failure: log_event('D', task.name .. " " .. task.package.Version)
end
end
uri_master:download() -- TODO what if error?
-- Now push all data into the transaction
utils.mkdirp(syscnf.pkg_download_dir)
for _, task in ipairs(tasks) do
if task.action == "require" then
local ok, data = task.real_uri:get()
if not ok then error(data) end
local ok, err = pcall(function() task.real_uri:finish() end)
if not ok then error(err) end
-- TODO check hash
--[[
if task.package.MD5Sum then
local sum = md5(data)
if sum ~= task.package.MD5Sum then
......@@ -137,9 +129,8 @@ function tasks_to_transaction()
error(utils.exception("corruption", "The sha256 sum of " .. task.name .. " does not match"))
end
end
local fpath = syscnf.pkg_download_dir .. task.name .. '-' .. task.package.Version .. '.ipk'
utils.write_file(fpath, data)
transaction.queue_install_downloaded(fpath, task.name, task.package.Version, task.modifier)
]]
transaction.queue_install_downloaded(task.file, task.name, task.package.Version, task.modifier)
elseif task.action == "remove" then
transaction.queue_remove(task.name)
else
......
This diff is collapsed.
/*
 * Copyright 2018, CZ.NIC z.s.p.o. (http://www.nic.cz/)
 *
 * This file is part of the turris updater.
 *
 * Updater is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Updater is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Updater. If not, see <http://www.gnu.org/licenses/>.
 */
// Asynchronous download manager built on the libcurl multi interface driven by
// a libevent event loop. Callers register downloads (to file or to memory) and
// then run the downloader to completion.
#ifndef UPDATER_DOWNLOAD_H
#define UPDATER_DOWNLOAD_H

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <event2/event.h>
#include <curl/curl.h>

#include "logging.h"

struct download_i;

// Download manager object
struct downloader {
	struct event_base *ebase; // libevent base
	CURLM *cmulti; // Curl multi instance
	struct event *ctimer; // Timer used by curl
	struct download_i **instances; // Registered instances
	size_t i_size, i_allocated; // instances size and allocated size
	int pending; // Number of still not downloaded instances
	struct download_i *failed; // Latest failed instance (used internally)
};

// Download options (additional options configuring security and more)
struct download_opts {
	long timeout; // Download timeout (including download retries)
	long connect_timeout; // Timeout for single connection
	int retries; // Number of full download retries
	bool follow_redirect; // If HTTP request 3xx should be followed
	bool ssl_verify; // If SSL should be verified
	bool ocsp; // If OCSP should be used for certificate verification
	const char *cacert_file; // Path to custom CA certificate bundle
	const char *capath; // Path to directory containing CA certificates
	const char *crl_file; // Path to custom CA crl
};

// Where a download instance delivers its data (selects the `out` union member)
enum download_output_type {
	DOWN_OUT_T_FILE,
	DOWN_OUT_T_BUFFER
};

// Download instance. Identifier of single download.
struct download_i {
	bool done; // What ever is download finished
	bool success; // If download was successful. Not valid if done is false.
	char error[CURL_ERROR_SIZE]; // error message if download fails
	int retries; // Number of reties we have
	struct downloader *downloader; // parent downloader
	enum download_output_type out_t; // What output this instance utilizes
	union {
		struct {
			int fd; // File descriptor
			char *fpath; // Path to output file
		} *file; // Used when writing to file
		struct {
			uint8_t *data; // Buffer for output data
			size_t size; // Amount of downloaded data
		} *buff; // Used when writing to buffer
	} out; // Output data (only the member matching out_t is valid)
	CURL *curl; // easy curl session
};

// Initialize new download manager
// parallel: Number of possible parallel downloadings
// Returns new instance of downloader
struct downloader *downloader_new(int parallel);

// Free given instance of downloader
void downloader_free(struct downloader*) __attribute__((nonnull));

// Run downloader and download all registered URLs
// return: NULL on success otherwise pointer to download instance that failed.
struct download_i *downloader_run(struct downloader*) __attribute__((nonnull));

// Remove all download instances from downloader
void downloader_flush(struct downloader*) __attribute__((nonnull));

// Set default values for download_opts
// opts: Allocated instance of download options to be set to defaults
// Note: strings in download_opts are set to NULL and previous values are NOT
// freed.
void download_opts_def(struct download_opts *opts) __attribute__((nonnull));

// Register given URL to be downloaded to file.
// url: URL data are downloaded from
// output_path: Path where data are going to be stored (written to)
// opts: Download options (does not have to exist during instance existence)
// Returns download instance
struct download_i *download_file(struct downloader *downloader, const char *url,
		const char *output_path, const struct download_opts *opts)
	__attribute__((nonnull(1, 2, 3, 4)));

// Register given URL to be downloaded to temporary file. Output file path is
// generated using mkstemp function.
// url: URL data are downloaded from
// output_template: Template for path where data are going to be stored (written
//   to). Passed string has to end with XXXXXX and is modified to contain used
//   path. This string should be freed only after download instance is freed.
// opts: Download options (does not have to exist during instance existence)
// Returns download instance
struct download_i *download_temp_file(struct downloader *downloader,
		const char *url, char *output_template, const struct download_opts *opts)
	__attribute__((nonnull(1, 2, 3, 4)));

// Register given URL to be downloaded to internal buffer.
// url: URL data are downloaded from
// opts: Download options (does not have to exist during instance existence)
// Returns download instance
struct download_i *download_data(struct downloader *downloader, const char *url,
		const struct download_opts *opts) __attribute__((nonnull(1, 2, 3)));

// Free download instance
void download_i_free(struct download_i*) __attribute__((nonnull));

// This is same as download_i_free but where download_i_free just frees downloaded
// buffer, this passes it to caller. Instance is freed the same way as in case of
// download_i_free but data buffer has to be freed later by caller.
// In other words this takes over the allocated buffer and frees the rest of the
// instance. This can be called only on instance that was created by download_data.
void download_i_collect_data(struct download_i*, uint8_t **data, size_t *size);

#endif
......@@ -20,6 +20,7 @@
#include "inject.h"
#include "util.h"
#include "logging.h"
#include <lauxlib.h>
void inject_func_n(lua_State *L, const char *module, const struct inject_func *inject, size_t count) {
// Inject the functions
......@@ -55,3 +56,9 @@ void inject_module(lua_State *L, const char *module) {
// Drop the _M, package, loaded
lua_pop(L, 3);
}
// Create a new metatable registered under name `meta` and make it its own
// __index table. The metatable is left on top of the Lua stack.
void inject_metatable_self_index(lua_State *L, const char *meta) {
	// luaL_newmetatable returns 1 only when it creates a fresh table, so this
	// asserts that no metatable of this name was registered before.
	ASSERT(luaL_newmetatable(L, meta) == 1);
	// Duplicate the metatable and store the copy in its own __index field.
	lua_pushvalue(L, -1);
	lua_setfield(L, -2, "__index");
}
......@@ -37,5 +37,7 @@ void inject_str_const(lua_State *L, const char *module, const char *name, const
void inject_int_const(lua_State *L, const char *module, const char *name, const int value) __attribute__((nonnull));
// Make the table on top of the stack a module. Drop the table from the stack.
void inject_module(lua_State *L, const char *module) __attribute__((nonnull));
// Create a new metatable on top of the stack that is self-indexing (its __index field is the table itself)
void inject_metatable_self_index(lua_State *L, const char *meta) __attribute__((nonnull));
#endif
......@@ -25,6 +25,8 @@
#include "journal.h"
#include "locks.h"
#include "arguments.h"
#include "syscnf.h"
#include "uri_lua.h"
#include "picosat.h"
#include <lua.h>
......@@ -948,6 +950,7 @@ struct {
{ LS_FAIL, "LS_FAIL"},
{ LST_PKG_SCRIPT, "LST_PKG_SCRIPT"},
{ LST_HOOK, "LST_HOOK"},
{ LST_USIGN, "LST_USIGN"},
};
// Various enum values that we want to inject
......@@ -1020,6 +1023,8 @@ struct interpreter *interpreter_create(struct events *events) {
// Some binary embedded modules
journal_mod_init(L);
locks_mod_init(L);
syscnf_mod_init(L);
uri_mod_init(L);
picosat_mod_init(L);
#ifdef COVERAGE
interpreter_load_coverage(result);
......
......@@ -154,11 +154,6 @@ enum log_level log_level_get(const char *level) {
return LL_UNKNOWN;
}
static const char *type_string[] = {
[LST_PKG_SCRIPT] = "pkg-script",
[LST_HOOK] = "hook"
};
// log_subproc cookie
struct c_log_subproc {
bool err; // Is this out or err
......@@ -168,7 +163,7 @@ struct c_log_subproc {
static ssize_t c_log_subproc_write(void *cookie, const char *buf, size_t size) {
struct c_log_subproc *cls = (struct c_log_subproc*)cookie;
size_t len = size;
if (would_log(LL_INFO))
if (would_log(cls->lsp->type == LST_USIGN ? LL_DBG : LL_INFO))
len = fwrite(buf, sizeof(char), size, cls->err ? stderr : stdout);
// This is memory buffer so there should be no problem to match system output
ASSERT(fwrite(buf, sizeof(char), len, cls->lsp->buffer.f) == len);
......@@ -203,7 +198,10 @@ void log_subproc_open(struct log_subproc *lsp, enum log_subproc_type type, const
cls->lsp = lsp;
lsp->err = fopencookie(cls, "w", fncs);
// Print info
INFO("%s", message);
if (type == LST_USIGN)
DBG("%s", message);
else
INFO("%s", message);
}
void log_subproc_close(struct log_subproc *lsp, char **output) {
......
......@@ -19,7 +19,9 @@
#ifndef UPDATER_LOGGING_H
#define UPDATER_LOGGING_H
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
......@@ -107,7 +109,8 @@ void setup_logging(enum log_level tty, enum log_level syslog);
// afterward.
enum log_subproc_type {
LST_PKG_SCRIPT, // This is post/pre install/rm script
LST_HOOK // This is updater's hook
LST_HOOK, // This is updater's hook
LST_USIGN // This is usign executable (this has explicitly lower logging level set to DBG)
};
struct log_subproc {
......
......@@ -99,17 +99,18 @@ Function `subprocess` is defined as follows:
predefined constants are as follows:
- `LST_PKG_SCRIPT` Any script provided by package (pre/post inst/rm)
- `LST_HOOK` Hook script executed on some updater state
- `LST_USIGN` usign binary used for signature validation
* `message` is string describing what this subprocess is to user. It's human
readable description of executed command.
* `timeout` is time in seconds after which subprocess will be automatically
* `timeout` is time in milliseconds after which subprocess will be automatically
killed.
* `callback` is an optional function that is called in the subprocess just before
it executes the given command. If you don't want to specify it then you can pass nil
or you can just drop it out (in that case the command is expected on this argument
Place). This functions should has no arguments and shouldn't return anything.
Place). This functions should have no arguments and shouldn't return anything.
* `command` is any arbitrary number of string arguments that are passed as command
and its additional arguments.
......
/*
* Copyright 2019, CZ.NIC z.s.p.o. (http://www.nic.cz/)
*
* This file is part of the turris updater.
*
* Updater is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Updater is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Updater. If not, see <http://www.gnu.org/licenses/>.
*/
#define _GNU_SOURCE
#include "multiwrite.h"
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
void mwrite_init(struct mwrite* mw) {
memset(mw, 0, sizeof *mw);
}
// Take ownership of an already opened file descriptor and append it to the
// output set. A descriptor of -1 signals a failed open (errno was set by the
// opener) and is rejected.
// Returns false for -1, true on success.
bool mwrite_add(struct mwrite *mw, int fd) {
	if (fd == -1) // open failed (errno is set by open)
		return false;
	size_t pos = mw->count++;
	mw->fds = realloc(mw->fds, mw->count * sizeof *mw->fds);
	mw->fds[pos] = fd;
	return true;
}
// Open pathname for writing and register it with the handler.
// The provided flags are OR'ed with O_WRONLY and passed to open(2); when they
// include O_CREAT the file is created with mode 0666 (adjusted by the umask).
// Returns false if open fails (errno is set by open), true otherwise.
bool mwrite_open(struct mwrite *mw, const char *pathname, int flags) {
	// Fix: the previous code called open(pathname, flags, O_WRONLY), which
	// passed O_WRONLY (value 1) as the mode_t argument — newly created files
	// ended up with permissions 0001 and O_WRONLY never reached the flags.
	// O_WRONLY belongs in the flags; the third argument is the creation mode.
	int fd = open(pathname, flags | O_WRONLY, 0666);
	return mwrite_add(mw, fd);
}
// Create a unique temporary file from the given template using mkostemp and
// register the resulting descriptor. The template is modified in place to the
// path that was actually used.
// Returns false if mkostemp fails (errno is set), true otherwise.
bool mwrite_mkstemp(struct mwrite *mw, char *template, int flags) {
	return mwrite_add(mw, mkostemp(template, flags));
}
// Write count bytes from buf to every registered file descriptor.
// Unlike plain write(2) this keeps writing until the whole buffer went out to
// each descriptor: short writes are continued from where they stopped and
// writes interrupted by a signal (EINTR) are retried.
// Returns MWRITE_R_OK on success, MWRITE_R_STD_ERROR on a system error (use
// errno) and MWRITE_R_UNABLE_TO_WRITE when a descriptor accepts zero bytes.
// On error some files may already contain the data while others do not.
enum mwrite_result mwrite_write(struct mwrite *mw, const void *buf, size_t count) {
	for (size_t i = 0; i < mw->count; i++) {
		const char *lbuf = buf;
		size_t tow = count; // bytes still to be written to this descriptor
		// Fixes over the previous version: the EINTR test was inverted
		// (real errors were retried forever while EINTR returned an error),
		// the buffer pointer was never advanced after a short write, and a
		// count of 0 triggered a spurious zero-length write that was then
		// reported as MWRITE_R_UNABLE_TO_WRITE.
		while (tow > 0) {
			ssize_t ret = write(mw->fds[i], lbuf, tow);
			if (ret < 0) {
				if (errno == EINTR)
					continue; // interrupted before any byte was written, try again
				return MWRITE_R_STD_ERROR;
			}
			if (ret == 0)
				return MWRITE_R_UNABLE_TO_WRITE;
			lbuf += ret; // skip over the part that is already written
			tow -= ret;
		}
	}
	return MWRITE_R_OK;
}
// Convenience wrapper around mwrite_write: writes a NUL-terminated string
// (without the terminating NUL).
enum mwrite_result mwrite_str_write(struct mwrite *mw, const char *str) {
	size_t len = strlen(str);
	return mwrite_write(mw, str, len);
}
// Close every registered descriptor, release the descriptor array and reset
// the handler to its freshly initialized state.
// Returns false as soon as one close fails (errno is set); in that case the
// remaining descriptors stay open and the handler is left as it was.
bool mwrite_close(struct mwrite *mw) {
	for (size_t i = 0; i < mw->count; i++) {
		int ret;
		do
			ret = close(mw->fds[i]);
		while (ret != 0 && errno == EINTR); // retry a close interrupted by signal
		if (ret != 0)
			return false;
	}
	free(mw->fds);
	mwrite_init(mw);
	return true;
}
/*
* Copyright 2019, CZ.NIC z.s.p.o. (http://www.nic.cz/)
*
* This file is part of the Turris Updater.
*
* Updater is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Updater is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Updater. If not, see <http://www.gnu.org/licenses/>.
*/
/* This implements a way to write to multiple files at once. It is not optimal in
* any way. It opens FD for every file and writes data in loop. There seems to be
* no existing approach on how to open multiple files and write to them all at
* once (something like having multiple files under one file descriptor). If there
* is such API or approach possible then this code should be dropped and all usage
* should be replaced with given API.
*/
#ifndef UPDATER_MULTIWRITE_H
#define UPDATER_MULTIWRITE_H
#include <stdlib.h>
#include <stdbool.h>
// MultiWrite handler
struct mwrite {
	size_t count; // number of registered file descriptors
	int *fds; // array of `count` open file descriptors
};
// Result of mwrite_write function
enum mwrite_result {
	MWRITE_R_OK = 0, // Write was successful
	MWRITE_R_STD_ERROR, // There was a standard error (use errno)
	MWRITE_R_UNABLE_TO_WRITE, // Write is unable to proceed (zero bytes written)
};
// Handler initialization function. Please call this before any other function.
void mwrite_init(struct mwrite*);
// Open pathname for writing. All subsequent calls to mwrite_write would write
// also to this file if open is successful.
// You can provide additional flags. These flags are same as in case of open.
// It returns false if error occurred (in such case errno is set), otherwise true
// is returned.
bool mwrite_open(struct mwrite*, const char *pathname, int flags);
// This is same as mwrite_open but instead of using open it uses mkostemp to open
// file descriptor.
bool mwrite_mkstemp(struct mwrite*, char *template, int flags);
// Write data to mwrite
// This is pretty much the same as standard write. The only difference is that
// this implementation always writes all provided data unless an error is
// detected.
// This returns MWRITE_R_OK if write was successful. MWRITE_R_STD_ERROR is
// returned when a standard error is detected and MWRITE_R_UNABLE_TO_WRITE is
// returned if write is unable to proceed (probably because of not enough space).
// Note that if an error is detected then some writes can be completed and
// others might not be. This means that on error there are no guarantees on the
// state of the written files.
enum mwrite_result mwrite_write(struct mwrite*, const void *buf, size_t count);
// Same as mwrite_write but calculates size of string using strlen.
enum mwrite_result mwrite_str_write(struct mwrite*, const char *str);
// Close all previously opened files. This effectively returns handler to same
// state as it is after mwrite_init call.
// Returns false if error occurred (in such case errno is set), otherwise true is
// returned. Note that on error not all file descriptors are closed and that there
// is currently no recovery way. You should exit program instead.
bool mwrite_close(struct mwrite*);
#endif
......@@ -193,7 +193,6 @@ int vsubprocvoc(int timeout, FILE *fd[2], subproc_callback callback, void *data,
const char *argv[argc];
size_t i = 0;
while((argv[i++] = va_arg(args, const char *)) != NULL);
argv[argc - 1] = NULL;
return subprocloc(timeout, fd, callback, data, cmd, argv);
}
......@@ -227,7 +226,7 @@ int lsubproclc(enum log_subproc_type type, const char *message, char **output, i
}
int lvsubprocv(enum log_subproc_type type, const char *message, char **output, int timeout, const char *cmd, va_list args) {
return lsubprocvc(type, message, output, timeout, NULL, NULL, cmd, args);
return lvsubprocvc(type, message, output, timeout, NULL, NULL, cmd, args);
}
int lvsubprocvc(enum log_subproc_type type, const char *message, char **output, int timeout, subproc_callback callback, void *data, const char *cmd, va_list args) {
......
......@@ -19,7 +19,9 @@
#ifndef UPDATER_SUBPROCESS_H
#define UPDATER_SUBPROCESS_H
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdarg.h>
#include <stdio.h>
#include "logging.h"
......
/*
* Copyright 2019, CZ.NIC z.s.p.o. (http://www.nic.cz/)
*
* This file is part of the Turris Updater.
*
* Updater is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Updater is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Updater. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include "syscnf.h"
#include "util.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <pwd.h>
#include <regex.h>
#include <uthash.h>
#include <lauxlib.h>
#include <lualib.h>
#include "logging.h"
#include "inject.h"
// Indexes into the path tables below, one entry per configurable system path.
enum e_paths {
	P_ROOT_DIR,
	P_FILE_STATUS,
	P_DIR_INFO,
	P_DIR_PKG_UNPACKED,
	P_DIR_PKG_DOWNLOAD,
	P_DIR_OPKG_COLLIDED,
	P_LAST
};
// Compiled-in default locations, relative to the root directory.
static const char* const default_paths[] = {
	[P_ROOT_DIR] = "/",
	[P_FILE_STATUS] = "/usr/lib/opkg/status",
	[P_DIR_INFO] = "/usr/lib/opkg/info/",
	[P_DIR_PKG_UNPACKED] = "/usr/share/updater/unpacked/",
	[P_DIR_PKG_DOWNLOAD] = "/usr/share/updater/download/",
	[P_DIR_OPKG_COLLIDED] = "/usr/share/updater/collided/",
};
// Currently effective paths (root prefix + default); NULL means "not set".
static char* paths[] = {
	[P_ROOT_DIR] = NULL,
	[P_FILE_STATUS] = NULL,
	[P_DIR_INFO] = NULL,
	[P_DIR_PKG_UNPACKED] = NULL,
	[P_DIR_PKG_DOWNLOAD] = NULL,
	[P_DIR_OPKG_COLLIDED] = NULL,
};
// One field=content pair of an os-release style file, kept in a uthash table
// keyed by field.
struct os_release_data {
	char *field; // field name (hash key)
	char *content; // field value
	UT_hash_handle hh;
};
static struct os_release_data *osr = NULL; // presumably os-release of the target (root) system — confirm against users
static struct os_release_data *osr_host = NULL; // presumably os-release of the host system — confirm against users
// Replace the stored path for entry tp.
// When value is non-NULL the new path is value concatenated with the
// compiled-in default path; a NULL value resets the entry to "not set".
// Any previously stored string is released.
void set_path(enum e_paths tp, const char *value) {
	free(paths[tp]); // free(NULL) is a safe no-op
	paths[tp] = NULL;
	if (value)
		asprintf(&paths[tp], "%s%s", value, default_paths[tp]);
}
void set_root_dir(const char *root) {
char *pth = NULL;
if (root) {
if (root[0] == '/')
pth = aprintf("%s", root);
else if (root[0] == '~' && root[1] == '/') {
struct passwd *pw = getpwuid(getuid());
pth = aprintf("%s%s", pw->pw_dir, root + 1);
} else {
char *cwd = getcwd(NULL, 0);
pth = aprintf("%s/%s", cwd, root);
free(cwd);
}
size_t last = strlen(pth) - 1;
while (last > 0 && pth[last] == '/')
pth[last--] = '\0';
}
set_path(P_ROOT_DIR, pth);
set_path(P_FILE_STATUS, pth);
set_path(P_DIR_INFO, pth);
set_path(P_DIR_PKG_UNPACKED, pth);
set_path(P_DIR_PKG_DOWNLOAD, pth);
set_path(P_DIR_OPKG_COLLIDED, pth);
TRACE(