Commit ec2b52ff authored by Pavel Spirek

Another code cleanup, NACM bugfixes, version bump

parent 42980f58
 PROJECT = jetconf
-VERSION = 0.3.0
+VERSION = 0.3.1
 .PHONY = tags deps install-deps test
 tags:
......
@@ -4,7 +4,7 @@ import getopt
 import logging
 import sys
-from pkg_resources import get_distribution
+from pkg_resources import get_distribution, DistributionNotFound
 from colorlog import error, info
 from . import config, jetconf
@@ -25,7 +25,10 @@ def main():
        sys.exit(1)

    # Get Jetconf version
-    jetconf_version = get_distribution("jetconf").version
+    try:
+        jetconf_version = get_distribution("jetconf").version
+    except DistributionNotFound:
+        jetconf_version = "(not found)"

    # Parse command line arguments
    config_file = "config.yaml"
......
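The version lookup above now degrades gracefully when Jetconf runs from a source tree that was never installed. A minimal sketch of the same pkg_resources pattern; the helper name and fallback string are illustrative only:

    from pkg_resources import get_distribution, DistributionNotFound

    def detect_version(dist_name: str = "jetconf") -> str:
        # Ask setuptools metadata for the installed version; if the
        # distribution is not installed (e.g. running from a checkout),
        # return a placeholder instead of crashing at startup.
        try:
            return get_distribution(dist_name).version
        except DistributionNotFound:
            return "(not found)"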
@@ -86,9 +86,8 @@ class DataChange:
 class UsrChangeJournal:
-    def __init__(self, root_origin: InstanceNode, transaction_opts: Optional[JsonNodeT]):
+    def __init__(self, root_origin: InstanceNode):
         self._root_origin = root_origin
-        self._transaction_opts = transaction_opts
         self._journal = []  # type: List[DataChange]

     def get_root_head(self) -> InstanceNode:
@@ -156,7 +155,7 @@ class UsrChangeJournal:
         # Call commit begin hook
         begin_hook_failed = False
         try:
-            ds.commit_begin_callback(self._transaction_opts)
+            ds.commit_begin_callback()
         except Exception as e:
             error("Exception occured in commit_begin handler: {}".format(epretty(e)))
             begin_hook_failed = True
@@ -177,7 +176,7 @@ class UsrChangeJournal:
         end_hook_abort_failed = False
         if not (begin_hook_failed or conf_handler_failed):
             try:
-                ds.commit_end_callback(self._transaction_opts, failed=False)
+                ds.commit_end_callback(failed=False)
             except Exception as e:
                 error("Exception occured in commit_end handler: {}".format(epretty(e)))
                 end_hook_failed = True
@@ -185,7 +184,7 @@ class UsrChangeJournal:
         if begin_hook_failed or conf_handler_failed or end_hook_failed:
             try:
                 # Call commit_end callback again with "failed" argument set to True
-                ds.commit_end_callback(self._transaction_opts, failed=True)
+                ds.commit_end_callback(failed=True)
             except Exception as e:
                 error("Exception occured in commit_end handler (abort): {}".format(epretty(e)))
                 end_hook_abort_failed = True
@@ -205,10 +204,6 @@ class UsrChangeJournal:
 class BaseDatastore:
     def __init__(self, dm: DataModel, with_nacm: bool=False):
-        def _blankfn(*args, **kwargs):
-            pass
-
-        self.name = ""
         self.nacm = None  # type: NacmConfig
         self._data = None  # type: InstanceNode
         self._data_history = []  # type: List[InstanceNode]
@@ -217,6 +212,10 @@ class BaseDatastore:
         self._data_lock = Lock()
         self._lock_username = None  # type: str
         self._usr_journals = {}  # type: Dict[str, UsrChangeJournal]

+        def _blankfn(*args, **kwargs):
+            pass
+
         self.commit_begin_callback = _blankfn  # type: Callable[..., bool]
         self.commit_end_callback = _blankfn  # type: Callable[..., bool]
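With the transaction options gone, the commit hooks become plain callables: commit_begin_callback() takes no arguments and commit_end_callback() only the failed flag. A hedged sketch of wiring backend hooks onto an already constructed BaseDatastore; the hook bodies and the datastore variable are assumptions, not code from this commit:

    def my_commit_begin() -> bool:
        # e.g. open a transaction towards the managed device or database
        return True

    def my_commit_end(failed: bool = False) -> bool:
        # commit on success, roll back when the datastore reports failure
        return not failed

    # datastore is assumed to be an existing BaseDatastore instance
    datastore.commit_begin_callback = my_commit_begin
    datastore.commit_end_callback = my_commit_end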
@@ -236,15 +235,17 @@ class BaseDatastore:
         else:
             return self._data

+    # Returns the root node of YANG library data tree
     def get_yl_data_root(self) -> InstanceNode:
         return self._yang_lib_data

+    # Journal manipulation methods
     def make_user_journal(self, username: str, transaction_opts: Optional[JsonNodeT]):
         usr_journal = self._usr_journals.get(username)
         if usr_journal is not None:
             raise StagingDataException("Transaction for user \"{}\" already opened".format(username))
         else:
-            self._usr_journals[username] = UsrChangeJournal(self._data, transaction_opts)
+            self._usr_journals[username] = UsrChangeJournal(self._data)

     def get_user_journal(self, username: str):
         usr_journal = self._usr_journals.get(username)
@@ -260,9 +261,15 @@ class BaseDatastore:
         else:
             raise StagingDataException("Transaction for user \"{}\" not opened".format(username))

-    # Returns the root node of data tree
+    # Returns the root node of staging data tree (starts a new transaction if nonexistent)
     def get_data_root_staging(self, username: str) -> InstanceNode:
-        usr_journal = self.get_user_journal(username)
+        try:
+            usr_journal = self.get_user_journal(username)
+        except StagingDataException:
+            info("Starting new transaction for user \"{}\"".format(username))
+            self.make_user_journal(username, None)
+            usr_journal = self.get_user_journal(username)
+
         root = usr_journal.get_root_head()
         return root
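Because get_data_root_staging() now opens a journal on demand, callers no longer need the explicit try/except dance that is removed elsewhere in this commit. An illustrative usage sketch; the datastore object and the username are placeholders:

    # First access creates the per-user change journal automatically ...
    staging_root = datastore.get_data_root_staging("admin")
    # ... and later accesses keep working on the same journal.
    staging_root = datastore.get_data_root_staging("admin")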
@@ -293,7 +300,7 @@ class BaseDatastore:
             debug_data("Cannot find schema node for " + sch_pth)
         return sn

-    # Notify data observers about change in datastore
+    # Run configuration data handlers
     def run_conf_edit_handler(self, ii: InstanceRoute, ch: DataChange):
         sch_pth_list = list(filter(lambda n: isinstance(n, MemberName), ii))
@@ -350,13 +357,7 @@ class BaseDatastore:
         ii = self.parse_ii(rpc.path, rpc.path_format)

         if staging:
-            try:
-                root = self.get_data_root_staging(rpc.username)
-            except StagingDataException:
-                # root = self._data
-                info("Starting transaction for user \"{}\"".format(rpc.username))
-                self.make_user_journal(rpc.username, None)
-                root = self.get_data_root_staging(rpc.username)
+            root = self.get_data_root_staging(rpc.username)
         else:
             root = self._data
@@ -420,6 +421,8 @@ class BaseDatastore:
            else:
                state_handler_val = sdh.generate_item(ii, rpc.username, staging)
            state_root_n = sdh.schema_node.orphan_entry(state_handler_val)
+        else:
+            state_root_n = None

         # Select desired subnode from handler-generated content
         ii_prefix, ii_rel = sdh.schema_node.split_instance_route(ii)
@@ -439,13 +442,15 @@ class BaseDatastore:
                if isinstance(node.value, ObjectValue):
                    if node.schema_node is state_root_sn.parent:
                        ii_gen = DataHelpers.node_get_ii(node)
-                        sdh = STATE_DATA_HANDLES.get_handler(state_root_sch_pth)
-                        if sdh is not None:
+                        _sdh = STATE_DATA_HANDLES.get_handler(state_root_sch_pth)
+                        if _sdh is not None:
                            try:
-                                if isinstance(sdh, StateDataContainerHandler):
-                                    state_handler_val = sdh.generate_node(ii_gen, rpc.username, staging)
-                                elif isinstance(sdh, StateDataListHandler):
-                                    state_handler_val = sdh.generate_list(ii_gen, rpc.username, staging)
+                                if isinstance(_sdh, StateDataContainerHandler):
+                                    _state_handler_val = _sdh.generate_node(ii_gen, rpc.username, staging)
+                                elif isinstance(_sdh, StateDataListHandler):
+                                    _state_handler_val = _sdh.generate_list(ii_gen, rpc.username, staging)
+                                else:
+                                    _state_handler_val = None
                            except Exception as e:
                                error("Error occured in state data generator (sn: {})".format(state_root_sch_pth))
                                error(epretty(e))
@@ -457,7 +462,7 @@ class BaseDatastore:
                            nm_name = state_root_sn.qual_name[1] + ":" + state_root_sn.qual_name[0]
                            # print("nm={}".format(nm_name))
-                            node = node.put_member(nm_name, state_handler_val, raw=True).up()
+                            node = node.put_member(nm_name, _state_handler_val, raw=True).up()
                        else:
                            for key in node:
                                member = node[key]
@@ -813,16 +818,15 @@ class BaseDatastore:
         else:
             raise NoHandlerError("No active changelist for user \"{}\"".format(rpc.username))

-    # Locks datastore data
+    # Lock datastore data
     def lock_data(self, username: str = None, blocking: bool=True):
         ret = self._data_lock.acquire(blocking=blocking, timeout=1)
         if ret:
             self._lock_username = username or "(unknown)"
-            debug_data("Acquired lock in datastore \"{}\" for user \"{}\"".format(self.name, username))
+            debug_data("Acquired datastore lock for user \"{}\"".format(username))
         else:
             raise DataLockError(
-                "Failed to acquire lock in datastore \"{}\" for user \"{}\", already locked by \"{}\"".format(
-                    self.name,
+                "Failed to acquire datastore lock for user \"{}\", already locked by \"{}\"".format(
                     username,
                     self._lock_username
                 )
@@ -831,7 +835,7 @@ class BaseDatastore:
     # Unlock datastore data
     def unlock_data(self):
         self._data_lock.release()
-        debug_data("Released lock in datastore \"{}\" for user \"{}\"".format(self.name, self._lock_username))
+        debug_data("Released datastore lock for user \"{}\"".format(self._lock_username))
         self._lock_username = None

     # Load data from persistent storage
......
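For reference, the lock helpers simplified above are typically used in an acquire/edit/release pattern. A minimal sketch assuming an existing datastore object; the error handling shown is illustrative:

    try:
        datastore.lock_data(username="admin")
    except DataLockError as e:
        # somebody else holds the lock; lock_data raises instead of waiting forever
        error(epretty(e))
    else:
        try:
            pass  # edit staging data, append journal entries, commit, ...
        finally:
            datastore.unlock_data()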
@@ -392,12 +392,7 @@ def _post(ds: BaseDatastore, pth: str, username: str, data: str) -> HttpResponse
     ds.lock_data(username)

     try:
-        try:
-            staging_root = ds.get_data_root_staging(rpc1.username)
-        except StagingDataException:
-            info("Starting transaction for user \"{}\"".format(rpc1.username))
-            ds.make_user_journal(rpc1.username, None)
-            staging_root = ds.get_data_root_staging(rpc1.username)
+        staging_root = ds.get_data_root_staging(rpc1.username)
         new_root = ds.create_node_rpc(staging_root, rpc1, json_data)
         ds.add_to_journal_rpc(ChangeType.CREATE, rpc1, json_data, *new_root)
         http_resp = HttpResponse.empty(HttpStatus.Created)
@@ -490,12 +485,7 @@ def _put(ds: BaseDatastore, pth: str, username: str, data: str) -> HttpResponse:
     ds.lock_data(username)

     try:
-        try:
-            staging_root = ds.get_data_root_staging(rpc1.username)
-        except StagingDataException:
-            info("Starting transaction for user \"{}\"".format(rpc1.username))
-            ds.make_user_journal(rpc1.username, None)
-            staging_root = ds.get_data_root_staging(rpc1.username)
+        staging_root = ds.get_data_root_staging(rpc1.username)
         new_root = ds.update_node_rpc(staging_root, rpc1, json_data)
         ds.add_to_journal_rpc(ChangeType.REPLACE, rpc1, json_data, *new_root)
         http_resp = HttpResponse.empty(HttpStatus.NoContent, status_in_body=False)
@@ -568,12 +558,7 @@ def _delete(ds: BaseDatastore, pth: str, username: str) -> HttpResponse:
     ds.lock_data(username)

     try:
-        try:
-            staging_root = ds.get_data_root_staging(rpc1.username)
-        except StagingDataException:
-            info("Starting transaction for user \"{}\"".format(rpc1.username))
-            ds.make_user_journal(rpc1.username, None)
-            staging_root = ds.get_data_root_staging(rpc1.username)
+        staging_root = ds.get_data_root_staging(rpc1.username)
         new_root = ds.delete_node_rpc(staging_root, rpc1)
         ds.add_to_journal_rpc(ChangeType.DELETE, rpc1, None, *new_root)
         http_resp = HttpResponse.empty(HttpStatus.NoContent, status_in_body=False)
......
@@ -3,7 +3,7 @@ from io import StringIO
 from threading import Lock
 from enum import Enum

-from colorlog import error, warning as warn, info
+from colorlog import error, info
 from typing import List, Set, Optional
 from yangson.datamodel import DataModel
@@ -19,7 +19,6 @@ from yangson.instance import (
 )
 from .helpers import DataHelpers, ErrorHelpers, LogHelpers
-from .errors import JetconfError

 epretty = ErrorHelpers.epretty
 debug_nacm = LogHelpers.create_module_dbg_logger(__name__)
@@ -45,9 +44,6 @@ class NacmRuleType(Enum):
     NACM_RULE_DATA = 3


 class NacmGroup:
     def __init__(self, name: str, users: List[str]):
         self.name = name
@@ -149,7 +145,7 @@ class DataRuleTree:
         else:
             self._print_rule_tree(io_str, rule_node.children, depth + 1, vbars + [depth])

-    def print_rule_tree(self) -> str:
+    def __str__(self) -> str:
         io_str = StringIO()
         io_str.write("----- NACM Data Rule tree -----\n")
         self._print_rule_tree(io_str, self.root, 0, [])
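Renaming print_rule_tree() to __str__() makes the rendering available through ordinary Python string conversion; for example, assuming rule_tree is a constructed DataRuleTree:

    print(str(rule_tree))
    debug_nacm("NACM rules:\n{}".format(rule_tree))  # format() calls __str__ implicitly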
@@ -180,14 +176,17 @@ class NacmConfig:
         self.nacm_groups = []
         self.rule_lists = []
         self._user_nacm_rpc = {}
+        self.enabled = False

         try:
-            nacm_json = self.nacm_ds.get_data_root()["ietf-netconf-acm:nacm"].value
+            nacm_n = self.nacm_ds.get_data_root()["ietf-netconf-acm:nacm"]
         except NonexistentInstance:
-            warn("Data does not contain \"ietf-netconf-acm:nacm\" node, NACM rules will be empty")
+            debug_nacm("Data does not contain \"/ietf-netconf-acm:nacm\" branch, NACM will not be enabled")
             return

+        nacm_json = nacm_n.add_defaults().value
         self.enabled = nacm_json["enable-nacm"]
         if not self.enabled:
             # NACM not enabled, no need to continue
             self.internal_data_lock.release()
@@ -197,15 +196,15 @@ class NacmConfig:
         self.default_write = Action.PERMIT if nacm_json["write-default"] == "permit" else Action.DENY
         self.default_exec = Action.PERMIT if nacm_json["exec-default"] == "permit" else Action.DENY

-        for group in nacm_json["groups"]["group"]:
+        for group in nacm_json.get("groups", {}).get("group", []):
             self.nacm_groups.append(NacmGroup(group["name"], group["user-name"]))

-        for rule_list_json in nacm_json["rule-list"]:
+        for rule_list_json in nacm_json.get("rule-list", []):
             rl = NacmRuleList()
             rl.name = rule_list_json["name"]
             rl.groups = rule_list_json["group"]

-            for rule_json in rule_list_json["rule"]:
+            for rule_json in rule_list_json.get("rule", []):
                 rule = NacmRule()
                 rule.name = rule_json.get("name")
                 rule.comment = rule_json.get("comment")
@@ -216,15 +215,14 @@ class NacmConfig:
                 if isinstance(access_perm_list, str) and (access_perm_list == "*"):
                     rule.access = set(Permission)
                 elif isinstance(access_perm_list, collections.Iterable):
-                    def perm_str2enum(perm_str: str):
-                        return {
-                            "read": Permission.NACM_ACCESS_READ,
-                            "create": Permission.NACM_ACCESS_CREATE,
-                            "update": Permission.NACM_ACCESS_UPDATE,
-                            "delete": Permission.NACM_ACCESS_DELETE,
-                            "exec": Permission.NACM_ACCESS_EXEC,
-                        }.get(perm_str)
-                    rule.access.update(map(perm_str2enum, access_perm_list))
+                    perm_str2enum = {
+                        "read": Permission.NACM_ACCESS_READ,
+                        "create": Permission.NACM_ACCESS_CREATE,
+                        "update": Permission.NACM_ACCESS_UPDATE,
+                        "delete": Permission.NACM_ACCESS_DELETE,
+                        "exec": Permission.NACM_ACCESS_EXEC,
+                    }
+                    rule.access.update(map(lambda x: perm_str2enum[x], access_perm_list))

                 if rule_json.get("rpc-name") is not None:
                     if rule.type != NacmRuleType.NACM_RULE_NOTSET:
@@ -298,7 +296,7 @@ class UserRuleSet:
         self.rule_lists = list(filter(lambda x: (set(user_groups_names) & set(x.groups)), config.rule_lists))

         self.rule_tree = DataRuleTree(dm, self.rule_lists)
-        debug_nacm("Rule tree for user \"{}\":\n{}".format(username, self.rule_tree.print_rule_tree()))
+        debug_nacm("Rule tree for user \"{}\":\n{}".format(username, str(self.rule_tree)))

     def check_data_node_permission(self, root: InstanceNode, ii: InstanceRoute, access: Permission) -> Action:
         if not self.nacm_enabled:
......
@@ -8,17 +8,6 @@ class OpHandlersContainer:
     def __init__(self, ds: BaseDatastore):
         self.ds = ds

-    def jetconf_conf_start(self, rpc: RpcInfo) -> JsonNodeT:
-        # try:
-        #     transaction_opts = rpc.op_input_args["options"]
-        # except (TypeError, KeyError):
-        #     transaction_opts = None
-        # self.ds.make_user_journal(rpc.username, transaction_opts)
-        self.ds.make_user_journal(rpc.username, None)
-
-        ret_data = {"status": "OK"}
-        return ret_data
-
     def jetconf_conf_status(self, rpc: RpcInfo) -> JsonNodeT:
         try:
             usr_journal = self.ds.get_user_journal(rpc.username)
@@ -97,7 +86,6 @@ class OpHandlersContainer:
 def register_op_handlers(ds: BaseDatastore):
     op_handlers_obj = OpHandlersContainer(ds)
-    # OP_HANDLERS.register(op_handlers_obj.jetconf_conf_start, "jetconf:conf-start")
     OP_HANDLERS.register(op_handlers_obj.jetconf_conf_status, "jetconf:conf-status")
     OP_HANDLERS.register(op_handlers_obj.jetconf_conf_reset, "jetconf:conf-reset")
     OP_HANDLERS.register(op_handlers_obj.jetconf_conf_commit, "jetconf:conf-commit")
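The registration calls kept above illustrate the generic way operation handlers are attached. A hedged sketch for a hypothetical custom operation; the module and operation names are made up for illustration:

    def example_op(rpc: RpcInfo) -> JsonNodeT:
        # return the JSON body of the RPC reply
        return {"status": "OK"}

    OP_HANDLERS.register(example_op, "example-module:example-op")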
......