#!/usr/bin/env python3
from argparse import (
    ArgumentParser,
    RawTextHelpFormatter,
)
from base64 import b64decode
from collections import namedtuple
from configparser import ConfigParser
from contextlib import contextmanager
from datetime import datetime
from enum import Enum
from hashlib import sha256
from itertools import chain
from os import (
    environ,
    execlp,
    getuid,
)
from pathlib import Path
from pwd import getpwuid
from re import (
    compile,
    match,
    search,
    sub,
)
from shutil import (
    copy,
    rmtree,
)
from signal import (
    Signals,
    getsignal,
    signal,
)
from socket import getfqdn
from stat import (
    S_IRUSR,
    S_IXUSR,
)
from subprocess import (
    CalledProcessError,
    CompletedProcess,
    run,
)
from sys import (
    exit,
    stderr,
    version_info,
)
from tarfile import open as taropen
from tempfile import mkdtemp

from yaml import (
    dump,
    full_load,
)

from verify_dca import verify_checksum

__version__ = (2, 15, 1)


def printflush(*args, **kwargs):
    """print() wrapper that always flushes the stream.

    Any caller-supplied ``flush`` keyword is overridden; assigning the key
    directly replaces the original redundant pop-then-set sequence.
    """
    kwargs['flush'] = True
    print(*args, **kwargs)


def error(msg, code=None):
    """Print *msg* to stderr; exit with *code* when one is given."""
    printflush(msg, file=stderr)
    if code is None:
        return
    exit(code)

def die(msg, error_code=1):
    """Report *msg* on stderr and exit with *error_code*.

    When CAPP_DEBUG is set and *msg* is an exception, its traceback is also
    printed before exiting.
    """
    debugging = environ.get('CAPP_DEBUG', None)
    if debugging and isinstance(msg, Exception):
        from traceback import print_exc
        print_exc()
    error(str(msg), error_code)


def check_python():
    """Abort with an error unless the interpreter is at least Python 3.7."""
    MIN_MAJ_VER = 3
    MIN_MIN_VER = 7
    # Tuple comparison: the original "major < 3 or minor < 7" test would
    # wrongly reject a hypothetical Python 4.0 (minor 0 < 7).
    if (version_info.major, version_info.minor) < (MIN_MAJ_VER, MIN_MIN_VER):
        die("Python3 version %d.%d minimum required" % (MIN_MAJ_VER, MIN_MIN_VER))


class PositionalFirstHelpFormatter(RawTextHelpFormatter):
    """Help formatter that lists positional arguments before optionals in usage."""

    def _format_actions_usage(self, actions, groups):
        # argparse hands us optionals first; a stable sort on "has option
        # strings" pushes them after positionals while preserving relative order.
        reordered = sorted(actions, key=lambda action: bool(action.option_strings))
        return super()._format_actions_usage(reordered, groups)


@contextmanager
def trap(handler, signals=(Signals.SIGTERM, Signals.SIGINT)):
    """Temporarily install *handler* for the given signals.

    Previous handlers are restored on exit.  The restore now sits in a
    ``finally`` clause so it also runs when the managed body raises
    (the original left the handlers installed in that case).
    """
    prevs = {}
    for s in signals:
        prevs[s] = getsignal(s)
        signal(s, handler)
    try:
        yield None
    finally:
        for s, s_handler in prevs.items():
            # getsignal() may return None for handlers not set from Python;
            # those cannot be reinstalled, so they are skipped.
            if s_handler is not None:
                signal(s, s_handler)


@contextmanager
def temp_dir():
    """Yield a fresh temporary directory Path, removed on exit.

    The cleanup function doubles as a SIGTERM/SIGINT handler via trap(), so
    it must accept the (signum, frame) arguments signal dispatch passes —
    the original zero-argument version would raise TypeError when a signal
    actually fired.
    """
    tmpd = Path(mkdtemp())

    def rm_tmpd(*_args):
        rmtree(tmpd, ignore_errors=True)
    with trap(rm_tmpd):
        try:
            yield tmpd
        finally:
            rm_tmpd()


def copy_tree(src, dst, ignored=None):
    """Recursively copy directory *src* into *dst*, creating *dst* as needed.

    ignored: optional predicate ``Path -> bool``; entries for which it returns
    True are skipped.  The original called ``ignored(item_src)`` even when the
    default ``None`` was used, so the function crashed without an explicit
    predicate; a None guard fixes that while keeping the interface unchanged.
    """
    if not dst.exists():
        dst.mkdir(parents=True, exist_ok=True)
    for item_src in src.iterdir():
        if ignored is not None and ignored(item_src):
            continue
        item_dst = dst / item_src.name
        if item_src.is_dir():
            copy_tree(item_src, item_dst, ignored)
        else:
            copy(item_src, item_dst)


class EnumNames(Enum):
    """Enum base class exposing the collection of its member names."""

    @classmethod
    def names(cls):
        """Return a view over every member name, in declaration order."""
        return cls.__members__.keys()


class Unit(Enum):
    """Binary size units; each member's value is its byte multiplier."""

    B = 1
    K = 1024
    M = 1024 ** 2
    G = 1024 ** 3

    def __rmul__(self, other):
        """number * Unit -> byte count."""
        if not isinstance(other, (int, float)):
            raise TypeError("Cannot do multiplication")
        return other * self.value

    def __rtruediv__(self, other):
        """number / Unit -> float amount expressed in this unit."""
        if not isinstance(other, (int, float)):
            raise TypeError("Cannot do division")
        return other / self.value

    def __rfloordiv__(self, other):
        """number // Unit -> integral amount expressed in this unit."""
        if not isinstance(other, (int, float)):
            raise TypeError("Cannot do division")
        return other // self.value


# Deployment environments recognized by the tool.
Env = EnumNames('Env', ('dev', 'integ', 'staging', 'demo', 'prod'))
# Every permission that can be granted to a user account, grouped by theme:
# DCA handling, application lifecycle, then user / key / right administration.
Right = EnumNames('Right', (
    'DCA_READ', 'DCA_WRITE', 'APP_LIST',
    'DEPLOY', 'START', 'STOP', 'STATUS', 'LOGS', 'EXEC', 'EXEC_USER',
    'USER_LIST', 'USER_ADD', 'USER_CHANGE', 'USER_DELETE',
    'PKEY_LIST', 'PKEY_ADD', 'PKEY_DELETE',
    'RIGHT_LIST', 'RIGHT_ADD', 'RIGHT_DELETE',
))


class CApp:
    def __init__(self):
        """Detect the docker compose flavour and set up all configuration paths.

        With CAPP_DRY_RUN set in the environment, a sandbox is created under
        /tmp/capp and the module-level ``run`` is replaced by a mock so that
        no real command (other than systemd-escape/openssl) is executed.
        """
        global run

        self.config = None
        docker_compose_version = run(['docker', 'compose', 'version'], capture_output=True, text=True)
        if docker_compose_version.returncode == 0 and 'Docker Compose' in docker_compose_version.stdout:
            self.dc = ['docker', 'compose']
        else:
            self.dc = ['docker-compose']
        self.dry_run = environ.get('CAPP_DRY_RUN', None) is not None
        if self.dry_run:
            Path('/tmp/capp').mkdir(parents=True, exist_ok=True)
            self.compose_dirs_conf_file = Path('/tmp/capp/compose-dirs.conf')
            with open(self.compose_dirs_conf_file, 'w', encoding='utf8') as f:
                f.write(f"compose_user={getpwuid(getuid()).pw_name}\ncompose_dir=/tmp/capp/compose\ndeps_file=deps\ntmpl_name=compose\n")
            Path('/tmp/capp/compose').mkdir(parents=True, exist_ok=True)
            Path('/tmp/capp/compose/deps').touch()
            self.capp_conf_file = Path('/tmp/capp/capp.conf')
            with open(self.capp_conf_file, 'w', encoding='utf8') as f:
                f.write("min_dca_version=2\nmax_mem_size=10G\nmax_proc=1000\ndefault_hostname=example.com\n")
            self.hooks_root_dir = Path('/tmp/capp/hooks.d')
            for a in ('deploy', 'undeploy'):
                for m in ('pre', 'post'):
                    (self.hooks_root_dir / f'{m}_{a}').mkdir(parents=True, exist_ok=True)
                    f = self.hooks_root_dir / f'{m}_{a}' / 'hook'
                    f.touch(mode=0o744)
            # Let's Encrypt blacklist file
            self.le_blacklist_file = Path('/tmp/capp/le_blacklist.txt')
            self.le_blacklist_file.touch()
            self.users_dir = Path('/tmp/capp/users')
            self.users_dir.mkdir(exist_ok=True)
            self.pubkeys_dir = Path('/tmp/capp/pubkeys')
            self.pubkeys_dir.mkdir(exist_ok=True)
            self.rights_dir = Path('/tmp/capp/rights')
            self.rights_dir.mkdir(exist_ok=True)
            self.dca_dir = Path('/tmp/capp/dca')
            self.dca_dir.mkdir(exist_ok=True)
            self.log_file = Path('/tmp/capp/capp.log')
            self.log_file.touch()
            real_run = run

            def mock_run(*args, **kwargs):
                if environ.get('CAPP_DEBUG', None):
                    print(f"run with args={args}, kwargs={kwargs}")
                if len(args[0]) > 0 and args[0][0] == 'systemd-escape':
                    return real_run(*args, **kwargs)
                elif len(args[0]) > 0 and args[0][0] == 'openssl':
                    return real_run(*args, **kwargs)
                elif len(args[0]) > 2 and args[0][0:2] == ['systemctl', 'status']:
                    # Fixed slice: the original compared args[0][0:1] (a single
                    # element) to a two-element list, so this mocked status
                    # branch was unreachable.
                    return CompletedProcess((), 0, stdout='Active: active (running)')
                else:
                    return CompletedProcess((), 0)
            run = mock_run
        else:
            self.compose_dirs_conf_file = Path('/etc/compose-dirs.conf')
            self.capp_conf_file = Path('/etc/capp.conf')
            self.hooks_root_dir = Path('/etc/capp/hooks.d')
            # Let's Encrypt blacklist file
            self.le_blacklist_file = Path('/etc/capp/le_blacklist.txt')
            self.users_dir = Path('/etc/capp/users')
            self.pubkeys_dir = Path('/etc/capp/pubkeys')
            self.rights_dir = Path('/etc/capp/rights')
            self.dca_dir = Path('/home/deploy/dca')
            self.log_file = Path('/var/log/capp.log')

    def _append_usage_to_subcommands(self, sps):
        # Enrich each subcommand's one-line help with its own usage string.
        # NOTE(review): relies on argparse private internals
        # (_name_parser_map, _choices_actions) — may break if the stdlib
        # implementation changes; confirm on Python upgrades.
        for name, sp in sps._name_parser_map.items():
            ca = next(iter(ca for ca in sps._choices_actions if ca.metavar == name), None)
            if ca:
                # Strip the "usage: prog ... name " prefix, keeping only the
                # argument part of the subparser's usage, indented two spaces.
                usage = sub(rf'.+ {name} ', '  ', sp.format_usage())
                ca.help += f'\n{usage}'

    def read_arguments(self):
        """Build the complete argparse CLI and parse sys.argv into self.actions.

        Every action and sub-action binds its handler via set_defaults(func=...).
        Two dead ``epilog`` assignments (immediately overwritten, in the build
        and start sections) and a redundant deploy description re-assignment
        were removed; the resulting CLI is unchanged.
        """
        perms_warning = "Some actions may require specific rights.\nThose are shown in each command help.\nRights are grouped in roles."
        parser = ArgumentParser(formatter_class=PositionalFirstHelpFormatter, epilog=perms_warning)
        parser.add_argument('-V', '--version', action='version', version='.'.join(str(p) for p in __version__))
        sps = parser.add_subparsers(title='actions', metavar='ACTION', required=True,
                                    description="Use ACTION -h|--help to get full help on any action",
                                    help="One of the following action is required\n ")
        # dcas
        descr = "List all Docker Compose Archives (DCAs) that can be deployed."
        epilog = (
            "Required rights:"
            f"\n- {Right.DCA_READ.name}"
        )
        sp = sps.add_parser('dcas', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        sp.add_argument('--check', action='store_true',
                        help="verify each archive checksum")
        sp.set_defaults(func=self.action_dcas)
        # deploy
        descr = (
            "Deploy the specified DCA file,"
            "\noverwriting any existing application with the same name and environment."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.DCA_READ.name}"
            f"\n- {Right.DEPLOY.name} with env and app regexes matching the environment name and app name accordingly"
        )
        sp = sps.add_parser('deploy', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        sp.add_argument('dca_file', metavar='DCA_FILE',
                        help="DCA file as listed by the 'dcas' command")
        sp.add_argument('--clean', action='store_true',
                        help="first stop and remove all volumes")
        sp.add_argument('--nostart', action='store_false', dest='start',
                        help="deploy the archive but do not start the application")
        sp.set_defaults(func=self.action_deploy)

        # app env shortcut func
        def add_app_env_args(sp):
            sp.add_argument('app', metavar='APP_NAME', help="Application name")
            sp.add_argument('env', metavar='ENV', help=f"Environment name, one of {', '.join(env.name for env in Env)}")
        # undeploy
        descr = (
            "Undeploy the specified application by first stopping it."
            "\nNo data removed unless all is specified."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.DEPLOY.name} with env and app regexes matching the environment name and app name accordingly"
        )
        sp = sps.add_parser('undeploy', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        add_app_env_args(sp)
        sp.add_argument('--all', action='store_true',
                        help="Also remove all volumes and images")
        sp.set_defaults(func=self.action_undeploy)
        # apps
        descr = "List all applications (name and environment) configured on this node"
        epilog = (
            "Required rights:"
            f"\n- {Right.APP_LIST.name}"
        )
        sp = sps.add_parser('apps', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        sp.add_argument('--verbose', action='store_true',
                        help="Show status while listing")
        sp.set_defaults(func=self.action_apps)
        # build
        descr = (
            "Build (and pull) the application services images."
            "\nThe application should already exist on this node."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.START.name} with env and app regexes matching the environment name and app name accordingly"
        )
        sp = sps.add_parser('build', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        add_app_env_args(sp)
        sp.set_defaults(func=self.action_build)
        # start
        descr = (
            "Start the application specified."
            "\nThe application should already exist on this node."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.START.name} with env and app regexes matching the environment name and app name accordingly"
        )
        sp = sps.add_parser('start', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        add_app_env_args(sp)
        sp.set_defaults(func=self.action_start)
        # stop
        descr = (
            "Stop the application specified."
            "\nThe application should already exist on this node."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.STOP.name} with env and app regexes matching the environment name and app name accordingly"
        )
        sp = sps.add_parser('stop', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        add_app_env_args(sp)
        sp.set_defaults(func=self.action_stop)
        # reload
        descr = (
            "Reload the application specified."
            "\nThe application should already exist on this node."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.START.name} with env and app regexes matching the environment name and app name accordingly"
        )
        sp = sps.add_parser('reload', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        add_app_env_args(sp)
        sp.set_defaults(func=self.action_reload)
        # restart
        descr = (
            "Restart the application specified."
            "\nThe application should already exist on this node."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.START.name} with env and app regexes matching the environment name and app name accordingly"
            f"\n- {Right.STOP.name} with env and app regexes matching the environment name and app name accordingly"
        )
        sp = sps.add_parser('restart', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        add_app_env_args(sp)
        sp.set_defaults(func=self.action_restart)
        # status
        descr = (
            "Show the systemd status for the application specified."
            "\nThe application should already exist on this node."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.STATUS.name} with env and app regexes matching the environment name and app name accordingly"
        )
        sp = sps.add_parser('status', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        add_app_env_args(sp)
        sp.set_defaults(func=self.action_status)
        # logs
        descr = (
            "Show the global logs (all services) for the application specified."
            "\nThe application should already exist on this node."
            "\nWhen a pager is used, quit with Ctrl-C."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.LOGS.name} with env and app regexes matching the environment name and app name accordingly"
        )
        sp = sps.add_parser('logs', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        add_app_env_args(sp)
        sp.add_argument('--nopager', action='store_false', dest='pager',
                        help="Logs are output directly, without a pager")
        sp.set_defaults(func=self.action_logs)
        # exec
        descr = (
            "Enter a service container for the application specified."
            "\nThe application should already exist and up on this node."
            "\nWithout arguments, a shell (/bin/sh) will be used."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.EXEC.name} with env and app regexes matching the environment name and app name accordingly"
            """\n\nSpaces arguments should be encapsulated with '"..."'."""
            "\nIf you have any argument that starts with a dash (-), start your command with double dashes (--)."
            """\nExample: capp exec -u test -w /app app prod front -- bash -c '"ls -lh"'"""
        )
        sp = sps.add_parser('exec', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        add_app_env_args(sp)
        sp.add_argument('service', metavar='SERVICE', help="application service name")
        sp.add_argument('-u', dest='execuser', metavar='USER', help=f"Run the command as another user ({Right.EXEC_USER.name} right required)")
        sp.add_argument('-e', nargs=1, action='append', dest='execenvs', metavar='KEY=VALUE', help="Set environment variables for the command")
        sp.add_argument('-w', dest='execworkdir', metavar='DIRECTORY', help="Path to workdir directory for the command")
        sp.add_argument('args', nargs='*', default=['/bin/sh'], metavar='ARGS',
                        help="Any arguments to exec for the service. Default to /bin/sh.")
        sp.set_defaults(func=self.action_exec)
        # users
        descr = "User accounts management commands."
        sp = sps.add_parser('users', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr)
        usps = sp.add_subparsers(title='actions', metavar='ACTION', required=True,
                                 description="Use ACTION -h|--help to get full help on any action",
                                 help="One of the following action is required\n ")
        descr = "List user account names and fingerprints."
        epilog = (
            "Required rights:"
            f"\n- {Right.USER_LIST.name}"
        )
        rsp = usps.add_parser('list', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.set_defaults(func=self.action_list_users)
        descr = "Add a user account."
        epilog = (
            "Required rights:"
            f"\n- {Right.USER_ADD.name}"
        )
        rsp = usps.add_parser('add', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.add_argument('user', metavar='USER', help="User account name")
        rsp.add_argument('pkey', metavar='PUBLIC KEY', help="user public key")
        rsp.set_defaults(func=self.action_add_user)
        descr = "Delete a user account."
        epilog = (
            "Required rights:"
            f"\n- {Right.USER_DELETE.name}"
        )
        rsp = usps.add_parser('delete', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.add_argument('user', metavar='USER', help="User account name")
        rsp.set_defaults(func=self.action_delete_user)
        # user keys
        descr = "User public keys management commands."
        sp = usps.add_parser('key', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr)
        ksps = sp.add_subparsers(title='actions', metavar='ACTION', required=True,
                                 description="Use ACTION -h|--help to get full help on any action",
                                 help="One of the following action is required\n ")
        descr = "List the user account public keys."
        epilog = (
            "Required rights:"
            f"\n- {Right.USER_LIST.name}"
        )
        rsp = ksps.add_parser('list', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.add_argument('user', metavar='USER', help="User account name")
        rsp.set_defaults(func=self.action_list_pkeys)
        descr = "Add a public key to a user account."
        epilog = (
            "Required rights:"
            f"\n- {Right.USER_CHANGE.name} when adding a key to another user"
        )
        rsp = ksps.add_parser('add', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.add_argument('user', metavar='USER', help="User account name")
        rsp.add_argument('pkey', metavar='PUBLIC KEY', help="new user public key")
        rsp.set_defaults(func=self.action_add_pkey)
        descr = "Delete a public key from a user account."
        epilog = (
            "Required rights:"
            f"\n- {Right.USER_CHANGE.name} when deleting a key to another user"
        )
        rsp = ksps.add_parser('delete', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.add_argument('user', metavar='USER', help="User account name")
        rsp.add_argument('pkey', metavar='PUBLIC KEY', help="existing user public key")
        rsp.set_defaults(func=self.action_delete_pkey)
        # public keys
        descr = "Public keys management commands."
        sp = sps.add_parser('pubkeys', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr)
        rsps = sp.add_subparsers(title='actions', metavar='ACTION', required=True,
                                 description="Use ACTION -h|--help to get full help on any action",
                                 help="One of the following action is required\n ")
        descr = (
            "List the public keys allowed to validate a DCA signature."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.PKEY_LIST.name}"
        )
        rsp = rsps.add_parser('list', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.set_defaults(func=self.action_list_pubkeys)
        descr = "Add a public key to validate DCA signatures."
        epilog = (
            "Required rights:"
            f"\n- {Right.PKEY_ADD.name}"
        )
        rsp = rsps.add_parser('add', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.add_argument('pubkey', metavar='PUBLIC KEY', help="Signature public key (PEM format without header, footer and without new line character)")
        rsp.set_defaults(func=self.action_add_pubkey)
        descr = "Delete a public key to validate DCA signatures."
        epilog = (
            "Required rights:"
            f"\n- {Right.PKEY_DELETE.name}"
        )
        rsp = rsps.add_parser('delete', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.add_argument('name', help="Signature public key hash name")
        rsp.set_defaults(func=self.action_delete_pubkey)
        # rights
        descr = "Rights management commands."
        sp = sps.add_parser('rights', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr)
        rsps = sp.add_subparsers(title='actions', metavar='ACTION', required=True,
                                 description="Use ACTION -h|--help to get full help on any action",
                                 help="One of the following action is required\n ")
        descr = (
            "List the rights granted to you or the specified user account."
            "\nIf you specify an application and environment, you get the rights specific to it."
        )
        epilog = (
            "Required rights:"
            f"\n- {Right.RIGHT_LIST.name} when listing for another user"
        )
        rsp = rsps.add_parser('list', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.add_argument('--app', nargs='?', const=None, default=None, dest='app', metavar='APP_NAME', help="Application name")
        rsp.add_argument('--env', nargs='?', const=None, default=None, dest='env', metavar='ENV', help=f"Environment name, one of {', '.join(env.name for env in Env)}")
        rsp.add_argument('--user', nargs='?', const=None, default=None, dest='user', metavar='USER', help=f"User account name. Default to {self.get_ssh_user()}")
        rsp.add_argument('-v', '--verbose', action='store_true', help="Output all right names and regex. env and app are ignored.")
        rsp.set_defaults(func=self.action_list_rights)
        descr = "Add some rights to a user account."
        epilog = (
            "Rights should be defined like this: RIGHT,ENV_REGEX,APP_REGEX or just RIGHT"
            f"\nValid right names are: {' '.join(sorted(Right.names()))}"
            "\n\nRequired rights:"
            f"\n- {Right.RIGHT_ADD.name}"
        )
        rsp = rsps.add_parser('add', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.add_argument('user', metavar='USER', help="User account name")
        rsp.add_argument('rights', nargs='+', metavar='RIGHT,ENV_REGEX,APP_REGEX', help="right name, environment regex, app regex.")
        rsp.set_defaults(func=self.action_add_rights)
        descr = "Delete some rights from a user account."
        epilog = (
            f"Valid right names are: {' '.join(sorted(Right.names()))}"
            "\n\nRequired rights:"
            f"\n- {Right.RIGHT_DELETE.name}"
        )
        rsp = rsps.add_parser('delete', formatter_class=PositionalFirstHelpFormatter, help=descr, description=descr, epilog=epilog)
        rsp.add_argument('user', metavar='USER', help="User account name")
        rsp.add_argument('rights', nargs='+', metavar='RIGHT', help="A right name")
        rsp.set_defaults(func=self.action_delete_rights)
        # adjusting helps
        self._append_usage_to_subcommands(usps)
        self._append_usage_to_subcommands(rsps)
        self._append_usage_to_subcommands(sps)
        # parsing sys args
        self.actions = parser.parse_args()

    def run_action(self):
        """Parse CLI arguments, validate the environment, then dispatch the action."""
        self.read_arguments()
        try:
            self._check()
        except ValueError as check_error:
            die(check_error)
        try:
            self.actions.func(self.actions)
        except PermissionError as perm_error:
            # Permission problems exit with a distinct status code (2).
            die(str(perm_error), 2)
        except Exception as unexpected:
            die(unexpected)

    def _check(self):
        """Load all configuration files, then validate the executing user.

        Order matters: _check_user reads self.compose_dirs_config, which is
        populated by the first call below.
        """
        self.compose_dirs_config = self._read_compose_dirs_config()
        self.capp_config = self._read_capp_config()
        self.le_blacklist = self._read_le_blacklist()
        self._check_user()

    def _read_compose_dirs_config(self):
        if not self.compose_dirs_conf_file.is_file():
            raise ValueError(
                "This script relies on compose-systemd: https://github.com/jrd/compose-systemd"
                f"\n{self.compose_dirs_conf_file} file should exist"
            )
        parser = ConfigParser(delimiters=('=',), comment_prefixes=('#',), empty_lines_in_values=False)
        with open(self.compose_dirs_conf_file, encoding='utf8') as f:
            parser.read_file(chain(('[def]',), f))
        return dict(parser['def'])

    def _read_capp_config(self):
        """Parse capp.conf and normalize typed settings (sizes, counts, hostname).

        Raises ValueError when the file is absent.
        """
        if not self.capp_conf_file.is_file():
            raise ValueError(f"{self.capp_conf_file} file should exist")
        ini = ConfigParser(delimiters=('=',), comment_prefixes=('#',), empty_lines_in_values=False)
        with open(self.capp_conf_file, encoding='utf8') as handle:
            # No section header in the file: a synthetic [def] is injected.
            ini.read_file(chain(('[def]',), handle))
        settings = dict(ini['def'])
        # Convert the raw strings into usable types, applying defaults.
        settings['min_dca_version'] = float(settings.get('min_dca_version', '2'))
        settings['max_mem_size'] = self._to_bytes(settings.get('max_mem_size', '10240M'))
        settings['max_proc'] = int(settings.get('max_proc', '1000'))
        settings['default_hostname'] = settings.get('default_hostname', getfqdn())
        return settings

    def _read_le_blacklist(self):
        if self.le_blacklist_file.is_file():
            with open(self.le_blacklist_file, encoding='utf8') as f:
                lines = [line.strip() for line in f.readlines()]
                return set(line for line in lines if line and line[0] != "#")
        else:
            return set()

    def _check_user(self):
        current_user_name = getpwuid(getuid()).pw_name
        expected_user_name = self.compose_dirs_config.get('compose_user', '')
        if current_user_name != expected_user_name:
            raise ValueError(f"This script should be run as {expected_user_name}")

    def _iter_hooks(self, action, step):
        """
        action could be deploy or undeploy
        step could be pre or post
        """
        hooks_dir = self.hooks_root_dir / f'{step}_{action}'
        hooks = []
        if hooks_dir.is_dir():
            for f in hooks_dir.iterdir():
                if f.is_file():
                    mode = f.lstat().st_mode
                    # read an execute rights
                    if mode & S_IRUSR != 0 and mode & S_IXUSR != 0:
                        hooks.append(f)
        return sorted(hooks)

    def _run_hook(self, hook_file, app, target_env, version, compose_file, target_dir):
        """Execute one hook through sudo, reporting (not raising) any failure."""
        printflush(f"Run hook {hook_file}")
        cmd = ['/usr/bin/sudo', str(hook_file), app, target_env, str(version), str(compose_file), str(target_dir)]
        try:
            # stdin=0 hands the hook our own standard input file descriptor.
            run(cmd, stdin=0, check=True, capture_output=True, text=True)
        except CalledProcessError as failure:
            printflush(f"  Error: {failure.stderr}")

    def _to_bytes(self, value_unit):
        """Convert a size string like '10M' into a byte count.

        The single trailing character selects the Unit (B, K, M or G);
        returns None for falsy or unparseable input.
        """
        if not value_unit:
            return None
        matched = next((u for u in Unit if value_unit.endswith(u.name)), None)
        if matched is None:
            return None
        return int(round(float(value_unit[:-1]) * matched))

    def _to_unit(self, value_bytes):
        """Format a byte count with the largest fitting Unit (e.g. 2048 -> '2K').

        Returns None for falsy input or values below one byte.
        """
        if not value_bytes:
            return None
        for unit in list(Unit)[::-1]:
            if value_bytes >= unit.value:
                unit_name = unit.name if unit.value > 1 else ''
                # Strip non-significant trailing zeros and dot.  The original
                # replace() chain stripped '0'+unit anywhere before the suffix,
                # corrupting values such as 200K -> '20K' and 10 bytes -> '1'.
                number = f'{value_bytes / unit:.2f}'.rstrip('0').rstrip('.')
                return f'{number}{unit_name}'

    def get_ssh_user(self):
        return environ.get('SSH_USER', environ['USER'])

    def get_rights(self, user, env=None, app=None):
        rights = {}
        user_pkey_path = self.users_dir / user
        if not user_pkey_path.exists():
            raise ValueError(f"user '{user}' does not exist")
        user_right_path = self.rights_dir / user
        if user_right_path.exists():
            with open(user_right_path, encoding='utf8') as f:
                rights = dict((line.split(' ')[0], line.split(' ')[1:]) for line in f.read().split('\n') if line)
        if env or app:
            filtered_rights = {}
            allowed_right_names = Right.names()
            for name, (env_regex, app_regex) in rights.items():
                if all((
                    name in allowed_right_names,
                    not env or match(env_regex, env),
                    not app or match(app_regex, app),
                )):
                    filtered_rights[name] = (env_regex, app_regex)
            return filtered_rights
        else:
            return rights

    def check_right(self, right, env=None, app=None):
        user = self.get_ssh_user()
        if right.name not in self.get_rights(user, env=env, app=app):
            raise PermissionError(f"User {user} does not have the right {right.name}{(' on ' + env) if env else ''}{(' on ' + app) if app else ''}")

    def trace_action(self, action, args_dict):
        with open(self.log_file, 'a', encoding='utf8') as f:
            dt = datetime.now().replace(microsecond=0).isoformat(' ')
            user = self.get_ssh_user()
            args_str = ['='.join((k, f'"{str(v)}"')) for k, v in args_dict.items() if k != 'func']
            f.write(f"{dt} {user} {action} {' '.join(args_str)}\n")

    def action_dcas(self, args):
        """List deployable DCA files; with --check, verify each sha256 checksum."""
        self.check_right(Right.DCA_READ)
        self.trace_action('dcas', vars(args))
        archives = [entry for entry in self.dca_dir.iterdir() if entry.is_file() and entry.suffix == '.dca']
        if not args.check:
            for archive in archives:
                printflush(archive.name)
            return
        for archive in archives:
            checksum_file = archive.with_suffix('.dca.sha256')
            if not checksum_file.is_file():
                printflush("MISSING .sha256")
                continue
            printflush(f"{archive.name}: ", end='')
            try:
                verify_checksum(archive)
                printflush("OK")
            except ValueError:
                printflush("FAILED")

    def action_deploy(self, args):
        """Deploy a DCA archive end to end.

        In order: sanity-check the archive path, checksum and (for privileged
        apps) signature; unpack to a temp dir and read `metadata` and
        `context/docker-compose.yml`; validate app/env/compose content and
        derive resource limits; install nginx-proxy vhost snippets and the
        compose context; rewrite the compose file with the proxy network,
        VIRTUAL_HOST variables and limits; update the deps file and systemd
        units, then start/reload (or stop, without --start) the application.
        """
        self.check_right(Right.DCA_READ)
        # resolve() plus the parent check prevents escaping dca_dir via '..'
        dca = (self.dca_dir / args.dca_file).resolve()
        if dca.parent != self.dca_dir or dca.suffix != '.dca' or not dca.is_file():
            raise ValueError(f"{args.dca_file} is incorrect")
        if not dca.with_suffix('.dca.sha256').is_file():
            raise ValueError(f"{args.dca_file}.sha256 is missing")
        self.trace_action('deploy', vars(args))
        printflush(f"Deploying {dca.name}…")
        try:
            verify_checksum(dca)
            printflush(f"{dca.name}: 'OK'")
        except ValueError:
            raise ValueError(f"{dca.name}: FAILED")
        with temp_dir() as tmpd:
            with taropen(dca, 'r:*') as tar:
                # NOTE(review): extractall() does not sanitize member paths;
                # the archive is checksum-verified, but confirm members cannot
                # escape tmpd (Python 3.12+ offers the extraction `filter=`)
                tar.extractall(path=tmpd)
            metadata_file = tmpd / 'metadata'
            compose_file = tmpd / 'context' / 'docker-compose.yml'
            for f in (metadata_file, compose_file):
                if not f.is_file():
                    raise ValueError(f"Bad {dca.name}: missing {f.name}")
            # `metadata` is a headerless key=value file: inject a dummy [def]
            # section so ConfigParser accepts it
            parser = ConfigParser(delimiters=('=',), comment_prefixes=('#',), empty_lines_in_values=False)
            with open(metadata_file, encoding='utf8') as f:
                parser.read_file(chain(('[def]',), f))
            metadata = dict(parser['def'])
            app = metadata.get('app', None)
            target_env = metadata.get('target_env', None)
            version = float(metadata.get('version', '1'))
            privileged = metadata.get('privileged', '0') == '1'
            signature = metadata.get('signature', '')
            self._check_dca_version(version)
            self._check_app_name_target_env(app, target_env)
            if privileged:
                # privileged archives (ports, absolute mounts…) must be signed
                self._check_signature(compose_file, signature)
            self.check_right(Right.DEPLOY, app=app, env=target_env)
            default_hostname = self.capp_config['default_hostname']
            # e.g. '-staging.example.org'; prod gets no env infix
            vhost_suffix = f"{'' if target_env == 'prod' else '-' + target_env}.{default_hostname}"
            target_dir = Path(self.compose_dirs_config['compose_dir']) / app / target_env
            self._pre_deploy(app, target_env, version, compose_file, target_dir)
            dc, svc_names, limits = self._check_compose(compose_file, target_env, version, privileged)
            self._check_limits(limits)
            self._load_images((tmpd / 'images').glob('*.tar.gz'))
            if args.clean:
                self._clean_volumes(target_dir)
            target_dir.mkdir(parents=True, exist_ok=True)
            self._copy_context(tmpd / 'context', target_dir)
            # install (or clean up) per-vhost nginx-proxy configuration snippets
            vhostd_dir = Path('/var/docker-volumes/nginx-proxy/vhost.d')
            for svc_name in svc_names:
                full_hostnames = self._build_full_hostnames(metadata, vhost_suffix, svc_name)
                for vhost in full_hostnames:
                    server_nginx_config_file = tmpd / 'proxy' / f'{svc_name}-server'
                    if server_nginx_config_file.is_file():
                        copy(server_nginx_config_file, vhostd_dir / vhost)
                    else:
                        # NOTE(review): this removal path treats <vhost> as a
                        # directory, while the copy above writes it as a file —
                        # confirm it matches the legacy layout it cleans up
                        old_config_file = vhostd_dir / vhost / f'{svc_name}-server'
                        if old_config_file.exists():
                            old_config_file.unlink()
                    location_nginx_config_file = tmpd / 'proxy' / f'{svc_name}-location'
                    if location_nginx_config_file.is_file():
                        copy(location_nginx_config_file, vhostd_dir / f'{vhost}_location')
                    else:
                        # NOTE(review): same directory-vs-file doubt as above
                        old_config_file = vhostd_dir / vhost / f'{svc_name}_location'
                        if old_config_file.exists():
                            old_config_file.unlink()
            # read the archive's .env before tmpd is cleaned up; it is
            # re-written below with COMPOSE_PROJECT_NAME prepended
            orig_dotenv = tmpd / 'context' / '.env'
            orig_dotenv_content = orig_dotenv.open(encoding='utf8').read() if orig_dotenv.exists() else ''
        with open(target_dir / '.env', 'w', encoding='utf8') as f:
            f.write(f"COMPOSE_PROJECT_NAME={app}-{target_env}\n{orig_dotenv_content}\n")
        with open(target_dir / '.version', 'w', encoding='ascii') as f:
            f.write(f"{version}\n")
        with open(target_dir / '.metadata', 'w', encoding='utf8') as f:
            for key, value in metadata.items():
                f.write(f"{key}={value}\n")
        # every deployed app joins the shared reverse-proxy network
        if 'networks' not in dc:
            dc['networks'] = {}
        dc['networks']['proxy'] = {
            'external': True,
            'name': 'proxy_network',
        }
        for svc_name in svc_names:
            svc_def = dc['services'][svc_name]
            full_hostnames = self._build_full_hostnames(metadata, vhost_suffix, svc_name)
            if full_hostnames:
                if 'environment' not in svc_def:
                    svc_def['environment'] = {}
                full_host = ','.join(host.strip() for host in full_hostnames)
                # hosts on the Let's Encrypt blacklist get proxied but no cert
                letsencrypt_full_host = ','.join(host.strip() for host in full_hostnames if host.strip() not in self.le_blacklist)
                if isinstance(svc_def['environment'], dict):
                    svc_def['environment']['VIRTUAL_HOST'] = full_host
                    svc_def['environment']['LETSENCRYPT_HOST'] = letsencrypt_full_host
                    svc_def['environment']['VIRTUAL_PORT'] = metadata.get(f'{svc_name}_vhost_port', '80')
                else:  # list
                    svc_def['environment'].append(f'VIRTUAL_HOST={full_host}')
                    svc_def['environment'].append(f'LETSENCRYPT_HOST={letsencrypt_full_host}')
                    svc_def['environment'].append(f"VIRTUAL_PORT={metadata.get(f'{svc_name}_vhost_port', '80')}")
            # attach the proxy network to any service exposing a VIRTUAL_HOST,
            # whether set above, inline in the compose file, or via env_file(s)
            if any((
                'environment' in svc_def and isinstance(svc_def['environment'], dict) and
                    'VIRTUAL_HOST' in svc_def['environment'],
                'environment' in svc_def and isinstance(svc_def['environment'], list) and
                    any(e.startswith('VIRTUAL_HOST=') for e in svc_def['environment']),
                'env_file' in svc_def and isinstance(svc_def['env_file'], str) and
                    any(e.startswith('VIRTUAL_HOST=') for e in (target_dir / svc_def['env_file']).open(encoding='utf8').readlines()),
                'env_file' in svc_def and isinstance(svc_def['env_file'], list) and
                    any(e.startswith('VIRTUAL_HOST=') for env_file in svc_def['env_file'] for e in (target_dir / env_file).open(encoding='utf8').readlines()),
            )):
                if 'networks' not in svc_def:
                    svc_def['networks'] = []
                if isinstance(svc_def['networks'], dict):
                    svc_def['networks']['proxy'] = {}
                else:  # list
                    svc_def['networks'].append('proxy')
            # enforce the validated resource limits on every service
            svc_def['mem_limit'] = f'{limits.memory[svc_name]}B'
            svc_def['mem_reservation'] = f'{limits.memory_avg[svc_name]}B'
            svc_def['mem_swappiness'] = 10
            svc_def['oom_score_adj'] = 100  # prefer to kill containers than system processes
            # min = 1024 ÷ 4 (cpu = 1), avg = 1024 (cpu = 4), max 1024 × 4 (cpu = 16)
            svc_def['pids_limit'] = self.capp_config['max_proc']
            svc_def['cpu_shares'] = limits.cpu[svc_name] * 1024 // 4
        with open(target_dir / 'docker-compose.yml', 'w', encoding='utf8') as f:
            dump(dc, f)
        self._pull_images(target_dir)
        self._build_images(target_dir)
        # register the app in the deps file (one '<app>/<env>:<deps>' line each)
        deps_file = Path(self.compose_dirs_config['compose_dir']) / self.compose_dirs_config['deps_file']
        with open(deps_file, 'r+', encoding='utf8') as f:
            deps = [line.strip() for line in f if not line.startswith(f'{app}/{target_env}:')]
            deps.append(f'{app}/{target_env}:proxy')
            f.seek(0)
            f.truncate()
            f.write('\n'.join(deps))
            f.write('\n')
        printflush("Update systemd")
        run(['sudo', '/usr/local/bin/compose-dirs', 'update', f'{app}/{target_env}'], check=True, text=True)
        systemd_svc = self._get_systemd_service_name(app, target_env)
        is_active = run(['systemctl', 'is-active', systemd_svc], capture_output=True).returncode == 0
        if args.start:
            if is_active:
                try:
                    printflush("Reload application")
                    run(['sudo', 'systemctl', 'reload-or-restart', systemd_svc], check=True, text=True)
                except CalledProcessError as e:
                    printflush(f"  Error: {e.stderr}")
                    printflush("Try restarting application instead")
                    run(['sudo', 'systemctl', 'restart', systemd_svc], check=True, text=True)
            else:
                printflush("Start application")
                run(['sudo', 'systemctl', 'start', systemd_svc], check=True, text=True)
        elif is_active:
            # deployed with --no-start: make sure a running instance is stopped
            printflush("Stop application")
            run(['sudo', 'systemctl', 'stop', systemd_svc], check=True, text=True)
        self._post_deploy(app, target_env, version, target_dir / 'docker-compose.yml', target_dir)
        printflush("Cleaning dangling images…")
        run(['docker', 'image', 'prune', '-f'], text=True)

    def _check_vhost(self, vhost):
        try:
            hostname = vhost.encode('idna').decode()
            if len(hostname) > 255:
                raise ValueError
            dash_doesn_t_match_next = '(?!-)'
            label = '[-_a-zA-Z0-9]{1,63}'
            not_preceeded_by_dash = '(?<!-)'
            allowed_chars = compile(f'{dash_doesn_t_match_next}{label}{not_preceeded_by_dash}$')
            if all(allowed_chars.match(label) for label in hostname.split('.')):
                return hostname
            else:
                raise ValueError
        except (UnicodeError, ValueError):
            raise ValueError(f"vhost {vhost} is not a valid hostname")

    def _build_full_hostnames(self, metadata, vhost_suffix, svc_name):
        return (
            [self._check_vhost(base_vhost.strip() + vhost_suffix) for base_vhost in metadata[f'{svc_name}_base_vhost'].split(',')]
            if f'{svc_name}_base_vhost' in metadata else []
        ) + (
            [self._check_vhost(vhost.strip()) for vhost in metadata[f'{svc_name}_vhost'].split(',')]
            if f'{svc_name}_vhost' in metadata else []
        )

    def _get_systemd_service_name(self, app, env=None):
        """Return the systemd unit name `<tmpl>@<escaped app[/env]>.service`."""
        raw_name = f'{app}/{env}' if env else app
        escaped = run(['systemd-escape', '--suffix', 'service', raw_name], check=True, capture_output=True, text=True).stdout.strip()
        template = self.compose_dirs_config.get('tmpl_name', 'compose')
        return f'{template}@{escaped}'

    def _check_dca_version(self, version):
        if version < self.capp_config['min_dca_version']:
            raise ValueError(f"minimum DCA version required is {self.capp_config['min_dca_version']}, given {version}")

    def _check_app_name(self, app):
        valid_app_name = r'[a-zA-Z][-_a-zA-Z0-9]+'
        if not all((
            app,
            match(f'^{valid_app_name}$', app),
        )):
            raise ValueError(f"app '{app or ''}' should be a valid name: {valid_app_name}")

    def _check_target_env(self, target_env):
        """Validate *target_env* against the known environments; raise ValueError otherwise."""
        is_valid = target_env and target_env in Env.names()
        if not is_valid:
            raise ValueError(f"target environment '{target_env or ''}' should be a valid env: {', '.join(env.name for env in Env)}")

    def _check_app_name_target_env(self, app, target_env):
        self._check_app_name(app)
        self._check_target_env(target_env)

    def _verify_signature(self, signature, compose_file, pubkey_file):
        """Return True when *signature* is a valid SHA-256 signature of
        *compose_file* under *pubkey_file* (checked via openssl)."""
        cmd = [
            'openssl', 'dgst',
            '-sha256',
            '-verify', str(pubkey_file),
            '-signature', '/dev/stdin',
            str(compose_file),
        ]
        try:
            # the raw signature is fed on stdin to avoid a temp file
            run(cmd, input=signature, check=True, capture_output=True)
        except CalledProcessError:
            return False
        return True

    def _check_signature(self, compose_file, signature_base64):
        signature = b64decode(signature_base64)
        for pubkey_file in (f for f in self.pubkeys_dir.iterdir() if f.is_file() and f.suffix == '.pem'):
            if self._verify_signature(signature, compose_file, pubkey_file):
                printflush("Signature: 'OK'")
                break
        else:
            raise ValueError("metadata signature mismatch")

    def _check_compose(self, compose_file, target_env, version, privileged=False):
        """Validate a docker-compose file and derive per-service resource limits.

        Returns (parsed_compose_dict, service_names, ResourceLimits).
        Raises ValueError when `docker-compose config` rejects the file.
        """
        syntax_check = run(self.dc + ['-f', str(compose_file), 'config', '-q'], capture_output=True)
        if syntax_check.returncode:
            raise ValueError(f'{compose_file.name} is incorrect')
        ResourceLimits = namedtuple('ResourceLimits', 'memory memory_avg cpu')
        limits = ResourceLimits({}, {}, {})
        with open(compose_file, encoding='utf8') as f:
            dc = full_load(f)
        services = dc.get('services', {})
        svc_names = list(services)
        legacy = not version > 1
        for svc_name, svc_def in services.items():
            # defaults: legacy (v1) archives get looser memory limits
            limits.memory[svc_name] = 1 * Unit.G if legacy else 300 * Unit.M
            limits.memory_avg[svc_name] = 300 * Unit.M if legacy else 100 * Unit.M
            limits.cpu[svc_name] = 4
            if not legacy:
                self._verify_compose_service(svc_name, svc_def, privileged)
        if not legacy:
            for vol_name, vol_def in dc.get('volumes', {}).items():
                self._verify_compose_volume(vol_name, vol_def)
            for net_name, net_def in dc.get('networks', {}).items():
                self._verify_compose_network(net_name, net_def)
            # env-specific x-resources override/extend the generic section
            for section in ('x-resources', f'x-{target_env}-resources'):
                for res_name, res_def in dc.get(section, {}).items():
                    self._verify_resources(res_name, res_def, limits)
        return dc, svc_names, limits

    def _verify_compose_service(self, name, definition, privileged=False):
        allowed_keys = (
            'build',
            'cap_drop',
            'command',
            'depends_on',
            'entrypoint',
            'env_file',
            'environment',
            'expose',
            'extends',
            'extra_hosts',
            'group_add',
            'healthcheck',
            'image',
            'init',
            'labels',
            'logging',
            'networks',
            'pid',
            'scale',
            'stop_grace_period',
            'stop_signal',
            'sysctls',
            'tmpfs',
            'ulimits',
            'volumes',
            'volumes_from',
            'restart',
            'shm_size',
            'tty',
            'user',
            'working_dir',
        )
        _volume_allowed_conditions = lambda vol_src: (  # noqa: E731
            not match(r'[a-zA-Z]', vol_src),  # allow named volume starting with a letter
            not vol_src.startswith('./'),  # allow relative path
        )
        if privileged:
            allowed_keys += ('ports',)
            volume_allowed_conditions = lambda vol_src: (  # noqa: E731
                *_volume_allowed_conditions(vol_src),
                not vol_src.startswith('/'),  # allow absolute path
            )
        else:
            volume_allowed_conditions = _volume_allowed_conditions
        for key in (definition or {}).keys():
            if key not in allowed_keys:
                raise ValueError(f"key {key}, defined for {name} is not allowed in services section")
            if key == 'build':
                build_def = definition[key]
                if isinstance(build_def, dict):
                    for subkey in build_def.keys():
                        if subkey not in (
                            'context',
                            'dockerfile',
                            'args',
                            'cache_from',
                            'extra_hosts',
                            'labels',
                            'shm_size',
                            'target',
                        ):
                            raise ValueError(f"key {subkey}, defined for {name}.{key} is not allowed in services section")
            elif key == 'extends':
                for subkey in (definition[key] or {}).keys():
                    if subkey not in (
                        'file',
                        'service',
                    ):
                        raise ValueError(f"key {subkey}, defined for {name}.{key} is not allowed in services section")
            elif key == 'healthcheck':
                for subkey in (definition[key] or {}).keys():
                    if subkey not in (
                        'test',
                        'interval',
                        'timeout',
                        'retries',
                        'start_period',
                        'disable',
                    ):
                        raise ValueError(f"key {subkey}, defined for {name}.{key} is not allowed in services section")
            elif key == 'pid':
                if definition[key] == 'host':
                    raise ValueError(f"key {key}, defined for {name} is not allowed to take the 'host' value in services section")
            elif key == 'volumes':
                volumes = definition[key]
                for volume in volumes:
                    if isinstance(volume, str):
                        vol_parts = volume.split(':') if ':' in volume else []
                        vol_src = vol_parts[0] if vol_parts[1].lower() not in ('ro', 'rw') else ''
                    else:
                        vol_src = volume.get('source', '')
                    if vol_src and all(volume_allowed_conditions(vol_src)):
                        raise ValueError(f"The volume {volume}, defined for {name} is not allowed to have a non local source path or non-named volume in services section")

    def _verify_compose_volume(self, name, definition):
        for key in (definition or {}).keys():
            if key not in ('external', 'labels', 'name'):
                raise ValueError(f"key {key}, defined for {name} is not allowed in volumes section")

    def _verify_compose_network(self, name, definition):
        for key in (definition or {}).keys():
            if key not in ('external', 'internal', 'labels', 'name'):
                raise ValueError(f"key {key}, defined for {name} is not allowed in networks section")

    def _verify_resources(self, name, definition, limits):
        for key in (definition or {}).keys():
            if key not in ('memory', 'memory_avg', 'cpu'):
                raise ValueError(f"key {key}, defined for {name} is not allowed in x-resources section")
            elif definition[key] is None:
                continue
            elif key == 'cpu':
                if not 1 <= definition[key] <= 16:
                    raise ValueError(f"key {key}, defined for {name} should have a value between [1; 16], in x-resources section")
                else:
                    limits.cpu[name] = definition[key]
            else:
                if not any((
                    definition[key].endswith('B'),
                    definition[key].endswith('K'),
                    definition[key].endswith('M'),
                    definition[key].endswith('G'),
                )):
                    raise ValueError(f"key {key}, defined for {name} should have a unit value of B, K, M or G, in x-resources section")
                elif not search(r'^[0-9]+$', definition[key][:-1]):
                    raise ValueError(f"key {key}, defined for {name} should have a valid postive integer value, in x-resources section")
                else:
                    value = self._to_bytes(definition[key])
                    if key == 'memory':
                        limits.memory[name] = value
                    elif key == 'memory_avg':
                        limits.memory_avg[name] = value

    def _check_limits(self, limits):
        max_mem_size = self.capp_config['max_mem_size']
        for (name, limit), (_, limit_avg) in zip(sorted(limits.memory.items()), sorted(limits.memory_avg.items())):
            if limit > max_mem_size:
                raise ValueError(f"memory limit of {self._to_unit(limit)} exceeds max defined limit of {self._to_unit(max_mem_size)}")
            if limit_avg > limit:
                raise ValueError(f"memory average limit of {self._to_unit(limit_avg)} exceeds memory limit of {self._to_unit(limit)}")
        for name, cpu in limits.cpu.items():
            if not 1 <= cpu <= 16:
                raise ValueError(f"cpu weight limit of {cpu} should be in [1; 16] range")

    def _pre_deploy(self, app, target_env, version, compose_file, target_dir):
        for hook_file in self._iter_hooks('deploy', 'pre'):
            self._run_hook(hook_file, app, target_env, version, compose_file, target_dir)

    def _post_deploy(self, app, target_env, version, compose_file, target_dir):
        for hook_file in self._iter_hooks('deploy', 'post'):
            self._run_hook(hook_file, app, target_env, version, compose_file, target_dir)

    def _pre_undeploy(self, app, target_env, version, compose_file, target_dir):
        for hook_file in self._iter_hooks('undeploy', 'pre'):
            self._run_hook(hook_file, app, target_env, version, compose_file, target_dir)

    def _post_undeploy(self, app, target_env, version, compose_file, target_dir):
        for hook_file in self._iter_hooks('undeploy', 'post'):
            self._run_hook(hook_file, app, target_env, version, compose_file, target_dir)

    def _pull_images(self, target_dir):
        if target_dir.is_dir() and (target_dir / 'docker-compose.yml').is_file():
            printflush("Pulling images…")
            run(self.dc + ['pull', '--ignore-pull-failures', '--quiet'], cwd=target_dir, text=True)

    def _build_images(self, target_dir, quiet=True, use_cache=True):
        if target_dir.is_dir() and (target_dir / 'docker-compose.yml').is_file():
            printflush("Building images…")
            cmd = self.dc + ['build', '--pull']
            if quiet:
                cmd.append('--quiet')
            if not use_cache:
                cmd.append('--no-cache')
            run(cmd, cwd=target_dir, text=True)

    def _load_images(self, image_files):
        for image_file in image_files:
            printflush(f"Loading image {image_file}…")
            run(f'zcat "{str(image_file)}" | docker image load', shell=True, check=True, text=True)
        if image_files:
            printflush("Cleaning dangling images…")
            run(['docker', 'image', 'prune', '-f'], text=True)

    def _clean_volumes(self, target_dir):
        if target_dir.is_dir() and (target_dir / 'docker-compose.yml').is_file():
            printflush("Cleaning volumes…")
            run(self.dc + ['down', '-v'], cwd=target_dir, text=True)

    def _copy_context(self, src_dir, dest_dir):
        """Copy the DCA context into *dest_dir*, skipping the files the deploy
        step (re)generates itself ('.env' and 'docker-compose.yml')."""
        generated = {'.env', 'docker-compose.yml'}

        def _skip_generated(item):
            return item.name in generated
        copy_tree(src_dir, dest_dir, ignored=_skip_generated)

    def action_undeploy(self, args):
        """Tear an application down: stop its systemd unit, deregister it from
        the deps file, remove its nginx-proxy vhost snippets, destroy its
        containers/images and delete its compose directory.
        """
        app = args.app
        target_env = args.env
        self._check_app_name_target_env(app, target_env)
        self.check_right(Right.DEPLOY, app=app, env=target_env)
        target_dir = Path(self.compose_dirs_config['compose_dir']) / app / target_env
        if not target_dir.exists():
            raise ValueError(f'App {app}/{target_env} does not exist')
        self.trace_action('undeploy', vars(args))
        printflush(f"Undeploying {app}/{target_env}…")
        try:
            with open(target_dir / '.version', encoding='ascii') as f:
                version = float(f.read().strip())
        except FileNotFoundError:
            # pre-versioning deployments did not write a .version file
            version = 1
        self._pre_undeploy(app, target_env, version, target_dir / 'docker-compose.yml', target_dir)
        systemd_svc = self._get_systemd_service_name(app, target_env)
        run(['sudo', 'systemctl', 'stop', systemd_svc], check=True, text=True)
        # drop the app's line from the deps file, then refresh systemd units
        deps_file = Path(self.compose_dirs_config['compose_dir']) / self.compose_dirs_config['deps_file']
        with open(deps_file, 'r+', encoding='utf8') as f:
            deps = [line.strip() for line in f if not line.startswith(f'{app}/{target_env}:')]
            f.seek(0)
            f.truncate()
            f.write('\n'.join(deps))
            f.write('\n')
        printflush("Update systemd")
        run(['sudo', '/usr/local/bin/compose-dirs', 'update', f'{app}/{target_env}'], check=True, text=True)
        if target_dir.is_dir():
            with open(target_dir / 'docker-compose.yml', encoding='utf8') as f:
                dc = full_load(f)
            # remove the nginx-proxy snippets of every VIRTUAL_HOST the
            # deployed compose file declared (dict or list environment form)
            vhostd_dir = Path('/var/docker-volumes/nginx-proxy/vhost.d')
            for svc in dc.get('services', {}).values():
                environment = svc.get('environment', {})
                vhost_str = environment.get('VIRTUAL_HOST', '') if isinstance(environment, dict) \
                    else next(iter([e for e in environment if match(r'VIRTUAL_HOST=', e)]), '=').split('=', 1)[1]
                if vhost_str:
                    for vhost in [x.strip() for x in vhost_str.split(',')]:
                        server_nginx_config_file = vhostd_dir / vhost
                        if server_nginx_config_file.is_file():
                            server_nginx_config_file.unlink()
                        location_nginx_config_file = vhostd_dir / f'{vhost}_location'
                        if location_nginx_config_file.is_file():
                            location_nginx_config_file.unlink()
            # --all also removes volumes and every image, not just local ones
            dc_down_args = '-v --rmi all' if args.all else '--rmi local'
            printflush("Destroying application…")
            # grep filters the noisy network-removal lines from compose output
            run(f"{' '.join(self.dc)} down {dc_down_args} 2>&1 | grep -v ^Network | grep -v '^Removing network'", shell=True, cwd=target_dir, text=True)
            self._post_undeploy(app, target_env, version, target_dir / 'docker-compose.yml', target_dir)
            rmtree(target_dir, ignore_errors=True)
            try:
                # remove the app dir too if this was its last environment
                target_dir.parent.rmdir()
            except OSError:
                pass
        else:
            # NOTE(review): exists() passed but is_dir() failed, so the compose
            # path handed to the hooks cannot exist — confirm post-undeploy
            # hooks tolerate a missing compose file
            self._post_undeploy(app, target_env, version, target_dir / 'docker-compose.yml', target_dir)

    def action_apps(self, args):
        """List deployed app/env pairs; with --verbose, show their systemd state."""
        self.check_right(Right.APP_LIST)
        self.trace_action('apps', vars(args))
        deps_file = Path(self.compose_dirs_config['compose_dir']) / self.compose_dirs_config['deps_file']
        with open(deps_file, encoding='utf8') as f:
            # only well-formed '<app>/<env>:<deps>' lines are listed
            entries = sorted(line.strip() for line in f if search(r'/.+:', line))
            for entry in entries:
                app, env = entry.split(':')[0].split('/')
                printflush(f'{app} {env}')
                if not args.verbose:
                    continue
                svc = self._get_systemd_service_name(app, env)
                raw_status = run(['systemctl', 'status', '--no-pager', svc], env={'SYSTEMD_COLORS': '1'}, capture_output=True, text=True).stdout
                active = ''.join([line.replace('Active: ', '') for line in raw_status.split('\n') if search(r'Active:', line)])
                printflush(active)

    def action_build(self, args):
        """Force a fresh pull and a no-cache rebuild of an app's images."""
        app, target_env = args.app, args.env
        self._check_app_name_target_env(app, target_env)
        self.check_right(Right.START, app=app, env=target_env)
        self.trace_action('build', vars(args))
        target_dir = Path(self.compose_dirs_config['compose_dir']) / app / target_env
        self._pull_images(target_dir)
        self._build_images(target_dir, quiet=False, use_cache=False)

    def action_start(self, args):
        """Start the app's systemd unit (replaces this process with sudo systemctl)."""
        app, target_env = args.app, args.env
        self._check_app_name_target_env(app, target_env)
        self.check_right(Right.START, app=app, env=target_env)
        self.trace_action('start', vars(args))
        execlp('sudo', 'sudo', 'systemctl', 'start', self._get_systemd_service_name(app, target_env))

    def action_stop(self, args):
        """Stop the app's systemd unit (replaces this process with sudo systemctl)."""
        app, target_env = args.app, args.env
        self._check_app_name_target_env(app, target_env)
        self.check_right(Right.STOP, app=app, env=target_env)
        self.trace_action('stop', vars(args))
        execlp('sudo', 'sudo', 'systemctl', 'stop', self._get_systemd_service_name(app, target_env))

    def action_reload(self, args):
        """Reload the app's systemd unit (replaces this process with sudo systemctl)."""
        app, target_env = args.app, args.env
        self._check_app_name_target_env(app, target_env)
        self.check_right(Right.START, app=app, env=target_env)
        self.trace_action('reload', vars(args))
        execlp('sudo', 'sudo', 'systemctl', 'reload', self._get_systemd_service_name(app, target_env))

    def action_restart(self, args):
        """Restart the app's systemd unit; requires both START and STOP rights."""
        app, target_env = args.app, args.env
        self._check_app_name_target_env(app, target_env)
        self.check_right(Right.START, app=app, env=target_env)
        self.check_right(Right.STOP, app=app, env=target_env)
        self.trace_action('restart', vars(args))
        execlp('sudo', 'sudo', 'systemctl', 'restart', self._get_systemd_service_name(app, target_env))

    def action_status(self, args):
        """Show `systemctl status` for the app's unit (replaces this process)."""
        app, target_env = args.app, args.env
        self._check_app_name_target_env(app, target_env)
        self.check_right(Right.STATUS, app=app, env=target_env)
        self.trace_action('status', vars(args))
        svc = self._get_systemd_service_name(app, target_env)
        execlp('systemctl', 'systemctl', 'status', '--no-pager', '--lines=0', svc)

    def action_logs(self, args):
        """Show the app's journal; with --pager, follow it via `journalctl -feu`."""
        app, target_env = args.app, args.env
        self._check_app_name_target_env(app, target_env)
        self.check_right(Right.LOGS, app=app, env=target_env)
        self.trace_action('logs', vars(args))
        svc = self._get_systemd_service_name(app, target_env)
        journal_args = ('-feu', svc) if args.pager else ('--no-pager', '-u', svc)
        execlp('journalctl', 'journalctl', *journal_args)

    def action_exec(self, args):
        """Run `docker-compose exec` in the app's compose directory, honoring
        the requested user, environment variables and working directory."""
        app, target_env = args.app, args.env
        self._check_app_name_target_env(app, target_env)
        self.check_right(Right.EXEC, app=app, env=target_env)
        self.trace_action('exec', vars(args))
        target_dir = Path(self.compose_dirs_config['compose_dir']) / app / target_env
        cmd = self.dc + ['exec']
        if args.execuser:
            # running as another user is a separate, stronger right
            self.check_right(Right.EXEC_USER, app=app, env=target_env)
            cmd += ['-u', args.execuser]
        for kv in (args.execenvs or []):
            cmd += ['-e', kv[0]]
        if args.execworkdir:
            cmd += ['-w', args.execworkdir]
        run(cmd + [args.service] + list(args.args), cwd=target_dir)

    def action_list_users(self, args):
        """List declared users with the fingerprint of their public key.

        Uses printflush like every other action so output is not buffered
        when piped over SSH (the original used bare print here).
        """
        self.check_right(Right.USER_LIST)
        self.trace_action('users-list', vars(args))
        users = sorted(f for f in self.users_dir.iterdir() if f.is_file())
        for user in users:
            fingerprint = run(['ssh-keygen', '-l', '-f', str(user)], capture_output=True, text=True).stdout.strip()
            printflush(f"{user.name.ljust(19)} {fingerprint.split(' ')[1]}")

    def action_add_user(self, args):
        """Register a new user by storing their public key.

        The very first user is created without a rights check (bootstrap,
        expected to happen locally rather than over ssh) and is granted
        every right on every env and app.  Raises ValueError when the user
        already exists.
        """
        bootstrap = not list(self.users_dir.iterdir())
        if not bootstrap:
            self.check_right(Right.USER_ADD)
        user = args.user
        user_pkey_path = self.users_dir / user
        if user_pkey_path.exists():
            raise ValueError(f"user '{user}' already exists")
        self.trace_action('users-add', vars(args))
        with open(user_pkey_path, 'w', encoding='utf8') as f:
            f.write(args.pkey)
        if bootstrap:
            all_rights = {right: ('.*', '.*') for right in Right.names()}
            self._update_user_rights(user, all_rights)

    def action_list_pkeys(self, args):
        """Print all stored SSH public keys of a user.

        A user may list their own keys; listing another user's keys
        requires the USER_LIST right.
        Raises ValueError when the user does not exist.
        """
        user = args.user
        if user != self.get_ssh_user():
            self.check_right(Right.USER_LIST)
        user_pkey_path = self.users_dir / user
        if not user_pkey_path.exists():
            raise ValueError(f"user '{user}' does not exist")
        self.trace_action('users-listkeys', vars(args))
        # read_text closes the file; the previous open().read() leaked the handle
        print(user_pkey_path.read_text(encoding='utf8'))

    def action_add_pkey(self, args):
        """Append an SSH public key to a user's key file (deduplicated).

        A user may add keys to their own account; changing another
        user's keys requires the USER_CHANGE right.
        Raises ValueError when the user does not exist.
        """
        user = args.user
        pkey = args.pkey
        if user != self.get_ssh_user():
            self.check_right(Right.USER_CHANGE)
        user_pkey_path = self.users_dir / user
        if not user_pkey_path.exists():
            raise ValueError(f"user '{user}' does not exist")
        self.trace_action('users-addkey', vars(args))
        # read_text closes the file; the previous open().read() leaked the handle
        pkeys = set(user_pkey_path.read_text(encoding='utf8').split('\n'))
        pkeys.add(pkey)
        with open(user_pkey_path, 'w', encoding='utf8') as f:
            f.write('\n'.join(pkeys))

    def action_delete_pkey(self, args):
        """Remove one SSH public key from a user's key file.

        A user may delete their own keys; changing another user's keys
        requires the USER_CHANGE right. Deleting a key that is not
        present is a no-op (set difference).
        Raises ValueError when the user does not exist.
        """
        user = args.user
        pkey = args.pkey
        if user != self.get_ssh_user():
            self.check_right(Right.USER_CHANGE)
        user_pkey_path = self.users_dir / user
        if not user_pkey_path.exists():
            raise ValueError(f"user '{user}' does not exist")
        self.trace_action('users-delkey', vars(args))
        # read_text closes the file; the previous open().read() leaked the handle
        pkeys = set(user_pkey_path.read_text(encoding='utf8').split('\n')) - {pkey}
        with open(user_pkey_path, 'w', encoding='utf8') as f:
            f.write('\n'.join(pkeys))

    def action_delete_user(self, args):
        """Remove a user by deleting their public-key file.

        Raises ValueError when the user does not exist.
        """
        self.check_right(Right.USER_DELETE)
        pkey_path = self.users_dir / args.user
        if not pkey_path.exists():
            raise ValueError(f"user '{args.user}' does not exist")
        self.trace_action('users-delete', vars(args))
        pkey_path.unlink()

    def _update_user_rights(self, user, rights):
        """Persist a user's rights file, one 'name env_regex app_regex' per line.

        Silently does nothing when the user has no key file in users_dir.
        """
        known_users = {f.name for f in self.users_dir.iterdir() if f.is_file()}
        if user not in known_users:
            return
        lines = [f'{name} {env_regex} {app_regex}\n'
                 for name, (env_regex, app_regex) in rights.items()]
        with open(self.rights_dir / user, 'w', encoding='utf8') as f:
            f.writelines(lines)

    def action_list_pubkeys(self, args):
        """Print the name (sha256 stem) of every stored .pem public key."""
        self.check_right(Right.PKEY_LIST)
        pubkey_files = sorted(f for f in self.pubkeys_dir.iterdir()
                              if f.is_file() and f.suffix == '.pem')
        self.trace_action('pubkeys-list', vars(args))
        for pubkey_file in pubkey_files:
            # BUG FIX: was `pubkey_files.stem` (attribute access on the
            # list, AttributeError on any non-empty listing)
            print(pubkey_file.stem)

    def action_add_pubkey(self, args):
        """Store a raw base64 public key as a PEM file named by its sha256.

        Raises ValueError when the given pubkey string is empty.
        """
        self.check_right(Right.PKEY_ADD)
        if not args.pubkey:
            raise ValueError(f"pubkey '{args.pubkey}' is not valid")
        # re-wrap the raw base64 body at 64 characters per PEM convention
        body = '\n'.join(args.pubkey[i:i + 64] for i in range(0, len(args.pubkey), 64))
        pem = '\n'.join(('-----BEGIN PUBLIC KEY-----', body, '-----END PUBLIC KEY-----')) + '\n'
        # file name is the digest of the wrapped body, not the raw input
        dest = self.pubkeys_dir / (sha256(body.encode('ascii')).hexdigest() + '.pem')
        with open(dest, 'w', encoding='utf8') as f:
            f.write(pem)
        self.trace_action('pubkeys-add', vars(args))

    def action_delete_pubkey(self, args):
        """Delete a stored public key by name; a missing key is a no-op."""
        self.check_right(Right.PKEY_DELETE)
        pem_path = self.pubkeys_dir / f'{args.name}.pem'
        if pem_path.exists():
            pem_path.unlink()
        self.trace_action('pubkeys-delete', vars(args))

    def action_list_rights(self, args):
        """Print a user's rights (the caller's own rights by default).

        Verbose mode ignores any --app/--env filter and shows each right
        with its env and app regexes; otherwise only right names are
        printed, optionally filtered by app and env.
        """
        verbose = args.verbose
        app = None if verbose else args.app
        target_env = None if verbose else args.env
        if app:
            self._check_app_name(app)
        if target_env:
            self._check_target_env(target_env)
        user = args.user or self.get_ssh_user()
        if user != self.get_ssh_user():
            # inspecting someone else's rights is itself a right
            self.check_right(Right.RIGHT_LIST)
        rights = self.get_rights(user, target_env, app)
        self.trace_action('rights-list', vars(args))
        if not verbose:
            print('\n'.join(sorted(rights)))
            return
        for name, (env_regex, app_regex) in sorted(rights.items()):
            print(f'{name.ljust(19)} {env_regex.ljust(39)} {app_regex}')

    def action_add_rights(self, args):
        """Grant rights to a user.

        Each right is 'name[,env_regex[,app_regex]]'; an omitted or empty
        regex defaults to '.*'. Unknown right names are silently ignored.
        """
        self.check_right(Right.RIGHT_ADD)
        user = args.user
        rights = self.get_rights(user)
        self.trace_action('rights-add', vars(args))
        valid_names = Right.names()
        for right in args.rights:
            # BUG FIX: 'name,env_regex' (a single comma) used to raise
            # ValueError on the 3-way unpack; pad the split to exactly
            # three fields so one or both regexes may be omitted.
            name, env_regex, app_regex = (right.split(',') + ['', ''])[:3]
            if name in valid_names:
                rights[name] = [env_regex or '.*', app_regex or '.*']
        self._update_user_rights(user, rights)

    def action_delete_rights(self, args):
        """Revoke rights from a user.

        Raises ValueError when a named right is not currently granted —
        consistent with the ValueError messages of the other actions,
        instead of the previous bare KeyError from `del`.
        """
        self.check_right(Right.RIGHT_DELETE)
        user = args.user
        rights = self.get_rights(user)
        self.trace_action('rights-delete', vars(args))
        for name in args.rights:
            if name not in rights:
                raise ValueError(f"right '{name}' is not set for user '{user}'")
            del rights[name]
        self._update_user_rights(user, rights)


if __name__ == '__main__':
    # Entry point: verify the minimum supported Python version, then
    # build the application object and dispatch the requested CLI action.
    check_python()
    capp = CApp()
    capp.run_action()