diff --git a/changelog/.template.jinja b/changelog/.template.jinja index 0cf429a3b182..c0bd12b0d7f7 100644 --- a/changelog/.template.jinja +++ b/changelog/.template.jinja @@ -4,7 +4,14 @@ ### {{ definitions[category]['name'] }} {% for text, values in sections[""][category].items() %} -- {{ text }} {{ values|join(', ') }} +{% set lines = text.split('\n') %} +{% for line in lines %} +{% if line.startswith('- ') %} +{{ line | trim }} {{ values|join(', ') }} +{% else %} +- {{ line | trim }} {{ values|join(', ') }} +{% endif %} +{% endfor %} {% endfor %} {% endfor %} diff --git a/changelog/67799.added.md b/changelog/67799.added.md new file mode 100644 index 000000000000..db7ccaed9008 --- /dev/null +++ b/changelog/67799.added.md @@ -0,0 +1,3 @@ +refactored server-side PKI to support cache interface +optimization: check_compound_minions: defer _pki_minions fetch +refactor: push salt.utils.minions bits into salt.key / optimize matching diff --git a/doc/ref/cache/all/index.rst b/doc/ref/cache/all/index.rst index 7976f3e853bd..4081bf54e1c8 100644 --- a/doc/ref/cache/all/index.rst +++ b/doc/ref/cache/all/index.rst @@ -15,5 +15,6 @@ For understanding and usage of the cache modules see the :ref:`cache` topic. consul etcd_cache localfs + localfs_key mysql_cache redis_cache diff --git a/doc/ref/cache/all/salt.cache.localfs_key.rst b/doc/ref/cache/all/salt.cache.localfs_key.rst new file mode 100644 index 000000000000..d40919480e50 --- /dev/null +++ b/doc/ref/cache/all/salt.cache.localfs_key.rst @@ -0,0 +1,5 @@ +salt.cache.localfs_key +================================= + +.. automodule:: salt.cache.localfs_key + :members: diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index 64dcef30257a..f00e84ba33a0 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -30,6 +30,7 @@ import salt.utils.network import salt.utils.user import salt.utils.versions +from salt.utils.decorators import cached_property log = logging.getLogger(__name__) @@ -60,7 +61,11 @@ def __init__(self, opts, ckminions=None): self.max_fail = 1.0 self.auth = salt.loader.auth(opts) self.tokens = salt.loader.eauth_tokens(opts) - self.ckminions = ckminions or salt.utils.minions.CkMinions(opts) + self._ckminions = ckminions + + @cached_property + def ckminions(self): + return self._ckminions or salt.utils.minions.CkMinions(self.opts) def load_name(self, load): """ diff --git a/salt/cache/__init__.py b/salt/cache/__init__.py index fb2ecdc62f14..8b0389d313d7 100644 --- a/salt/cache/__init__.py +++ b/salt/cache/__init__.py @@ -62,7 +62,9 @@ def __init__(self, opts, cachedir=None, **kwargs): self.cachedir = opts.get("cachedir", salt.syspaths.CACHE_DIR) else: self.cachedir = cachedir - self.driver = opts.get("cache", salt.config.DEFAULT_MASTER_OPTS["cache"]) + self.driver = kwargs.get( + "driver", opts.get("cache", salt.config.DEFAULT_MASTER_OPTS["cache"]) + ) self._modules = None self._kwargs = kwargs self._kwargs["cachedir"] = self.cachedir diff --git a/salt/cache/localfs_key.py b/salt/cache/localfs_key.py new file mode 100644 index 000000000000..9ff7e440e275 --- /dev/null +++ b/salt/cache/localfs_key.py @@ -0,0 +1,387 @@ +""" +Backward compatible shim layer for pki interaction + +.. versionadded:: 3008.0 + +The ``localfs_key`` is a shim driver meant to allow the salt.cache +subsystem to interact with the existing master pki folder/file structure +without any migration from previous versions of salt. It is not meant for +general purpose use and should not be used outside of the master auth system. 
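+
+The driver is selected through the ``keys.cache_driver`` option, which
+defaults to ``localfs_key`` for both master and minion. A minimal sketch of
+the relevant config:
+
+.. code-block:: yaml
+
+    keys.cache_driver: localfs_key
+
+Key data is then reachable through the generic cache interface, roughly as in
+the sketch below (the minion id is illustrative and ``__opts__`` stands for a
+loaded master config):
+
+.. code-block:: python
+
+    import salt.cache
+
+    cache = salt.cache.Cache(__opts__, driver=__opts__["keys.cache_driver"])
+    if cache.contains("keys", "minion1"):
+        key = cache.fetch("keys", "minion1")  # {"state": "accepted", "pub": "..."}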
+ +The main difference from before is the 'state' of the key, ie accepted/rejected +is now stored in the data itself, as opposed to the cache equivalent of a bank +previously. + +store and fetch handle ETL from new style, where data itself contains key +state, to old style, where folder and/or bank contain state. +flush/list/contains/updated are left as nearly equivalent to localfs, without +the .p file extension to work with legacy keys via banks. +""" + +import errno +import logging +import os +import os.path +import shutil +import stat +import tempfile +from pathlib import Path + +import salt.utils.atomicfile +import salt.utils.files +import salt.utils.stringutils +from salt.exceptions import SaltCacheError +from salt.utils.verify import clean_path, valid_id + +log = logging.getLogger(__name__) + +__func_alias__ = {"list_": "list"} + + +BASE_MAPPING = { + "minions_pre": "pending", + "minions_rejected": "rejected", + "minions": "accepted", + "minions_denied": "denied", +} + +# master_keys keys that if fetched, even with cluster_id set, will still refer +# to pki_dir instead of cluster_pki_dir +NON_CLUSTERED_MASTER_KEYS = [] + + +# we explicitly override cache dir to point to pki here +def init_kwargs(kwargs): + """ + setup kwargs for cache functions + """ + if __opts__["__role"] != "minion": + global NON_CLUSTERED_MASTER_KEYS + NON_CLUSTERED_MASTER_KEYS = [ + "master.pem", + "master.pub", + f"{__opts__['master_sign_key_name']}.pem", + f"{__opts__['master_sign_key_name']}.pub", + f"{__opts__['id'].removesuffix('_master')}.pub", + f"{__opts__['id'].removesuffix('_master')}.pem", + __opts__.get( + "master_pubkey_signature", f"{__opts__['id']}_pubkey_signature" + ), + ] + + if "pki_dir" in kwargs: + pki_dir = kwargs["pki_dir"] + elif __opts__.get("cluster_id"): + pki_dir = __opts__["cluster_pki_dir"] + else: + pki_dir = __opts__["pki_dir"] + + user = kwargs.get("user", __opts__.get("user")) + + return {"cachedir": pki_dir, "user": user} + + +def store(bank, key, data, cachedir, user, **kwargs): + """ + Store key state information. storing a accepted/pending/rejected state + means clearing it from the other 2. 
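+    For the ``keys`` bank, ``data`` is expected to look roughly like
+    ``{"state": "accepted", "pub": "-----BEGIN PUBLIC KEY-----..."}`` (a
+    sketch of the shape used by the auth refactor, not a full schema).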
denied is handled separately + """ + if bank in ["keys", "denied_keys"] and not valid_id(__opts__, key): + raise SaltCacheError(f"key {key} is not a valid minion_id") + + if bank not in ["keys", "denied_keys", "master_keys"]: + raise SaltCacheError(f"Unrecognized bank: {bank}") + + if __opts__["permissive_pki_access"]: + umask = 0o0700 + else: + umask = 0o0750 + + if bank == "keys": + if data["state"] == "rejected": + base = "minions_rejected" + elif data["state"] == "pending": + base = "minions_pre" + elif data["state"] == "accepted": + base = "minions" + else: + raise SaltCacheError("Unrecognized data/bank: {}".format(data["state"])) + data = data["pub"] + elif bank == "denied_keys": + # denied keys is a list post migration, but is a single key in legacy + data = data[0] + base = "minions_denied" + elif bank == "master_keys": + # private keys are separate from permissive_pki_access + umask = 0o277 + base = "" + # even in clustered mode, master and signing keys live in the + # non-clustered pki dir + if key in NON_CLUSTERED_MASTER_KEYS: + cachedir = __opts__["pki_dir"] + + savefn = Path(cachedir) / base / key + base = savefn.parent + + if not clean_path(cachedir, str(savefn), subdir=True): + raise SaltCacheError(f"key {key} is not a valid key path.") + + try: + os.makedirs(base) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise SaltCacheError( + f"The cache directory, {base}, could not be created: {exc}" + ) + + # delete current state before re-serializing new state + flush(bank, key, cachedir, **kwargs) + + tmpfh, tmpfname = tempfile.mkstemp(dir=base) + os.close(tmpfh) + + if user: + try: + import pwd + + uid = pwd.getpwnam(user).pw_uid + os.chown(tmpfname, uid, -1) + except (KeyError, ImportError, OSError): + # The specified user was not found, allow the backup systems to + # report the error + pass + + try: + with salt.utils.files.set_umask(umask): + with salt.utils.files.fopen(tmpfname, "w+b") as fh_: + fh_.write(salt.utils.stringutils.to_bytes(data)) + + if bank == "master_keys": + os.chmod(tmpfname, 0o400) + + # On Windows, os.rename will fail if the destination file exists. 
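+            # The key is written to a temporary file and then renamed into
+            # place so that readers never observe a partially written key.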
+ salt.utils.atomicfile.atomic_rename(tmpfname, savefn) + except OSError as exc: + raise SaltCacheError( + f"There was an error writing the cache file, base={base}: {exc}" + ) + + +def fetch(bank, key, cachedir, **kwargs): + """ + Fetch and construct state data for a given minion based on the bank and id + """ + if bank in ["keys", "denied_keys"] and not valid_id(__opts__, key): + raise SaltCacheError(f"key {key} is not a valid minion_id") + + if bank not in ["keys", "denied_keys", "master_keys"]: + raise SaltCacheError(f"Unrecognized bank: {bank}") + + if not clean_path(cachedir, key, subdir=True): + raise SaltCacheError(f"key {key} is not a valid key path.") + + if key == ".key_cache": + raise SaltCacheError("trying to read key_cache, there is a bug at call-site") + try: + if bank == "keys": + for state, bank in [ + ("rejected", "minions_rejected"), + ("pending", "minions_pre"), + ("accepted", "minions"), + ]: + keyfile = Path(cachedir, bank, key) + + if not clean_path(cachedir, str(keyfile), subdir=True): + raise SaltCacheError(f"key {key} is not a valid key path.") + + if keyfile.is_file() and not keyfile.is_symlink(): + with salt.utils.files.fopen(keyfile, "r") as fh_: + return {"state": state, "pub": fh_.read()} + return None + elif bank == "denied_keys": + # there can be many denied keys per minion post refactor, but only 1 + # with the filesystem, so return a list of 1 + pubfn_denied = os.path.join(cachedir, "minions_denied", key) + + if not clean_path(cachedir, pubfn_denied, subdir=True): + raise SaltCacheError(f"key {key} is not a valid key path.") + + if os.path.isfile(pubfn_denied): + with salt.utils.files.fopen(pubfn_denied, "r") as fh_: + return [fh_.read()] + elif bank == "master_keys": + if key in NON_CLUSTERED_MASTER_KEYS: + cachedir = __opts__["pki_dir"] + + keyfile = Path(cachedir, key) + + if not clean_path(cachedir, str(keyfile), subdir=True): + raise SaltCacheError(f"key {key} is not a valid key path.") + + if keyfile.is_file() and not keyfile.is_symlink(): + with salt.utils.files.fopen(keyfile, "r") as fh_: + return fh_.read() + else: + raise SaltCacheError(f'unrecognized bank "{bank}"') + except OSError as exc: + raise SaltCacheError( + 'There was an error reading the cache bank "{}", key "{}": {}'.format( + bank, key, exc + ) + ) + + +def updated(bank, key, cachedir, **kwargs): + """ + Return the epoch of the mtime for this cache file + """ + if not valid_id(__opts__, key): + raise SaltCacheError(f"key {key} is not a valid minion_id") + + if bank == "keys": + bases = [base for base in BASE_MAPPING if base != "minions_denied"] + elif bank == "denied_keys": + bases = ["minions_denied"] + elif bank == "master_keys": + if key in NON_CLUSTERED_MASTER_KEYS: + cachedir = __opts__["pki_dir"] + bases = [""] + else: + raise SaltCacheError(f"Unrecognized bank: {bank}") + + for dir in bases: + keyfile = Path(cachedir, dir, key) + + if not clean_path(cachedir, str(keyfile), subdir=True): + raise SaltCacheError(f"key {key} is not a valid key path.") + + if keyfile.is_file() and not keyfile.is_symlink(): + try: + return int(os.path.getmtime(keyfile)) + except OSError as exc: + raise SaltCacheError( + 'There was an error reading the mtime for "{}": {}'.format( + keyfile, exc + ) + ) + log.debug('pki file "%s" does not exist in accepted/rejected/pending', key) + return + + +def flush(bank, key=None, cachedir=None, **kwargs): + """ + Remove the key from the cache bank with all the key content. 
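+    When ``key`` is None the whole directory backing the bank is removed.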
+ flush can take a legacy bank or a keys/denied_keys modern bank + """ + if bank in ["keys", "denied_keys"] and not valid_id(__opts__, key): + raise SaltCacheError(f"key {key} is not a valid minion_id") + + if cachedir is None: + raise SaltCacheError("cachedir missing") + + if bank == "keys": + bases = [base for base in BASE_MAPPING if base != "minions_denied"] + elif bank == "denied_keys": + bases = ["minions_denied"] + elif bank == "master_keys": + if key in NON_CLUSTERED_MASTER_KEYS: + cachedir = __opts__["pki_dir"] + bases = [""] + else: + raise SaltCacheError(f"Unrecognized bank: {bank}") + + flushed = False + + for base in bases: + try: + if key is None: + target = os.path.join(cachedir, base) + if not os.path.isdir(target): + return False + shutil.rmtree(target) + else: + target = os.path.join(cachedir, base, key) + + if not clean_path(cachedir, target, subdir=True): + raise SaltCacheError(f"key {key} is not a valid key path.") + + if not os.path.isfile(target): + continue + + # necessary on windows, otherwise PermissionError: [WinError 5] Access is denied + os.chmod(target, stat.S_IWRITE) + + os.remove(target) + flushed = True + except OSError as exc: + if exc.errno != errno.ENOENT: + raise SaltCacheError(f'There was an error removing "{target}": {exc}') + return flushed + + +def list_(bank, cachedir, **kwargs): + """ + Return an iterable object containing all entries stored in the specified bank. + """ + if bank == "keys": + bases = [base for base in BASE_MAPPING if base != "minions_denied"] + elif bank == "denied_keys": + bases = ["minions_denied"] + elif bank == "master_keys": + bases = [""] + else: + raise SaltCacheError(f"Unrecognized bank: {bank}") + + ret = [] + for base in bases: + base = os.path.join(cachedir, os.path.normpath(base)) + if not os.path.isdir(base): + continue + try: + items = os.listdir(base) + except OSError as exc: + raise SaltCacheError( + f'There was an error accessing directory "{base}": {exc}' + ) + for item in items: + # salt foolishly dumps a file here for key cache, ignore it + keyfile = Path(cachedir, base, item) + + if ( + bank in ["keys", "denied_keys"] and not valid_id(__opts__, item) + ) or not clean_path(cachedir, str(keyfile), subdir=True): + log.error("saw invalid id %s, discarding", item) + + if keyfile.is_file() and not keyfile.is_symlink(): + ret.append(item) + return ret + + +def contains(bank, key, cachedir, **kwargs): + """ + Checks if the specified bank contains the specified key. 
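+    For the ``keys`` bank this reports presence in any of the
+    accepted/pending/rejected locations without distinguishing the state.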
+ """ + if bank in ["keys", "denied_keys"] and not valid_id(__opts__, key): + raise SaltCacheError(f"key {key} is not a valid minion_id") + + if bank == "keys": + bases = [base for base in BASE_MAPPING if base != "minions_denied"] + elif bank == "denied_keys": + bases = ["minions_denied"] + elif bank == "master_keys": + if key in NON_CLUSTERED_MASTER_KEYS: + cachedir = __opts__["pki_dir"] + bases = [""] + else: + raise SaltCacheError(f"Unrecognized bank: {bank}") + + for base in bases: + keyfile = Path(cachedir, base, key) + + if not clean_path(cachedir, str(keyfile), subdir=True): + raise SaltCacheError(f"key {key} is not a valid key path.") + + if keyfile.is_file() and not keyfile.is_symlink(): + return True + + return False diff --git a/salt/channel/client.py b/salt/channel/client.py index b03f4659a17d..38edec74980f 100644 --- a/salt/channel/client.py +++ b/salt/channel/client.py @@ -18,7 +18,6 @@ import salt.transport.frame import salt.utils.event import salt.utils.files -import salt.utils.minions import salt.utils.stringutils import salt.utils.verify import salt.utils.versions @@ -204,7 +203,7 @@ def crypted_transfer_decode_dictentry( timeout, ) key = self.auth.get_keys() - if "key" not in ret: + if not isinstance(ret, dict) or "key" not in ret: # Reauth in the case our key is deleted on the master side. yield self.auth.authenticate() ret = yield self._send_with_retry( @@ -235,7 +234,7 @@ def crypted_transfer_decode_dictentry( raise tornado.gen.Return(data["pillar"]) def verify_signature(self, data, sig): - return salt.crypt.PublicKey(self.master_pubkey_path).verify( + return salt.crypt.PublicKey.from_file(self.master_pubkey_path).verify( data, sig, self.opts["signing_algorithm"] ) diff --git a/salt/channel/server.py b/salt/channel/server.py index 0872dda89cc5..914fc1a7debf 100644 --- a/salt/channel/server.py +++ b/salt/channel/server.py @@ -10,22 +10,19 @@ import hashlib import logging import os -import pathlib -import shutil import tornado.gen +import salt.cache import salt.crypt import salt.master import salt.payload import salt.transport.frame import salt.utils.channel import salt.utils.event -import salt.utils.files import salt.utils.minions import salt.utils.platform import salt.utils.stringutils -import salt.utils.verify from salt.exceptions import SaltDeserializationError, UnsupportedAlgorithm from salt.utils.cache import CacheCli @@ -57,6 +54,7 @@ def compare_keys(cls, key1, key2): def __init__(self, opts, transport): self.opts = opts self.transport = transport + self.cache = salt.cache.Cache(opts, driver=self.opts["keys.cache_driver"]) self.event = salt.utils.event.get_master_event( self.opts, self.opts["sock_dir"], listen=False ) @@ -207,19 +205,27 @@ def _encrypt_private( The server equivalent of ReqChannel.crypted_transfer_decode_dictentry """ # encrypt with a specific AES key - if self.master_key.cluster_key: - pubfn = os.path.join(self.opts["cluster_pki_dir"], "minions", target) - else: - pubfn = os.path.join(self.opts["pki_dir"], "minions", target) - key = salt.crypt.Crypticle.generate_key_string() - pcrypt = salt.crypt.Crypticle(self.opts, key) try: - pub = salt.crypt.PublicKey(pubfn) - except (ValueError, IndexError, TypeError): + key = salt.crypt.Crypticle.generate_key_string() + pcrypt = salt.crypt.Crypticle(self.opts, key) + pub = self.cache.fetch("keys", target) + if not isinstance(pub, dict) or "pub" not in pub: + log.error( + "No pub key found for target %s, its pub key was likely deleted mid-request.", + target, + ) + return self.crypticle.dumps({}) + + pub = 
salt.crypt.PublicKey.from_str(pub["pub"]) + except Exception as exc: # pylint: disable=broad-except + log.error( + 'Corrupt or missing public key "%s": %s', + target, + exc, + exc_info_on_loglevel=logging.DEBUG, + ) return self.crypticle.dumps({}) - except OSError: - log.error("AES key not found") - return {"error": "AES key not found"} + pret = {} pret["key"] = pub.encrypt(key, encryption_algorithm) if ret is False: @@ -232,9 +238,7 @@ def _encrypt_private( ) signed_msg = { "data": tosign, - "sig": salt.crypt.PrivateKey(self.master_key.rsa_path).sign( - tosign, algorithm=signing_algorithm - ), + "sig": self.master_key.sign(tosign, algorithm=signing_algorithm), } pret[dictkey] = pcrypt.dumps(signed_msg) else: @@ -247,9 +251,7 @@ def _clear_signed(self, load, algorithm): return { "enc": "clear", "load": tosign, - "sig": salt.crypt.PrivateKey(self.master_key.rsa_path).sign( - tosign, algorithm=algorithm - ), + "sig": self.master_key.sign(tosign, algorithm=algorithm), } except UnsupportedAlgorithm: log.info( @@ -328,6 +330,8 @@ def _auth(self, load, sign_messages=False): else: return {"enc": "clear", "load": {"ret": False}} log.info("Authentication request from %s", load["id"]) + # remove any trailing whitespace + load["pub"] = load["pub"].strip() # 0 is default which should be 'unlimited' if self.opts["max_minions"] > 0: @@ -371,26 +375,29 @@ def _auth(self, load, sign_messages=False): else: return {"enc": "clear", "load": {"ret": "full"}} - pki_dir = self.opts["pki_dir"] - if self.opts["cluster_id"]: - if self.opts["cluster_pki_dir"]: - pki_dir = self.opts["cluster_pki_dir"] - # Check if key is configured to be auto-rejected/signed auto_reject = self.auto_key.check_autoreject(load["id"]) auto_sign = self.auto_key.check_autosign( load["id"], load.get("autosign_grains", None) ) - pubfn = os.path.join(pki_dir, "minions", load["id"]) - pubfn_pend = os.path.join(pki_dir, "minions_pre", load["id"]) - pubfn_rejected = os.path.join(pki_dir, "minions_rejected", load["id"]) - pubfn_denied = os.path.join(pki_dir, "minions_denied", load["id"]) + # key will be a dict of str and state + # state can be one of pending, rejected, accepted + key = self.cache.fetch("keys", load["id"]) + + # although keys should be always newline stripped in current state of auth.py + # older salt versions may have written pub-keys with trailing whitespace + if key and "pub" in key: + key["pub"] = key["pub"].strip() + + # any number of keys can be denied for a given minion_id regardless of above + denied = self.cache.fetch("denied_keys", load["id"]) or [] + if self.opts["open_mode"]: # open mode is turned on, nuts to checks and overwrite whatever # is there pass - elif os.path.isfile(pubfn_rejected): + elif key and key["state"] == "rejected": # The key has been rejected, don't place it in pending log.info( "Public key rejected for %s. Key is present in rejection key dir.", @@ -405,42 +412,26 @@ def _auth(self, load, sign_messages=False): ) else: return {"enc": "clear", "load": {"ret": False}} - elif os.path.isfile(pubfn): + elif key and key["state"] == "accepted": # The key has been accepted, check it - with salt.utils.files.fopen(pubfn, "r") as pubfn_handle: - if not self.compare_keys(pubfn_handle.read(), load["pub"]): - log.error( - "Authentication attempt from %s failed, the public " - "keys did not match. 
This may be an attempt to compromise " - "the Salt cluster.", - load["id"], - ) - # put denied minion key into minions_denied - with salt.utils.files.fopen(pubfn_denied, "w+") as fp_: - fp_.write(load["pub"]) - eload = { - "result": False, - "id": load["id"], - "act": "denied", - "pub": load["pub"], - } - if self.opts.get("auth_events") is True: - self.event.fire_event( - eload, salt.utils.event.tagify(prefix="auth") - ) - if sign_messages: - return self._clear_signed( - {"ret": False, "nonce": load["nonce"]}, sig_algo - ) - else: - return {"enc": "clear", "load": {"ret": False}} + if not self.compare_keys(key["pub"], load["pub"]): + log.error( + "Authentication attempt from %s failed, the public " + "keys did not match. This may be an attempt to compromise " + "the Salt cluster.", + load["id"], + ) + # put denied minion key into minions_denied + if load["pub"] not in denied: + denied.append(load["pub"]) + self.cache.store("denied_keys", load["id"], denied) - elif not os.path.isfile(pubfn_pend): - # The key has not been accepted, this is a new minion - if os.path.isdir(pubfn_pend): - # The key path is a directory, error out - log.info("New public key %s is a directory", load["id"]) - eload = {"result": False, "id": load["id"], "pub": load["pub"]} + eload = { + "result": False, + "id": load["id"], + "act": "denied", + "pub": load["pub"], + } if self.opts.get("auth_events") is True: self.event.fire_event(eload, salt.utils.event.tagify(prefix="auth")) if sign_messages: @@ -450,27 +441,28 @@ def _auth(self, load, sign_messages=False): else: return {"enc": "clear", "load": {"ret": False}} + elif not key: + # The key has not been accepted, this is a new minion if auto_reject: - key_path = pubfn_rejected log.info( "New public key for %s rejected via autoreject_file", load["id"] ) + key = {"pub": load["pub"], "state": "rejected"} + self.cache.store("keys", load["id"], key) key_act = "reject" key_result = False elif not auto_sign: - key_path = pubfn_pend log.info("New public key for %s placed in pending", load["id"]) + key = {"pub": load["pub"], "state": "pending"} + self.cache.store("keys", load["id"], key) key_act = "pend" key_result = True else: # The key is being automatically accepted, don't do anything # here and let the auto accept logic below handle it. - key_path = None + key_result = None - if key_path is not None: - # Write the key to the appropriate location - with salt.utils.files.fopen(key_path, "w+") as fp_: - fp_.write(load["pub"]) + if key_result is not None: eload = { "result": key_result, "act": key_act, @@ -487,16 +479,14 @@ def _auth(self, load, sign_messages=False): else: return {"enc": "clear", "load": {"ret": key_result}} - elif os.path.isfile(pubfn_pend): + elif key and key["state"] == "pending": # This key is in the pending dir and is awaiting acceptance if auto_reject: # We don't care if the keys match, this minion is being # auto-rejected. Move the key file from the pending dir to the # rejected dir. - try: - shutil.move(pubfn_pend, pubfn_rejected) - except OSError: - pass + key["state"] = "rejected" + self.cache.store("keys", load["id"], key) log.info( "Pending public key for %s rejected via autoreject_file", load["id"], @@ -521,87 +511,84 @@ def _auth(self, load, sign_messages=False): # Check if the keys are the same and error out if this is the # case. Otherwise log the fact that the minion is still # pending. 
- with salt.utils.files.fopen(pubfn_pend, "r") as pubfn_handle: - if not self.compare_keys(pubfn_handle.read(), load["pub"]): - log.error( - "Authentication attempt from %s failed, the public " - "key in pending did not match. This may be an " - "attempt to compromise the Salt cluster.", - load["id"], + if not self.compare_keys(key["pub"], load["pub"]): + log.error( + "Authentication attempt from %s failed, the public " + "key in pending did not match. This may be an " + "attempt to compromise the Salt cluster.", + load["id"], + ) + # put denied minion key into minions_denied + if load["pub"] not in denied: + denied.append(load["pub"]) + self.cache.store("denied_keys", load["id"], denied) + eload = { + "result": False, + "id": load["id"], + "act": "denied", + "pub": load["pub"], + } + if self.opts.get("auth_events") is True: + self.event.fire_event( + eload, salt.utils.event.tagify(prefix="auth") + ) + if sign_messages: + return self._clear_signed( + {"ret": False, "nonce": load["nonce"]}, sig_algo ) - # put denied minion key into minions_denied - with salt.utils.files.fopen(pubfn_denied, "w+") as fp_: - fp_.write(load["pub"]) - eload = { - "result": False, - "id": load["id"], - "act": "denied", - "pub": load["pub"], - } - if self.opts.get("auth_events") is True: - self.event.fire_event( - eload, salt.utils.event.tagify(prefix="auth") - ) - if sign_messages: - return self._clear_signed( - {"ret": False, "nonce": load["nonce"]}, sig_algo - ) - else: - return {"enc": "clear", "load": {"ret": False}} else: - log.info( - "Authentication failed from host %s, the key is in " - "pending and needs to be accepted with salt-key " - "-a %s", - load["id"], - load["id"], + return {"enc": "clear", "load": {"ret": False}} + else: + log.info( + "Authentication failed from host %s, the key is in " + "pending and needs to be accepted with salt-key " + "-a %s", + load["id"], + load["id"], + ) + eload = { + "result": True, + "act": "pend", + "id": load["id"], + "pub": load["pub"], + } + if self.opts.get("auth_events") is True: + self.event.fire_event( + eload, salt.utils.event.tagify(prefix="auth") ) - eload = { - "result": True, - "act": "pend", - "id": load["id"], - "pub": load["pub"], - } - if self.opts.get("auth_events") is True: - self.event.fire_event( - eload, salt.utils.event.tagify(prefix="auth") - ) - if sign_messages: - return self._clear_signed( - {"ret": True, "nonce": load["nonce"]}, sig_algo - ) - else: - return {"enc": "clear", "load": {"ret": True}} + if sign_messages: + return self._clear_signed( + {"ret": True, "nonce": load["nonce"]}, sig_algo + ) + else: + return {"enc": "clear", "load": {"ret": True}} else: # This key is in pending and has been configured to be # auto-signed. Check to see if it is the same key, and if # so, pass on doing anything here, and let it get automatically # accepted below. - with salt.utils.files.fopen(pubfn_pend, "r") as pubfn_handle: - if not self.compare_keys(pubfn_handle.read(), load["pub"]): - log.error( - "Authentication attempt from %s failed, the public " - "keys in pending did not match. This may be an " - "attempt to compromise the Salt cluster.", - load["id"], + if not self.compare_keys(key["pub"], load["pub"]): + log.error( + "Authentication attempt from %s failed, the public " + "keys in pending did not match. 
This may be an " + "attempt to compromise the Salt cluster.", + load["id"], + ) + # put denied minion key into minions_denied + if load["pub"] not in denied: + denied.append(load["pub"]) + self.cache.store("denied_keys", load["id"], denied) + eload = {"result": False, "id": load["id"], "pub": load["pub"]} + if self.opts.get("auth_events") is True: + self.event.fire_event( + eload, salt.utils.event.tagify(prefix="auth") + ) + if sign_messages: + return self._clear_signed( + {"ret": False, "nonce": load["nonce"]}, sig_algo ) - # put denied minion key into minions_denied - with salt.utils.files.fopen(pubfn_denied, "w+") as fp_: - fp_.write(load["pub"]) - eload = {"result": False, "id": load["id"], "pub": load["pub"]} - if self.opts.get("auth_events") is True: - self.event.fire_event( - eload, salt.utils.event.tagify(prefix="auth") - ) - if sign_messages: - return self._clear_signed( - {"ret": False, "nonce": load["nonce"]}, sig_algo - ) - else: - return {"enc": "clear", "load": {"ret": False}} else: - os.remove(pubfn_pend) - + return {"enc": "clear", "load": {"ret": False}} else: # Something happened that I have not accounted for, FAIL! log.warning("Unaccounted for authentication failure") @@ -616,20 +603,16 @@ def _auth(self, load, sign_messages=False): return {"enc": "clear", "load": {"ret": False}} log.info("Authentication accepted from %s", load["id"]) + # only write to disk if you are adding the file, and in open mode, # which implies we accept any key from a minion. - if not os.path.isfile(pubfn) and not self.opts["open_mode"]: - with salt.utils.files.fopen(pubfn, "w+") as fp_: - fp_.write(load["pub"]) + if (not key or key["state"] != "accepted") and not self.opts["open_mode"]: + key = {"pub": load["pub"], "state": "accepted"} + self.cache.store("keys", load["id"], key) elif self.opts["open_mode"]: - disk_key = "" - if os.path.isfile(pubfn): - with salt.utils.files.fopen(pubfn, "r") as fp_: - disk_key = fp_.read() - if load["pub"] and load["pub"] != disk_key: - log.debug("Host key change detected in open mode.") - with salt.utils.files.fopen(pubfn, "w+") as fp_: - fp_.write(load["pub"]) + if load["pub"] and (not key or load["pub"] != key["pub"]): + key = {"pub": load["pub"], "state": "accepted"} + self.cache.store("keys", load["id"], key) elif not load["pub"]: log.error("Public key is empty: %s", load["id"]) if sign_messages: @@ -648,9 +631,14 @@ def _auth(self, load, sign_messages=False): # The key payload may sometimes be corrupt when using auto-accept # and an empty request comes in try: - pub = salt.crypt.PublicKey(pubfn) + pub = salt.crypt.PublicKey.from_str(key["pub"]) except salt.crypt.InvalidKeyError as err: - log.error('Corrupt public key "%s": %s', pubfn, err) + log.error( + 'Corrupt or missing public key "%s": %s', + load["id"], + err, + exc_info_on_loglevel=logging.DEBUG, + ) if sign_messages: return self._clear_signed( {"ret": False, "nonce": load["nonce"]}, sig_algo @@ -668,31 +656,23 @@ def _auth(self, load, sign_messages=False): # sent to the minion that was just authenticated if self.opts["master_sign_pubkey"]: # append the pre-computed signature to the auth-reply - if self.master_key.pubkey_signature(): + if self.master_key.pubkey_signature: log.debug("Adding pubkey signature to auth-reply") - log.debug(self.master_key.pubkey_signature()) - ret.update({"pub_sig": self.master_key.pubkey_signature()}) + log.debug(self.master_key.pubkey_signature) + ret.update({"pub_sig": self.master_key.pubkey_signature}) else: # the master has its own signing-keypair, compute the 
master.pub's # signature and append that to the auth-reply - - # get the key_pass for the signing key - key_pass = salt.utils.sdb.sdb_get( - self.opts["signing_key_pass"], self.opts - ) log.debug("Signing master public key before sending") - pub_sign = salt.crypt.sign_message( - self.master_key.get_sign_paths()[1], - ret["pub_key"], - key_pass, - algorithm=sig_algo, + pub_sign = self.master_key.sign_key.sign( + ret["pub_key"], algorithm=sig_algo ) ret.update({"pub_sig": binascii.b2a_base64(pub_sign)}) if self.opts["auth_mode"] >= 2: if "token" in load: try: - mtoken = self.master_key.key.decrypt(load["token"], enc_algo) + mtoken = self.master_key.decrypt(load["token"], enc_algo) aes = "{}_|-{}".format( salt.master.SMaster.secrets["aes"]["secret"].value, mtoken ) @@ -714,7 +694,7 @@ def _auth(self, load, sign_messages=False): else: if "token" in load: try: - mtoken = self.master_key.key.decrypt(load["token"], enc_algo) + mtoken = self.master_key.decrypt(load["token"], enc_algo) ret["token"] = pub.encrypt(mtoken, enc_algo) except UnsupportedAlgorithm as exc: log.info( @@ -733,7 +713,7 @@ def _auth(self, load, sign_messages=False): # Be aggressive about the signature digest = salt.utils.stringutils.to_bytes(hashlib.sha256(aes).hexdigest()) - ret["sig"] = self.master_key.key.encrypt(digest) + ret["sig"] = self.master_key.encrypt(digest) eload = {"result": True, "act": "accept", "id": load["id"], "pub": load["pub"]} if self.opts.get("auth_events") is True: self.event.fire_event(eload, salt.utils.event.tagify(prefix="auth")) @@ -922,9 +902,9 @@ def wrap_payload(self, load): if self.opts["sign_pub_messages"]: log.debug("Signing data packet") payload["sig_algo"] = self.opts["publish_signing_algorithm"] - payload["sig"] = salt.crypt.PrivateKey( - self.master_key.rsa_path, - ).sign(payload["load"], self.opts["publish_signing_algorithm"]) + payload["sig"] = self.master_key.sign( + payload["load"], self.opts["publish_signing_algorithm"] + ) int_payload = {"payload": salt.payload.dumps(payload)} @@ -979,11 +959,8 @@ def __init__(self, opts, transport, presence_events=False): def send_aes_key_event(self): data = {"peer_id": self.opts["id"], "peers": {}} for peer in self.opts.get("cluster_peers", []): - peer_pub = ( - pathlib.Path(self.opts["cluster_pki_dir"]) / "peers" / f"{peer}.pub" - ) - if peer_pub.exists(): - pub = salt.crypt.PublicKey(peer_pub) + pub = self.master_key.fetch(f"peers/{peer}.pub") + if pub: aes = salt.master.SMaster.secrets["aes"]["secret"].value digest = salt.utils.stringutils.to_bytes( hashlib.sha256(aes).hexdigest() @@ -993,7 +970,7 @@ def send_aes_key_event(self): "sig": self.master_key.master_key.encrypt(digest), } else: - log.warning("Peer key missing %r", peer_pub) + log.warning("Peer key missing %r", "peers/{peer}.pub") data["peers"][peer] = {} with salt.utils.event.get_master_event( self.opts, self.opts["sock_dir"], listen=False @@ -1077,7 +1054,7 @@ def _publish_daemon(self, **kwargs): async def handle_pool_publish(self, payload, _): """ - Handle incomming events from cluster peer. + Handle incoming events from cluster peer. 
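+        The event carries the cluster AES key together with a digest signed by
+        the sending peer; the digest is checked against that peer's public key
+        fetched from the ``peers`` bank.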
""" try: tag, data = salt.utils.event.SaltEvent.unpack(payload) @@ -1091,10 +1068,7 @@ async def handle_pool_publish(self, payload, _): digest = salt.utils.stringutils.to_bytes( hashlib.sha256(key_str).hexdigest() ) - pub_path = ( - pathlib.Path(self.opts["cluster_pki_dir"]) / "peers" / f"{peer}.pub" - ) - key = salt.crypt.PublicKey(pub_path) + key = self.master_key.fetch(f"peers/{peer}.pub") m_digest = key.decrypt(sig) if m_digest != digest: log.error("Invalid aes signature from peer: %s", peer) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index 5f5d0b24c0d5..474c305df0e3 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -1926,6 +1926,9 @@ def pub( payload_kwargs["key"] = self.key payload = channel.send(payload_kwargs) + if isinstance(payload, str): + payload = {"error": payload} + error = payload.pop("error", None) if error is not None: if isinstance(error, dict): diff --git a/salt/config/__init__.py b/salt/config/__init__.py index af6efb7628e0..8b97e814663a 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -162,7 +162,7 @@ def _gather_buffer_space(): "always_verify_signature": bool, # The name of the file in the masters pki-directory that holds the pre-calculated signature of # the masters public-key - "master_pubkey_signature": str, + "master_pubkey_signature": (type(None), str), # Instead of computing the signature for each auth-reply, use a pre-calculated signature. # The master_pubkey_signature must also be set for this. "master_use_pubkey_signature": bool, @@ -1016,6 +1016,8 @@ def _gather_buffer_space(): "signing_algorithm": str, # Master publish channel signing "publish_signing_algorithm": str, + # the cache driver to be used to manage keys for both minion and master + "keys.cache_driver": (type(None), str), } ) @@ -1325,6 +1327,7 @@ def _gather_buffer_space(): "features": {}, "encryption_algorithm": "OAEP-SHA1", "signing_algorithm": "PKCS1v15-SHA1", + "keys.cache_driver": "localfs_key", } ) @@ -1626,7 +1629,7 @@ def _gather_buffer_space(): "max_minions": 0, "master_sign_key_name": "master_sign", "master_sign_pubkey": False, - "master_pubkey_signature": "master_pubkey_signature", + "master_pubkey_signature": None, "master_use_pubkey_signature": False, "zmq_filtering": False, "zmq_monitor": False, @@ -1679,6 +1682,7 @@ def _gather_buffer_space(): "cluster_pool_port": 4520, "features": {}, "publish_signing_algorithm": "PKCS1v15-SHA1", + "keys.cache_driver": "localfs_key", } ) @@ -2555,12 +2559,12 @@ def apply_sdb(opts, sdb_opts=None): """ Recurse for sdb:// links for opts """ - # Late load of SDB to keep CLI light - import salt.utils.sdb - if sdb_opts is None: sdb_opts = opts if isinstance(sdb_opts, str) and sdb_opts.startswith("sdb://"): + # Late load of SDB to keep CLI light + import salt.utils.sdb + return salt.utils.sdb.sdb_get(sdb_opts, opts) elif isinstance(sdb_opts, dict): for key, value in sdb_opts.items(): diff --git a/salt/crypt.py b/salt/crypt.py index d0a8d232a9f7..bc0027993c9a 100644 --- a/salt/crypt.py +++ b/salt/crypt.py @@ -7,7 +7,6 @@ import base64 import binascii import copy -import getpass import hashlib import hmac import logging @@ -24,6 +23,7 @@ import tornado.gen +import salt.cache import salt.channel.client import salt.defaults.exitcodes import salt.payload @@ -31,6 +31,7 @@ import salt.utils.decorators import salt.utils.event import salt.utils.files +import salt.utils.platform import salt.utils.rsax931 import salt.utils.sdb import salt.utils.stringutils @@ -141,80 +142,63 @@ def dropfile(cachedir, 
user=None, master_id=""): os.rename(dfn_next, dfn) -def gen_keys(keydir, keyname, keysize, user=None, passphrase=None, e=65537): +def gen_keys(keysize, passphrase=None, e=65537): """ Generate a RSA public keypair for use with salt - :param str keydir: The directory to write the keypair to - :param str keyname: The type of salt server for whom this key should be written. (i.e. 'master' or 'minion') :param int keysize: The number of bits in the key - :param str user: The user on the system who should own this keypair :param str passphrase: The passphrase which should be used to encrypt the private key :rtype: str :return: Path on the filesystem to the RSA private key """ - base = os.path.join(keydir, keyname) - priv = f"{base}.pem" - pub = f"{base}.pub" - gen = rsa.generate_private_key(e, keysize) - if os.path.isfile(priv): - # Between first checking and the generation another process has made - # a key! Use the winner's key - return priv - - # Do not try writing anything, if directory has no permissions. - if not os.access(keydir, os.W_OK): - raise OSError( - 'Write access denied to "{}" for user "{}".'.format( - os.path.abspath(keydir), getpass.getuser() - ) - ) - - with salt.utils.files.set_umask(0o277): - with salt.utils.files.fopen(priv, "wb+") as f: - if passphrase: - enc = serialization.BestAvailableEncryption(passphrase.encode()) - _format = serialization.PrivateFormat.TraditionalOpenSSL - if fips_enabled(): - _format = serialization.PrivateFormat.PKCS8 - else: - enc = serialization.NoEncryption() - _format = serialization.PrivateFormat.TraditionalOpenSSL - pem = gen.private_bytes( - encoding=serialization.Encoding.PEM, - format=_format, - encryption_algorithm=enc, - ) - f.write(pem) + if passphrase: + enc = serialization.BestAvailableEncryption(passphrase.encode()) + _format = serialization.PrivateFormat.TraditionalOpenSSL + if fips_enabled(): + _format = serialization.PrivateFormat.PKCS8 + else: + enc = serialization.NoEncryption() + _format = serialization.PrivateFormat.TraditionalOpenSSL + priv_pem = gen.private_bytes( + encoding=serialization.Encoding.PEM, + format=_format, + encryption_algorithm=enc, + ) pubkey = gen.public_key() - with salt.utils.files.fopen(pub, "wb+") as f: - pem = pubkey.public_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PublicFormat.SubjectPublicKeyInfo, - ) - f.write(pem) + pub_pem = pubkey.public_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + ) - os.chmod(priv, 0o400) - if user: - try: - import pwd - - uid = pwd.getpwnam(user).pw_uid - os.chown(priv, uid, -1) - os.chown(pub, uid, -1) - except (KeyError, ImportError, OSError): - # The specified user was not found, allow the backup systems to - # report the error - pass - return priv + return ( + salt.utils.stringutils.to_str(priv_pem), + salt.utils.stringutils.to_str(pub_pem), + ) class BaseKey: + @classmethod + def from_file(cls, path, *args, **kwargs): + with salt.utils.files.fopen(path, "rb") as fp: + key = fp.read() + + return cls(key, *args, **kwargs) + + @classmethod + def from_str(cls, key_str, *args, **kwargs): + key_bytes = salt.utils.stringutils.to_bytes(key_str) + + return cls(key_bytes, *args, **kwargs) + + @classmethod + def from_bytes(cls, key_bytes, *args, **kwargs): + return cls(key_bytes, *args, **kwargs) + @staticmethod def parse_padding_for_signing(algorithm): if algorithm not in VALID_SIGNING_ALGORITHMS: @@ -245,8 +229,13 @@ def parse_hash(algorithm): class PrivateKey(BaseKey): - def __init__(self, 
path, passphrase=None): - self.key = get_rsa_key(path, passphrase) + def __init__(self, key_bytes, passphrase=None): + log.debug("Loading private key") + if passphrase: + password = passphrase.encode() + else: + password = None + self.key = serialization.load_pem_private_key(key_bytes, password=password) def encrypt(self, data): pem = self.key.private_bytes( @@ -281,14 +270,22 @@ def decrypt(self, data, algorithm=OAEP_SHA1): except cryptography.exceptions.UnsupportedAlgorithm: raise UnsupportedAlgorithm(f"Unsupported algorithm: {algorithm}") + def public_key(self): + """ + proxy to PrivateKey.public_key() + """ + return self.key.public_key() + class PublicKey(BaseKey): - def __init__(self, path): - with salt.utils.files.fopen(path, "rb") as fp: - try: - self.key = serialization.load_pem_public_key(fp.read()) - except ValueError as exc: - raise InvalidKeyError("Invalid key") + def __init__(self, key_bytes): + log.debug("Loading public key") + try: + self.key = serialization.load_pem_public_key(key_bytes) + except ValueError: + raise InvalidKeyError("Encountered bad RSA public key") + except cryptography.exceptions.UnsupportedAlgorithm: + raise InvalidKeyError("Unsupported key algorithm") def encrypt(self, data, algorithm=OAEP_SHA1): _padding = self.parse_padding_for_encryption(algorithm) @@ -330,60 +327,26 @@ def decrypt(self, data): @salt.utils.decorators.memoize -def _get_key_with_evict(path, timestamp, passphrase): - """ - Load a private key from disk. `timestamp` above is intended to be the - timestamp of the file's last modification. This fn is memoized so if it is - called with the same path and timestamp (the file's last modified time) the - second time the result is returned from the memoization. If the file gets - modified then the params are different and the key is loaded from disk. - """ - log.debug("salt.crypt._get_key_with_evict: Loading private key") - if passphrase: - password = passphrase.encode() - else: - password = None - with salt.utils.files.fopen(path, "rb") as f: - return serialization.load_pem_private_key( - f.read(), - password=password, - ) - - def get_rsa_key(path, passphrase): """ - Read a private key off the disk. Poor man's simple cache in effect here, - we memoize the result of calling _get_rsa_with_evict. This means the first - time _get_key_with_evict is called with a path and a timestamp the result - is cached. If the file (the private key) does not change then its - timestamp will not change and the next time the result is returned from the - cache. If the key DOES change the next time _get_rsa_with_evict is called - it is called with different parameters and the fn is run fully to retrieve - the key from disk. + Read a private key off the disk. we memoize the constructed private key + based on the input args. """ - log.debug("salt.crypt.get_rsa_key: Loading private key") - return _get_key_with_evict(path, str(os.path.getmtime(path)), passphrase) + return PrivateKey.from_file(path, passphrase).key def get_rsa_pub_key(path): """ - Read a public key off the disk. 
+ Return a public key from bytes """ - log.debug("salt.crypt.get_rsa_pub_key: Loading public key") - try: - with salt.utils.files.fopen(path, "rb") as fp: - return serialization.load_pem_public_key(fp.read()) - except ValueError: - raise InvalidKeyError("Encountered bad RSA public key") - except cryptography.exceptions.UnsupportedAlgorithm: - raise InvalidKeyError("Unsupported key algorithm") + return PublicKey.from_file(path).key def sign_message(privkey_path, message, passphrase=None, algorithm=PKCS1v15_SHA1): """ Use Crypto.Signature.PKCS1_v1_5 to sign a message. Returns the signature. """ - return PrivateKey(privkey_path, passphrase).sign(message, algorithm) + return PrivateKey.from_file(privkey_path, passphrase).sign(message, algorithm) def verify_signature(pubkey_path, message, signature, algorithm=PKCS1v15_SHA1): @@ -391,39 +354,8 @@ def verify_signature(pubkey_path, message, signature, algorithm=PKCS1v15_SHA1): Use Crypto.Signature.PKCS1_v1_5 to verify the signature on a message. Returns True for valid signature. """ - log.debug("salt.crypt.verify_signature: Loading public key") - return PublicKey(pubkey_path).verify(message, signature, algorithm) - - -def gen_signature(priv_path, pub_path, sign_path, passphrase=None): - """ - creates a signature for the given public-key with - the given private key and writes it to sign_path - """ - - with salt.utils.files.fopen(pub_path) as fp_: - mpub_64 = fp_.read() - - mpub_sig = sign_message(priv_path, mpub_64, passphrase) - mpub_sig_64 = binascii.b2a_base64(mpub_sig) - if os.path.isfile(sign_path): - return False - log.trace( - "Calculating signature for %s with %s", - os.path.basename(pub_path), - os.path.basename(priv_path), - ) - - if os.path.isfile(sign_path): - log.trace( - "Signature file %s already exists, please remove it first and try again", - sign_path, - ) - else: - with salt.utils.files.fopen(sign_path, "wb+") as sig_f: - sig_f.write(salt.utils.stringutils.to_bytes(mpub_sig_64)) - log.trace("Wrote signature to %s", sign_path) - return True + log.debug("Loading public key") + return PublicKey.from_file(pubkey_path).verify(message, signature, algorithm) def pwdata_decrypt(rsa_key, pwdata): @@ -443,60 +375,109 @@ class MasterKeys(dict): It also generates a signing key-pair if enabled with master_sign_key_name. """ - def __init__(self, opts): + def __init__(self, opts, autocreate=True): super().__init__() self.opts = opts - self.master_pub_path = os.path.join(self.opts["pki_dir"], "master.pub") - self.master_rsa_path = os.path.join(self.opts["pki_dir"], "master.pem") + self.cache = salt.cache.Cache(opts, driver=self.opts["keys.cache_driver"]) + + # we need to differentiate this here because in a multi-master setup, + # if the driver is localfs, each master's key can be different but + # exist with the same name (master.pem); but with a different driver + # the state is shared across all masters, so it would be impossible to + # represent that setup unless the key used is unique (e.g the master + # id). + # when get_keys(name='master') runs it will duplicate the keys to + # ${id}.pem/pub to avoid this scenario. 
at some point in the future + # master.pem/pub can be removed + self.master_id = self.opts["id"].removesuffix("_master") + + # set names for the signing key-pairs + self.pubkey_signature = None + self.master_pubkey_signature = ( + opts.get("master_pubkey_signature") or f"{opts['id']}_pubkey_signature" + ) + + if autocreate: + self._setup_keys() + + # We need __setstate__ and __getstate__ to avoid pickling errors since + # some of the member variables correspond to Cython objects which are + # not picklable. + # These methods are only used when pickling so will not be used on + # non-Windows platforms. + def __setstate__(self, state): + self.__init__(state["opts"]) + + def __getstate__(self): + return {"opts": self.opts} + + def _setup_keys(self): + # it's important to init this even if cluster_id is enabled so that on + # initial start the master's non cluster key is generated key_pass = salt.utils.sdb.sdb_get(self.opts["key_pass"], self.opts) - self.master_key = self.__get_keys(passphrase=key_pass) - self.cluster_pub_path = None - self.cluster_rsa_path = None - self.cluster_key = None - if self.opts["cluster_id"]: - self.cluster_pub_path = os.path.join( - self.opts["cluster_pki_dir"], "cluster.pub" + if self.cache.contains("master_keys", f"{self.master_id}.pem"): + self.master_key = self.key = self.find_or_create_keys( + name=self.master_id, passphrase=key_pass ) - self.cluster_rsa_path = os.path.join( - self.opts["cluster_pki_dir"], "cluster.pem" - ) - self.cluster_shared_path = os.path.join( - self.opts["cluster_pki_dir"], - "peers", - f"{self.opts['id']}.pub", + else: + self.master_key = self.key = self.find_or_create_keys( + name="master", passphrase=key_pass ) + + # facilitate migrating to pem named off the master id instead of master.pem + if not self.cache.contains("master_keys", f"{self.master_id}.pem"): + priv = self.cache.fetch("master_keys", "master.pem") + pub = self.cache.fetch("master_keys", "master.pub") + self.cache.store("master_keys", f"{self.master_id}.pem", priv) + self.cache.store("master_keys", f"{self.master_id}.pub", pub) + self.cache.flush("master_keys", "master.pem") + self.cache.flush("master_keys", "master.pub") + + # lets create symlinks in case a user downgrades back to a previous version + if self.opts["keys.cache_driver"] == "localfs_key": + os.symlink( + os.path.join(self.opts["pki_dir"], f"{self.master_id}.pem"), + os.path.join(self.opts["pki_dir"], "master.pem"), + ) + os.symlink( + os.path.join(self.opts["pki_dir"], f"{self.master_id}.pub"), + os.path.join(self.opts["pki_dir"], "master.pub"), + ) + + if self.opts["cluster_id"]: self.check_master_shared_pub() key_pass = salt.utils.sdb.sdb_get(self.opts["cluster_key_pass"], self.opts) - self.cluster_key = self.__get_keys( + self.cluster_key = self.key = self.find_or_create_keys( name="cluster", passphrase=key_pass, - pki_dir=self.opts["cluster_pki_dir"], ) - self.pub_signature = None - - # set names for the signing key-pairs - if opts["master_sign_pubkey"]: + if self.opts["master_sign_pubkey"]: # if only the signature is available, use that - if opts["master_use_pubkey_signature"]: - self.sig_path = os.path.join( - self.opts["pki_dir"], opts["master_pubkey_signature"] - ) - if os.path.isfile(self.sig_path): - with salt.utils.files.fopen(self.sig_path) as fp_: - self.pub_signature = clean_key(fp_.read()) + if self.opts["master_use_pubkey_signature"]: + if self.opts["keys.cache_driver"] == "localfs_key": + sig_path = os.path.join( + self.opts["pki_dir"], self.master_pubkey_signature + ) + else: + sig_path = 
f"{self.opts['keys.cache_driver']}:master_keys/{self.master_pubkey_signature}" + + if self.cache.contains("master_keys", self.master_pubkey_signature): + self.pubkey_signature = clean_key( + self.cache.fetch("master_keys", self.master_pubkey_signature) + ) log.info( "Read %s's signature from %s", - os.path.basename(self.pub_path), - self.opts["master_pubkey_signature"], + self.master_pubkey_signature, + sig_path, ) else: log.error( "Signing the master.pub key with a signature is " "enabled but no signature file found at the defined " "location %s", - self.sig_path, + sig_path, ) log.error( "The signature-file may be either named differently " @@ -510,74 +491,51 @@ def __init__(self, opts): key_pass = salt.utils.sdb.sdb_get( self.opts["signing_key_pass"], self.opts ) - self.pub_sign_path = os.path.join( - self.opts["pki_dir"], opts["master_sign_key_name"] + ".pub" - ) - self.rsa_sign_path = os.path.join( - self.opts["pki_dir"], opts["master_sign_key_name"] + ".pem" + self.sign_key = self.find_or_create_keys( + name=self.opts["master_sign_key_name"], passphrase=key_pass ) - self.sign_key = self.__get_keys(name=opts["master_sign_key_name"]) - - # We need __setstate__ and __getstate__ to avoid pickling errors since - # some of the member variables correspond to Cython objects which are - # not picklable. - # These methods are only used when pickling so will not be used on - # non-Windows platforms. - def __setstate__(self, state): - self.__init__(state["opts"]) - def __getstate__(self): - return {"opts": self.opts} - - @property - def key(self): - if self.cluster_key: - return self.cluster_key - return self.master_key - - @property - def pub_path(self): - if self.cluster_pub_path: - return self.cluster_pub_path - return self.master_pub_path - - @property - def rsa_path(self): - if self.cluster_rsa_path: - return self.cluster_rsa_path - return self.master_rsa_path - - def __key_exists(self, name="master", passphrase=None, pki_dir=None): - if pki_dir is None: - pki_dir = self.opts["pki_dir"] - path = os.path.join(pki_dir, name + ".pem") - return os.path.exists(path) - - def __get_keys(self, name="master", passphrase=None, pki_dir=None): + def find_or_create_keys( + self, name=None, passphrase=None, keysize=None, cache=None, force=False + ): """ Returns a key object for a key in the pki-dir + If it does not exist, creates it """ - if pki_dir is None: - pki_dir = self.opts["pki_dir"] - path = os.path.join(pki_dir, name + ".pem") - if not self.__key_exists(name, passphrase, pki_dir): - log.info("Generating %s keys: %s", name, pki_dir) - gen_keys( - pki_dir, - name, - self.opts["keysize"], - self.opts.get("user"), + if not name: + raise ValueError("name must be defined for a key") + + if not cache: + cache = self.cache + + path = name + ".pem" + # try to make the error messaging more obvious + if self.opts["keys.cache_driver"] == "localfs_key": + path = os.path.join(self.cache._kwargs["cachedir"], name + ".pem") + else: + path = f"{self.opts['keys.cache_driver']}:master_keys/{self.master_id}.pub" + + if force or not cache.contains("master_keys", f"{name}.pem"): + log.info("Generating key-pair for %s", path) + (priv, pub) = gen_keys( + keysize or self.opts["keysize"], passphrase, ) + + cache.store("master_keys", f"{name}.pem", priv) + cache.store("master_keys", f"{name}.pub", pub) + else: + priv = cache.fetch("master_keys", f"{name}.pem") + try: - key = PrivateKey(path, passphrase) - except ValueError as e: + key = PrivateKey.from_str(priv, passphrase) + except ValueError: message = f"Unable to read 
key: {path}; file may be corrupt" - except TypeError as e: + except TypeError: message = f"Unable to read key: {path}; passphrase may be incorrect" - except InvalidKeyError as e: + except InvalidKeyError: message = f"Unable to read key: {path}; key contains unsupported algorithm" - except cryptography.exceptions.UnsupportedAlgorithm as e: + except cryptography.exceptions.UnsupportedAlgorithm: message = f"Unable to read key: {path}; key contains unsupported algorithm" else: log.debug("Loaded %s key: %s", name, path) @@ -585,46 +543,29 @@ def __get_keys(self, name="master", passphrase=None, pki_dir=None): log.error(message) raise MasterExit(message) - def get_pub_str(self, name="master"): + def get_pub_str(self): """ Return the string representation of a public key in the pki-directory """ - if self.cluster_pub_path: - path = self.cluster_pub_path + if self.opts["cluster_id"]: + key = "cluster.pub" else: - path = self.master_pub_path + key = f"{self.master_id}.pub" + # XXX We should always have a key present when this is called, if not # it's an error. # if not os.path.isfile(path): # raise RuntimeError(f"The key {path} does not exist.") - if not os.path.isfile(path): + if not self.cache.contains("master_keys", key): pubkey = self.key.public_key() - with salt.utils.files.fopen(path, "wb+") as f: - f.write( - pubkey.public_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PublicFormat.SubjectPublicKeyInfo, - ) - ) - with salt.utils.files.fopen(path) as rfh: - return clean_key(rfh.read()) - - def get_ckey_paths(self): - return self.cluster_pub_path, self.cluster_rsa_path - - def get_mkey_paths(self): - return self.pub_path, self.rsa_path - - def get_sign_paths(self): - return self.pub_sign_path, self.rsa_sign_path + key_bytes = pubkey.public_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + ) + self.cache.store("master_keys", key, key_bytes) - def pubkey_signature(self): - """ - returns the base64 encoded signature from the signature file - or None if the master has its own signing keys - """ - return self.pub_signature + return clean_key(self.cache.fetch("master_keys", key)) def check_master_shared_pub(self): """ @@ -634,19 +575,110 @@ def check_master_shared_pub(self): to the shared location. Otherwise validate the shared key matches our key. Failed validation raises MasterExit """ - shared_pub = pathlib.Path(self.cluster_shared_path) - master_pub = pathlib.Path(self.master_pub_path) - if shared_pub.exists(): - if shared_pub.read_bytes() != master_pub.read_bytes(): + if self.opts["keys.cache_driver"] == "localfs_key": + shared_path = os.path.join( + self.opts["cluster_pki_dir"], "peers", f"{self.master_id}.pub" + ) + else: + shared_path = f"{self.opts['keys.cache_driver']}:master_keys/peers/{self.master_id}.pub" + + shared_pub = self.cache.fetch("master_keys", f"peers/{self.master_id}.pub") + # the non-clustered master key can live in both places depending on if + # a shared backend or not. 
see comment in __init__ + master_pub = self.cache.fetch("master_keys", f"{self.master_id}.pub") + if not master_pub: + master_pub = self.cache.fetch("master_keys", "master.pub") + + if shared_pub: + if shared_pub != master_pub: message = ( - f"Shared key does not match, remove it to continue: {shared_pub}" + f"Shared key does not match, remove it to continue: {shared_path}" ) log.error(message) raise MasterExit(message) else: # permissions - log.debug("Writing shared key %s", shared_pub) - shared_pub.write_bytes(master_pub.read_bytes()) + log.debug("Writing shared key %s", shared_path) + self.cache.store("master_keys", f"peers/{self.master_id}.pub", master_pub) + + def gen_signature(self, priv=None, pub=None, sign_path=None): + """ + creates a signature for the given public-key with + the given private key and writes it to sign_path + """ + # we need to replace the path if sign_path is specified + if sign_path: + if self.opts["keys.cache_driver"] != "localfs_key": + log.error( + "You seem to be calling salt.crypt.MasterKeys.gen_signature() with a signature-path override, but are not using localfs_key. This probably isn't doing what you intended" + ) + cache = salt.cache.Cache( + self.opts, + driver=self.opts["keys.cache_driver"], + pki_dir=pathlib.Path(sign_path).parent, + ) + else: + cache = self.cache + + if cache.contains("master_keys", self.master_pubkey_signature): + log.error( + "%s already exists at expected location", + sign_path or self.master_pubkey_signature, + ) + return False + + if not priv: + priv = self.sign_key + + if not pub: + pub = priv.public_key() + + pub_pem = pub.public_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + ) + + mpub_sig = priv.sign(pub_pem) + mpub_sig_64 = binascii.b2a_base64(mpub_sig) + + log.trace("Calculating signature for %s with %s", pub, priv) + + cache.store("master_keys", self.master_pubkey_signature, mpub_sig_64) + return True + + def sign(self, *args, **kwargs): + """ + proxy to PrivateKey.sign + """ + return self.key.sign(*args, **kwargs) + + def decrypt(self, *args, **kwargs): + """ + proxy to PrivateKey.decrypt + """ + return self.key.decrypt(*args, **kwargs) + + def encrypt(self, *args, **kwargs): + """ + proxy to PrivateKey.encrypt + """ + return self.key.encrypt(*args, **kwargs) + + def fetch(self, name): + """ + fetch from keystore, unmarshalling to object if possible + """ + key = self.cache.fetch("master_keys", name) + + if not key: + return + + if name.endswith(".pem"): + return PrivateKey.from_str(key) + elif name.endswith(".pub"): + return PublicKey.from_str(key) + else: + return key class AsyncAuth: @@ -708,6 +740,7 @@ def __singleton_init__(self, opts, io_loop=None): """ self.opts = opts self.token = salt.utils.stringutils.to_bytes(Crypticle.generate_key_string()) + self.cache = salt.cache.Cache(opts, driver=self.opts["keys.cache_driver"]) self.pub_path = os.path.join(self.opts["pki_dir"], "minion.pub") self.rsa_path = os.path.join(self.opts["pki_dir"], "minion.pem") if self.opts["__role"] == "syndic": @@ -992,7 +1025,7 @@ def handle_signin_response(self, sign_in_payload, payload): raise SaltClientError("Invalid master key") master_pubkey_path = os.path.join(self.opts["pki_dir"], self.mpub) - if os.path.exists(master_pubkey_path) and not PublicKey( + if os.path.exists(master_pubkey_path) and not PublicKey.from_file( master_pubkey_path ).verify( clear_signed_data, @@ -1079,13 +1112,14 @@ def get_keys(self): if not os.path.exists(self.rsa_path): log.info("Generating keys: 
%s", self.opts["pki_dir"]) - gen_keys( - self.opts["pki_dir"], - "minion", - self.opts["keysize"], - self.opts.get("user"), - ) - key = PrivateKey(self.rsa_path, None) + (priv, pub) = gen_keys(self.opts["keysize"]) + + # the cache bank is called master keys but the codepath is shared + # on master/minion for interacting with pki + self.cache.store("master_keys", "minion.pem", priv) + self.cache.store("master_keys", "minion.pub", pub) + + key = PrivateKey.from_file(self.rsa_path, None) log.debug("Loaded minion key: %s", self.rsa_path) return key @@ -1122,7 +1156,7 @@ def minion_sign_in_payload(self): payload["autosign_grains"] = autosign_grains try: pubkey_path = os.path.join(self.opts["pki_dir"], self.mpub) - pub = PublicKey(pubkey_path) + pub = PublicKey.from_file(pubkey_path) payload["token"] = pub.encrypt( self.token, self.opts["encryption_algorithm"] ) @@ -1168,8 +1202,9 @@ def decrypt_aes(self, payload, master_pub=True): m_path = os.path.join(self.opts["pki_dir"], self.mpub) if os.path.exists(m_path): try: - mkey = PublicKey(m_path) + mkey = PublicKey.from_file(m_path) except Exception: # pylint: disable=broad-except + log.exception("Something unexpected occured loading master pub-key") return "", "" digest = hashlib.sha256(key_str).hexdigest() digest = salt.utils.stringutils.to_bytes(digest) diff --git a/salt/key.py b/salt/key.py index 66fd42ce073a..aa2b3588eba7 100644 --- a/salt/key.py +++ b/salt/key.py @@ -7,15 +7,14 @@ import itertools import logging import os -import shutil import sys import salt.cache import salt.client import salt.crypt -import salt.daemons.masterapi import salt.exceptions -import salt.minion +import salt.payload +import salt.transport import salt.utils.args import salt.utils.crypt import salt.utils.data @@ -23,10 +22,11 @@ import salt.utils.files import salt.utils.json import salt.utils.kinds -import salt.utils.master +import salt.utils.minions import salt.utils.sdb import salt.utils.stringutils import salt.utils.user +from salt.utils.decorators import cached_property log = logging.getLogger(__name__) @@ -49,12 +49,16 @@ class KeyCLI: def __init__(self, opts): self.opts = opts + import salt.wheel + self.client = salt.wheel.WheelClient(opts) - self.key = Key # instantiate the key object for masterless mode if not opts.get("eauth"): - self.key = self.key(opts) - self.auth = None + self.key = get_key(opts) + else: + self.key = Key + + self.auth = {} def _update_opts(self): # get the key command @@ -117,11 +121,13 @@ def _init_auth(self): low["key"] = salt.utils.stringutils.to_unicode(fp_.readline()) except OSError: low["token"] = self.opts["token"] - # + # If using eauth and a token hasn't already been loaded into # low, prompt the user to enter auth credentials if "token" not in low and "key" not in low and self.opts["eauth"]: # This is expensive. Don't do it unless we need to. 
+            import salt.auth
+
             resolver = salt.auth.Resolver(self.opts)
             res = resolver.cli(self.opts["eauth"])
             if self.opts["mktoken"] and res:
@@ -134,6 +140,9 @@ def _init_auth(self):
                 low.update(res)
                 low["eauth"] = self.opts["eauth"]
             else:
+                # late import to avoid circular import
+                import salt.utils.master
+
                 low["user"] = salt.utils.user.get_specific_user()
                 low["key"] = salt.utils.master.get_master_key(
                     low["user"], self.opts, skip_perm_errors
@@ -244,7 +253,7 @@ def run(self):
         ret = None
         try:
             if cmd in ("accept", "reject", "delete"):
-                ret = self._run_cmd("name_match")
+                ret = self._run_cmd("glob_match")
                 if not isinstance(ret, dict):
                     salt.output.display_output(ret, "key", opts=self.opts)
                     return ret
@@ -294,6 +303,10 @@ def run(self):
                 ret = f"{exc}"
             if not self.opts.get("quiet", False):
                 salt.output.display_output(ret, "nested", self.opts)
+        except Exception as exc:  # pylint: disable=broad-except
+            # don't swallow unexpected exceptions in salt-key
+            log.exception(exc)
+
         return ret
@@ -307,27 +320,47 @@ class Key:
     REJ = "minions_rejected"
     DEN = "minions_denied"
+    # handle transitions from legacy naming to simpler new format
+    STATE_MAP = {"accepted": ACC, "rejected": REJ, "pending": PEND, "denied": DEN}
+    DIR_MAP = {v: k for k, v in STATE_MAP.items()}
+
+    ACT_MAP = {
+        ACC: "accept",
+        REJ: "reject",
+        PEND: "pend",
+        DEN: "denied",
+    }
+
     def __init__(self, opts, io_loop=None):
         self.opts = opts
-        self.pki_dir = self.opts["pki_dir"]
-        if self.opts["cluster_id"]:
-            self.pki_dir = self.opts["cluster_pki_dir"]
-        kind = self.opts.get("__role", "")  # application kind
-        if kind not in salt.utils.kinds.APPL_KINDS:
-            emsg = f"Invalid application kind = '{kind}'."
+        self.cache = salt.cache.Cache(opts, driver=self.opts["keys.cache_driver"])
+        if self.opts.get("cluster_id", None) is not None:
+            self.pki_dir = self.opts.get("cluster_pki_dir", "")
+        else:
+            self.pki_dir = self.opts.get("pki_dir", "")
+        self._kind = self.opts.get("__role", "")  # application kind
+        if self._kind not in salt.utils.kinds.APPL_KINDS:
+            emsg = f"Invalid application kind = '{self._kind}'."
log.error(emsg) raise ValueError(emsg) - self.event = salt.utils.event.get_event( - kind, - opts["sock_dir"], - opts=opts, - listen=False, - io_loop=io_loop, - ) - self.passphrase = salt.utils.sdb.sdb_get( self.opts.get("signing_key_pass"), self.opts ) + self.io_loop = io_loop + + @cached_property + def master_keys(self): + return salt.crypt.MasterKeys(self.opts) + + @cached_property + def event(self): + return salt.utils.event.get_event( + self._kind, + self.opts["sock_dir"], + opts=self.opts, + listen=False, + io_loop=self.io_loop, + ) def _check_minions_directories(self): """ @@ -341,11 +374,15 @@ def _check_minions_directories(self): return minions_accepted, minions_pre, minions_rejected, minions_denied def _get_key_attrs(self, keydir, keyname, keysize, user): + cache = None if not keydir: if "gen_keys_dir" in self.opts: keydir = self.opts["gen_keys_dir"] else: keydir = self.pki_dir + cache = salt.cache.Cache( + self.opts, driver=self.opts["keys.cache_driver"], cachedir=keydir, user=user + ) if not keyname: if "gen_keys" in self.opts: keyname = self.pki_dir @@ -353,23 +390,19 @@ def _get_key_attrs(self, keydir, keyname, keysize, user): keyname = "minion" if not keysize: keysize = self.opts["keysize"] - return keydir, keyname, keysize, user + return keydir, keyname, keysize, user, cache def gen_keys(self, keydir=None, keyname=None, keysize=None, user=None): """ Generate minion RSA public keypair """ - keydir, keyname, keysize, user = self._get_key_attrs( + keydir, keyname, keysize, user, cache = self._get_key_attrs( keydir, keyname, keysize, user ) - salt.crypt.gen_keys(keydir, keyname, keysize, user, self.passphrase) - return salt.utils.crypt.pem_finger(os.path.join(keydir, keyname + ".pub")) - - def gen_signature(self, privkey, pubkey, sig_path): - """ - Generate master public-key-signature - """ - return salt.crypt.gen_signature(privkey, pubkey, sig_path, self.passphrase) + priv = self.master_keys.find_or_create_keys( + keyname, keysize=keysize, cache=cache + ) + return salt.utils.crypt.pem_finger(key=priv.public_key()) def gen_keys_signature( self, priv, pub, signature_path, auto_create=False, keysize=None @@ -397,26 +430,29 @@ def gen_keys_signature( if os.path.isfile(mpriv): priv = mpriv - if not priv: + if priv: + priv = salt.crypt.PrivateKey.from_file(priv) + else: if auto_create: log.debug( "Generating new signing key-pair .%s.* in %s", self.opts["master_sign_key_name"], self.pki_dir, ) - salt.crypt.gen_keys( - self.pki_dir, - self.opts["master_sign_key_name"], - keysize or self.opts["keysize"], - self.opts.get("user"), - self.passphrase, + # we force re-create as master_keys init also does the same + # creation without these kwarg overrides + priv = self.master_keys.sign_key = self.master_keys.find_or_create_keys( + name=self.opts["master_sign_key_name"], + keysize=keysize or self.opts["keysize"], + passphrase=self.passphrase, + force=True, ) - - priv = self.pki_dir + "/" + self.opts["master_sign_key_name"] + ".pem" else: return "No usable private-key found" - if not pub: + if pub: + pub = salt.crypt.PublicKey.from_file(pub).key + else: return "No usable public-key found" log.debug("Using public-key %s", pub) @@ -425,13 +461,11 @@ def gen_keys_signature( if signature_path: if not os.path.isdir(signature_path): log.debug("target directory %s does not exist", signature_path) + sign_path = signature_path + "/" + self.master_keys.master_pubkey_signature else: - signature_path = self.pki_dir + sign_path = None - sign_path = signature_path + "/" + 
self.opts["master_pubkey_signature"]
-
-        skey = get_key(self.opts)
-        return skey.gen_signature(priv, pub, sign_path)
+        return self.master_keys.gen_signature(priv, pub, sign_path)

     def check_minion_cache(self, preserve_minions=None):
         """
@@ -447,19 +481,7 @@ def check_minion_cache(self, preserve_minions=None):
         for key, val in keys.items():
             minions.extend(val)
         if not self.opts.get("preserve_minion_cache", False):
-            m_cache = os.path.join(self.opts["cachedir"], self.ACC)
-            if os.path.isdir(m_cache):
-                for minion in os.listdir(m_cache):
-                    if minion not in minions and minion not in preserve_minions:
-                        try:
-                            shutil.rmtree(os.path.join(m_cache, minion))
-                        except OSError as ex:
-                            log.warning(
-                                "Key: Delete cache for %s got OSError/IOError: %s \n",
-                                minion,
-                                ex,
-                            )
-                            continue
+            # we use a new cache instance here as we don't want the key cache
             cache = salt.cache.factory(self.opts)
             clist = cache.list(self.ACC)
             if clist:
@@ -478,7 +500,7 @@ def check_master(self):
             return False
         return True

-    def name_match(self, match, full=False):
+    def glob_match(self, match, full=False):
         """
         Accept a glob which to match the of a key and return the key's location
         """
@@ -504,6 +526,29 @@ def name_match(self, match, full=False):
                     ret[status].append(key)
         return ret

+    def list_match(self, match):
+        """
+        Accept a list of key names and return each key's location
+        """
+        ret = {}
+        if isinstance(match, str):
+            match = match.split(",")
+
+        for name in match:
+            key = self.cache.fetch("keys", name)
+            if key:
+                try:
+                    ret.setdefault(self.STATE_MAP[key["state"]], [])
+                    ret[self.STATE_MAP[key["state"]]].append(name)
+                except KeyError:
+                    log.error("unexpected key state returned for %s: %s", name, key)
+
+            denied_keys = self.cache.fetch("denied_keys", name)
+            if denied_keys:
+                ret.setdefault(self.DEN, [])
+                ret[self.DEN].append(name)
+        return ret
+
     def dict_match(self, match_dict):
         """
         Accept a dictionary of keys and return the current state of the
@@ -518,38 +563,47 @@ def dict_match(self, match_dict):
             ret.setdefault(keydir, []).append(key)
         return ret

-    def local_keys(self):
-        """
-        Return a dict of local keys
-        """
-        ret = {"local": []}
-        for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(self.pki_dir)):
-            if fn_.endswith(".pub") or fn_.endswith(".pem"):
-                path = os.path.join(self.pki_dir, fn_)
-                ret["local"].append(fn_)
-        return ret
-
     def list_keys(self):
         """
         Return a dict of managed keys and what the key status are
         """
-        key_dirs = self._check_minions_directories()
+        if self.opts.get("key_cache") == "sched":
+            acc = "accepted"

-        ret = {}
+            cache_file = os.path.join(self.opts["pki_dir"], acc, ".key_cache")
+            if self.opts["key_cache"] and os.path.exists(cache_file):
+                log.debug("Returning cached minion list")
+                with salt.utils.files.fopen(cache_file, mode="rb") as fn_:
+                    return salt.payload.load(fn_)

-        for dir_ in key_dirs:
-            if dir_ is None:
-                continue
-            ret[os.path.basename(dir_)] = []
-            try:
-                for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(dir_)):
-                    if not fn_.startswith("."):
-                        ret[os.path.basename(dir_)].append(
-                            salt.utils.stringutils.to_unicode(fn_)
-                        )
-            except OSError:
-                # key dir kind is not created yet, just skip
-                continue
+        ret = {
+            "minions_pre": [],
+            "minions_rejected": [],
+            "minions": [],
+            "minions_denied": [],
+        }
+        for id_ in salt.utils.data.sorted_ignorecase(self.cache.list("keys")):
+            key = self.cache.fetch("keys", id_)
+
+            if key["state"] == "accepted":
+                ret["minions"].append(id_)
+            elif key["state"] == "pending":
+                ret["minions_pre"].append(id_)
+            elif key["state"] ==
"rejected": + ret["minions_rejected"].append(id_) + + for id_ in salt.utils.data.sorted_ignorecase(self.cache.list("denied_keys")): + ret["minions_denied"].append(id_) + return ret + + def local_keys(self): + """ + Return a dict of local keys + """ + ret = {"local": []} + for key in salt.utils.data.sorted_ignorecase(self.cache.list("master_keys")): + if key.endswith(".pub") or key.endswith(".pem"): + ret["local"].append(key) return ret def all_keys(self): @@ -564,116 +618,140 @@ def list_status(self, match): """ Return a dict of managed keys under a named status """ - acc, pre, rej, den = self._check_minions_directories() - ret = {} + ret = self.all_keys() if match.startswith("acc"): - ret[os.path.basename(acc)] = [] - for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(acc)): - if not fn_.startswith("."): - ret[os.path.basename(acc)].append(fn_) + return { + "minions": salt.utils.data.sorted_ignorecase(ret.get("minions", [])) + } elif match.startswith("pre") or match.startswith("un"): - ret[os.path.basename(pre)] = [] - for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(pre)): - if not fn_.startswith("."): - ret[os.path.basename(pre)].append(fn_) + return { + "minions_pre": salt.utils.data.sorted_ignorecase( + ret.get("minions_pre", []) + ) + } elif match.startswith("rej"): - ret[os.path.basename(rej)] = [] - for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(rej)): - if not fn_.startswith("."): - ret[os.path.basename(rej)].append(fn_) - elif match.startswith("den") and den is not None: - ret[os.path.basename(den)] = [] - for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(den)): - if not fn_.startswith("."): - ret[os.path.basename(den)].append(fn_) + return { + "minions_rejected": salt.utils.data.sorted_ignorecase( + ret.get("minions_rejected", []) + ) + } + elif match.startswith("den"): + return { + "minions_denied": salt.utils.data.sorted_ignorecase( + ret.get("minions_denied", []) + ) + } elif match.startswith("all"): - return self.all_keys() - return ret + return ret + # this should never be reached + return {} def key_str(self, match): """ Return the specified public key or keys based on a glob """ ret = {} - for status, keys in self.name_match(match).items(): + for status, keys in self.glob_match(match).items(): ret[status] = {} for key in salt.utils.data.sorted_ignorecase(keys): - path = os.path.join(self.pki_dir, status, key) - with salt.utils.files.fopen(path, "r") as fp_: - ret[status][key] = salt.utils.stringutils.to_unicode(fp_.read()) + if status == self.DEN: + denied = self.cache.fetch("denied_keys", key) + if len(denied) == 1: + ret[status][key] = denied[0] + else: + ret[status][key] = denied + else: + ret[status][key] = self.cache.fetch("keys", key).get("pub") return ret def key_str_all(self): """ Return all managed key strings """ - ret = {} - for status, keys in self.list_keys().items(): - ret[status] = {} - for key in salt.utils.data.sorted_ignorecase(keys): - path = os.path.join(self.pki_dir, status, key) - with salt.utils.files.fopen(path, "r") as fp_: - ret[status][key] = salt.utils.stringutils.to_unicode(fp_.read()) - return ret - - def accept( - self, match=None, match_dict=None, include_rejected=False, include_denied=False + return self.key_str("*") + + def change_state( + self, + from_state, + to_state, + match=None, + match_dict=None, + include_rejected=False, + include_denied=False, + include_accepted=False, ): """ - Accept public keys. If "match" is passed, it is evaluated as a glob. 
- Pre-gathered matches can also be passed via "match_dict". + change key state from one state to another """ if match is not None: - matches = self.name_match(match) + matches = self.glob_match(match) elif match_dict is not None and isinstance(match_dict, dict): matches = match_dict else: matches = {} - keydirs = [self.PEND] + keydirs = [from_state] if include_rejected: keydirs.append(self.REJ) if include_denied: keydirs.append(self.DEN) + if include_accepted: + keydirs.append(self.ACC) + invalid_keys = [] for keydir in keydirs: - for key in matches.get(keydir, []): - key_path = os.path.join(self.pki_dir, keydir, key) - try: - salt.crypt.get_rsa_pub_key(key_path) - except salt.exceptions.InvalidKeyError: - log.error("Invalid RSA public key: %s", key) - invalid_keys.append((keydir, key)) - continue - try: - shutil.move( - key_path, - os.path.join(self.pki_dir, self.ACC, key), - ) - eload = {"result": True, "act": "accept", "id": key} - self.event.fire_event(eload, salt.utils.event.tagify(prefix="key")) - except OSError: - pass - for keydir, key in invalid_keys: - matches[keydir].remove(key) + for keyname in matches.get(keydir, []): + if to_state == self.DEN: + key = self.cache.fetch("keys", keyname) + self.cache.flush("keys", keyname) + self.cache.store("denied_keys", keyname, [key["pub"]]) + else: + if keydir == self.DEN: + # denied keys can be many per id, but we assume first for legacy + pub = self.cache.fetch("denied_keys", keyname)[0] + self.cache.flush("denied_keys", keyname) + key = {"pub": pub} + else: + key = self.cache.fetch("keys", keyname) + + try: + salt.crypt.PublicKey.from_str(key["pub"]) + except salt.exceptions.InvalidKeyError: + log.error("Invalid RSA public key: %s", keyname) + invalid_keys.append(keyname) + continue + + key["state"] = self.DIR_MAP[to_state] + self.cache.store("keys", keyname, key) + + eload = {"result": True, "act": self.DIR_MAP[to_state], "id": keyname} + self.event.fire_event(eload, salt.utils.event.tagify(prefix="key")) + + for key in invalid_keys: sys.stderr.write(f"Unable to accept invalid key for {key}.\n") - return self.name_match(match) if match is not None else self.dict_match(matches) + + return self.glob_match(match) if match is not None else self.dict_match(matches) + + def accept( + self, match=None, match_dict=None, include_rejected=False, include_denied=False + ): + """ + Accept public keys. If "match" is passed, it is evaluated as a glob. + Pre-gathered matches can also be passed via "match_dict". 
+ """ + return self.change_state( + self.PEND, + self.ACC, + match, + match_dict, + include_rejected=include_rejected, + include_denied=include_denied, + ) def accept_all(self): """ Accept all keys in pre """ - keys = self.list_keys() - for key in keys[self.PEND]: - try: - shutil.move( - os.path.join(self.pki_dir, self.PEND, key), - os.path.join(self.pki_dir, self.ACC, key), - ) - eload = {"result": True, "act": "accept", "id": key} - self.event.fire_event(eload, salt.utils.event.tagify(prefix="key")) - except OSError: - pass - return self.list_keys() + return self.accept(match="*") def delete_key( self, match=None, match_dict=None, preserve_minions=None, revoke_auth=False @@ -685,7 +763,7 @@ def delete_key( To preserve the master caches of minions who are matched, set preserve_minions """ if match is not None: - matches = self.name_match(match) + matches = self.glob_match(match) elif match_dict is not None and isinstance(match_dict, dict): matches = match_dict else: @@ -711,7 +789,10 @@ def delete_key( "master AES key is rotated or auth is revoked " "with 'saltutil.revoke_auth'.".format(key) ) - os.remove(os.path.join(self.pki_dir, status, key)) + if status == "minions_denied": + self.cache.flush("denied_keys", key) + else: + self.cache.flush("keys", key) eload = {"result": True, "act": "delete", "id": key} self.event.fire_event( eload, salt.utils.event.tagify(prefix="key") @@ -726,21 +807,13 @@ def delete_key( salt.crypt.dropfile( self.opts["cachedir"], self.opts["user"], self.opts["id"] ) - return self.name_match(match) if match is not None else self.dict_match(matches) + return self.glob_match(match) if match is not None else self.dict_match(matches) def delete_den(self): """ Delete all denied keys """ - keys = self.list_keys() - for status, keys in self.list_keys().items(): - for key in keys[self.DEN]: - try: - os.remove(os.path.join(self.pki_dir, status, key)) - eload = {"result": True, "act": "delete", "id": key} - self.event.fire_event(eload, salt.utils.event.tagify(prefix="key")) - except OSError: - pass + self.cache.flush("denied_keys") self.check_minion_cache() return self.list_keys() @@ -751,7 +824,7 @@ def delete_all(self): for status, keys in self.list_keys().items(): for key in keys: try: - os.remove(os.path.join(self.pki_dir, status, key)) + self.cache.flush("keys", key) eload = {"result": True, "act": "delete", "id": key} self.event.fire_event(eload, salt.utils.event.tagify(prefix="key")) except OSError: @@ -770,50 +843,26 @@ def reject( Reject public keys. If "match" is passed, it is evaluated as a glob. Pre-gathered matches can also be passed via "match_dict". 
""" - if match is not None: - matches = self.name_match(match) - elif match_dict is not None and isinstance(match_dict, dict): - matches = match_dict - else: - matches = {} - keydirs = [self.PEND] - if include_accepted: - keydirs.append(self.ACC) - if include_denied: - keydirs.append(self.DEN) - for keydir in keydirs: - for key in matches.get(keydir, []): - try: - shutil.move( - os.path.join(self.pki_dir, keydir, key), - os.path.join(self.pki_dir, self.REJ, key), - ) - eload = {"result": True, "act": "reject", "id": key} - self.event.fire_event(eload, salt.utils.event.tagify(prefix="key")) - except OSError: - pass + ret = self.change_state( + self.PEND, + self.REJ, + match, + match_dict, + include_accepted=include_accepted, + include_denied=include_denied, + ) self.check_minion_cache() if self.opts.get("rotate_aes_key"): salt.crypt.dropfile( self.opts["cachedir"], self.opts["user"], self.opts["id"] ) - return self.name_match(match) if match is not None else self.dict_match(matches) + return ret def reject_all(self): """ Reject all keys in pre """ - keys = self.list_keys() - for key in keys[self.PEND]: - try: - shutil.move( - os.path.join(self.pki_dir, self.PEND, key), - os.path.join(self.pki_dir, self.REJ, key), - ) - eload = {"result": True, "act": "reject", "id": key} - self.event.fire_event(eload, salt.utils.event.tagify(prefix="key")) - except OSError: - pass + self.reject(match="*") self.check_minion_cache() if self.opts.get("rotate_aes_key"): salt.crypt.dropfile( @@ -826,18 +875,32 @@ def finger(self, match, hash_type=None): Return the fingerprint for a specified key """ if hash_type is None: - hash_type = __opts__["hash_type"] + hash_type = self.opts["hash_type"] - matches = self.name_match(match, True) + matches = self.glob_match(match, full=True) ret = {} for status, keys in matches.items(): ret[status] = {} for key in keys: - if status == "local": - path = os.path.join(self.pki_dir, key) + if status == "minions_denied": + denied = self.cache.fetch("denied_keys", key) + for den in denied: + finger = salt.utils.crypt.pem_finger( + key=den.encode("utf-8"), sum_type=hash_type + ) + ret[status].setdefault(key, []).append(finger) + # brush over some dumb backcompat with how denied keys work + # with the legacy system + if len(denied) == 1: + ret[status][key] = ret[status][key][0] else: - path = os.path.join(self.pki_dir, status, key) - ret[status][key] = salt.utils.crypt.pem_finger(path, sum_type=hash_type) + if status == "local": + pub = self.cache.fetch("master_keys", key).encode("utf-8") + else: + pub = self.cache.fetch("keys", key)["pub"].encode("utf-8") + ret[status][key] = salt.utils.crypt.pem_finger( + key=pub, sum_type=hash_type + ) return ret def finger_all(self, hash_type=None): @@ -845,18 +908,9 @@ def finger_all(self, hash_type=None): Return fingerprints for all keys """ if hash_type is None: - hash_type = __opts__["hash_type"] + hash_type = self.opts["hash_type"] - ret = {} - for status, keys in self.all_keys().items(): - ret[status] = {} - for key in keys: - if status == "local": - path = os.path.join(self.pki_dir, key) - else: - path = os.path.join(self.pki_dir, status, key) - ret[status][key] = salt.utils.crypt.pem_finger(path, sum_type=hash_type) - return ret + return self.finger("*", hash_type=hash_type) def __enter__(self): return self diff --git a/salt/master.py b/salt/master.py index adf99feb0a07..563901cfebb7 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1410,6 +1410,9 @@ def __init__(self, opts): self.pki_dir = self.opts["cluster_pki_dir"] else: 
self.pki_dir = self.opts.get("pki_dir", "") + self.key_cache = salt.cache.Cache( + self.opts, driver=self.opts["keys.cache_driver"] + ) def __setup_fileserver(self): """ @@ -1442,18 +1445,24 @@ def __verify_minion(self, id_, token): """ if not salt.utils.verify.valid_id(self.opts, id_): return False - pub_path = os.path.join(self.pki_dir, "minions", id_) + key = self.key_cache.fetch("keys", id_) + + if not key: + log.error("Unexpectedly got no pub key for %s", id_) + return False + try: - pub = salt.crypt.PublicKey(pub_path) - except OSError: + pub = salt.crypt.PublicKey.from_str(key["pub"]) + except (OSError, KeyError): log.warning( "Salt minion claiming to be %s attempted to communicate with " "master, but key could not be read and verification was denied.", id_, + exc_info=True, ) return False except (ValueError, IndexError, TypeError) as err: - log.error('Unable to load public key "%s": %s', pub_path, err) + log.error('Unable to load public key "%s": %s', id_, err) try: if pub.decrypt(token) == b"salt": return True @@ -1863,14 +1872,17 @@ def _return(self, load): if "sig" in load: log.trace("Verifying signed event publish from minion") sig = load.pop("sig") - this_minion_pubkey = os.path.join( - self.pki_dir, "minions/{}".format(load["id"]) - ) + this_minion_pubkey = self.key_cache.fetch("keys", load["id"]) serialized_load = salt.serializers.msgpack.serialize(load) - if not salt.crypt.verify_signature( - this_minion_pubkey, serialized_load, sig + if not this_minion_pubkey or not this_minion_pubkey.verify( + serialized_load, sig ): - log.info("Failed to verify event signature from minion %s.", load["id"]) + if not this_minion_pubkey: + log.error("Failed to fetch pub key for minion %s.", load["id"]) + else: + log.info( + "Failed to verify event signature from minion %s.", load["id"] + ) if self.opts["drop_messages_signature_fail"]: log.critical( "drop_messages_signature_fail is enabled, dropping " @@ -2479,8 +2491,12 @@ async def publish(self, clear_load): }, } jid = self._prep_jid(clear_load, extra) - if jid is None: - return {"enc": "clear", "load": {"error": "Master failed to assign jid"}} + if jid is None or isinstance(jid, dict): + if jid and "error" in jid: + load = jid + else: + load = {"error": "Master failed to assign jid"} + return load payload = self._prep_pub(minions, jid, clear_load, extra, missing) if self.opts.get("order_masters"): diff --git a/salt/modules/seed.py b/salt/modules/seed.py index 657c7bccd64c..9bc53a30e983 100644 --- a/salt/modules/seed.py +++ b/salt/modules/seed.py @@ -237,17 +237,18 @@ def mkconfig( pubkeyfn = os.path.join(tmp, "minion.pub") privkeyfn = os.path.join(tmp, "minion.pem") preseeded = pub_key and priv_key - if preseeded: - log.debug("Writing minion.pub to %s", pubkeyfn) - log.debug("Writing minion.pem to %s", privkeyfn) - with salt.utils.files.fopen(pubkeyfn, "w") as fic: - fic.write(salt.utils.stringutils.to_str(_file_or_content(pub_key))) - with salt.utils.files.fopen(privkeyfn, "w") as fic: - fic.write(salt.utils.stringutils.to_str(_file_or_content(priv_key))) - os.chmod(pubkeyfn, 0o600) - os.chmod(privkeyfn, 0o600) - else: - salt.crypt.gen_keys(tmp, "minion", 2048) + log.debug("Writing minion.pub to %s", pubkeyfn) + log.debug("Writing minion.pem to %s", privkeyfn) + + if not priv_key or not pub_key: + (priv_key, pub_key) = salt.crypt.gen_keys(2048) + + with salt.utils.files.fopen(pubkeyfn, "w") as fic: + fic.write(salt.utils.stringutils.to_str(_file_or_content(pub_key))) + with salt.utils.files.fopen(privkeyfn, "w") as fic: + 
fic.write(salt.utils.stringutils.to_str(_file_or_content(priv_key))) + os.chmod(pubkeyfn, 0o600) + os.chmod(privkeyfn, 0o600) if approve_key and not preseeded: with salt.utils.files.fopen(pubkeyfn) as fp_: pubkey = salt.utils.stringutils.to_unicode(fp_.read()) diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py index 50857e60af24..a0922feeed71 100644 --- a/salt/netapi/rest_tornado/saltnado.py +++ b/salt/netapi/rest_tornado/saltnado.py @@ -185,12 +185,13 @@ .. |500| replace:: internal server error """ -import cgi +import cgi # pylint: disable=deprecated-module import fnmatch import logging import time from collections import defaultdict from copy import copy +from functools import cached_property import tornado.escape import tornado.gen @@ -443,8 +444,9 @@ def initialize(self): "runner_async": None, # empty, since we use the same client as `runner` } - if not hasattr(self, "ckminions"): - self.ckminions = salt.utils.minions.CkMinions(self.application.opts) + @cached_property + def ckminions(self): + return salt.utils.minions.CkMinions(self.application.opts) @property def token(self): diff --git a/salt/transport/tcp.py b/salt/transport/tcp.py index 57ae753d408d..59e10a56bd40 100644 --- a/salt/transport/tcp.py +++ b/salt/transport/tcp.py @@ -293,7 +293,7 @@ async def getstream(self, **kwargs): ) self.unpacker = salt.utils.msgpack.Unpacker() log.debug( - "PubClient conencted to %r %r:%r", self, self.host, self.port + "PubClient connected to %r %r:%r", self, self.host, self.port ) else: log.debug("PubClient connecting to %r %r", self, self.path) @@ -303,7 +303,7 @@ async def getstream(self, **kwargs): ) await asyncio.wait_for(stream.connect(self.path), 1) self.unpacker = salt.utils.msgpack.Unpacker() - log.debug("PubClient conencted to %r %r", self, self.path) + log.debug("PubClient connected to %r %r", self, self.path) except Exception as exc: # pylint: disable=broad-except if self.path: _connect_to = self.path @@ -1896,6 +1896,7 @@ async def _do_send(): def close(self): if self._closing: return + self._closing = True if self._stream is not None: self._stream.close() self._stream = None diff --git a/salt/utils/atomicfile.py b/salt/utils/atomicfile.py index a3bf23468042..eb2f7f5d9f78 100644 --- a/salt/utils/atomicfile.py +++ b/salt/utils/atomicfile.py @@ -5,6 +5,7 @@ import errno import os +import pathlib import random import shutil import sys @@ -31,6 +32,10 @@ def _rename_atomic(src, dst): _MoveFileEx = ctypes.windll.kernel32.MoveFileExW # pylint: disable=C0103 def _rename(src, dst): # pylint: disable=E0102 + if isinstance(src, pathlib.Path): + src = str(src) + if isinstance(dst, pathlib.Path): + dst = str(dst) if not isinstance(src, str): src = str(src, sys.getfilesystemencoding()) if not isinstance(dst, str): diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index 5784ed1ea6ef..da853f31a52e 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -253,16 +253,10 @@ def gen_keys(keysize=2048): # Mandate that keys are at least 2048 in size if keysize < 2048: keysize = 2048 - tdir = tempfile.mkdtemp() - - salt.crypt.gen_keys(tdir, "minion", keysize) - priv_path = os.path.join(tdir, "minion.pem") - pub_path = os.path.join(tdir, "minion.pub") - with salt.utils.files.fopen(priv_path) as fp_: - priv = salt.utils.stringutils.to_unicode(fp_.read()) - with salt.utils.files.fopen(pub_path) as fp_: - pub = salt.utils.stringutils.to_unicode(fp_.read()) - shutil.rmtree(tdir) + + (priv, pub) = salt.crypt.gen_keys(keysize) + priv = 
salt.utils.stringutils.to_unicode(priv) + pub = salt.utils.stringutils.to_unicode(pub) return priv, pub diff --git a/salt/utils/decorators/__init__.py b/salt/utils/decorators/__init__.py index 4ddf359d23c9..b86758e44d2b 100644 --- a/salt/utils/decorators/__init__.py +++ b/salt/utils/decorators/__init__.py @@ -863,3 +863,11 @@ def wrapped(*args, **kwargs): return function(*args, **kwargs) return wrapped + + +try: + from functools import cached_property +except ImportError: + # this should only be needed on <3.8 + def cached_property(func): + return func diff --git a/salt/utils/minions.py b/salt/utils/minions.py index 65d6fd5b8d96..ba0eff6b688f 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -5,10 +5,10 @@ import fnmatch import logging -import os import re import salt.cache +import salt.key import salt.payload import salt.roster import salt.transport @@ -209,15 +209,12 @@ class CkMinions: def __init__(self, opts): self.opts = opts self.cache = salt.cache.factory(opts) + self.key = salt.key.get_key(opts) # TODO: this is actually an *auth* check if self.opts.get("transport", "zeromq") in salt.transport.TRANSPORTS: self.acc = "minions" else: self.acc = "accepted" - if self.opts.get("cluster_id", None) is not None: - self.pki_dir = self.opts.get("cluster_pki_dir", "") - else: - self.pki_dir = self.opts.get("pki_dir", "") def _check_nodegroup_minions(self, expr, greedy): # pylint: disable=unused-argument """ @@ -227,33 +224,59 @@ def _check_nodegroup_minions(self, expr, greedy): # pylint: disable=unused-argu nodegroup_comp(expr, self.opts["nodegroups"]), DEFAULT_TARGET_DELIM, greedy ) - def _check_glob_minions(self, expr, greedy): # pylint: disable=unused-argument + def _check_glob_minions( + self, expr, greedy, minions=None + ): # pylint: disable=unused-argument """ Return the minions found by looking via globs """ - return {"minions": fnmatch.filter(self._pki_minions(), expr), "missing": []} + if minions: + matched = {"minions": fnmatch.filter(minions, expr), "missing": []} + else: + matched = self.key.glob_match(expr).get(self.key.ACC, []) + + return {"minions": matched, "missing": []} def _check_list_minions( - self, expr, greedy, ignore_missing=False + self, expr, greedy, ignore_missing=False, minions=None ): # pylint: disable=unused-argument """ Return the minions found by looking via a list """ if isinstance(expr, str): expr = [m for m in expr.split(",") if m] - minions = self._pki_minions() - return { - "minions": [x for x in expr if x in minions], - "missing": [] if ignore_missing else [x for x in expr if x not in minions], - } - def _check_pcre_minions(self, expr, greedy): # pylint: disable=unused-argument + if minions: + return { + "minions": [x for x in expr if x in minions], + "missing": ( + [] if ignore_missing else [x for x in expr if x not in minions] + ), + } + else: + found = self.key.list_match(expr) + return { + "minions": found.get(self.key.ACC, []), + "missing": ( + [] + if ignore_missing + else [x for x in expr if x not in found.get(self.key.ACC, [])] + ), + } + + def _check_pcre_minions( + self, expr, greedy, minions=None + ): # pylint: disable=unused-argument """ Return the minions found by looking via regular expressions """ reg = re.compile(expr) + + if not minions: + minions = self._pki_minions() + return { - "minions": [m for m in self._pki_minions() if reg.match(m)], + "minions": [m for m in minions if reg.match(m)], "missing": [], } @@ -262,32 +285,31 @@ def _pki_minions(self): Retrieve complete minion list from PKI dir. 
Respects cache if configured """ - minions = [] - pki_cache_fn = os.path.join(self.pki_dir, self.acc, ".key_cache") - try: - os.makedirs(os.path.dirname(pki_cache_fn)) - except OSError: - pass + + minions = set() + try: - if self.opts["key_cache"] and os.path.exists(pki_cache_fn): - log.debug("Returning cached minion list") - with salt.utils.files.fopen(pki_cache_fn, mode="rb") as fn_: - return salt.payload.load(fn_) - else: - for fn_ in salt.utils.data.sorted_ignorecase( - os.listdir(os.path.join(self.pki_dir, self.acc)) - ): - if not fn_.startswith("."): - minions.append(fn_) - return minions + accepted = self.key.list_status("accepted").get("minions") + if accepted: + minions = minions | set(accepted) except OSError as exc: log.error( "Encountered OSError while evaluating minions in PKI dir: %s", exc ) - return minions + except Exception as exc: # pylint: disable=broad-except + log.error("Encountered Exception while evaluating pki minions: %s", exc) + + return minions def _check_cache_minions( - self, expr, delimiter, greedy, search_type, regex_match=False, exact_match=False + self, + expr, + delimiter, + greedy, + search_type, + regex_match=False, + exact_match=False, + minions=None, ): """ Helper function to search for minions in master caches If 'greedy', @@ -301,12 +323,8 @@ def list_cached_minions(): return self.cache.list("minions") if greedy: - minions = [] - for fn_ in salt.utils.data.sorted_ignorecase( - os.listdir(os.path.join(self.pki_dir, self.acc)) - ): - if not fn_.startswith("."): - minions.append(fn_) + if not minions: + minions = self._pki_minions() elif cache_enabled: minions = list_cached_minions() else: @@ -340,50 +358,55 @@ def list_cached_minions(): minions = list(minions) return {"minions": minions, "missing": []} - def _check_grain_minions(self, expr, delimiter, greedy): + def _check_grain_minions(self, expr, delimiter, greedy, minions=None): """ Return the minions found by looking via grains """ - return self._check_cache_minions(expr, delimiter, greedy, "grains") + return self._check_cache_minions( + expr, delimiter, greedy, "grains", minions=minions + ) - def _check_grain_pcre_minions(self, expr, delimiter, greedy): + def _check_grain_pcre_minions(self, expr, delimiter, greedy, minions=None): """ Return the minions found by looking via grains with PCRE """ return self._check_cache_minions( - expr, delimiter, greedy, "grains", regex_match=True + expr, delimiter, greedy, "grains", regex_match=True, minions=minions ) - def _check_pillar_minions(self, expr, delimiter, greedy): + def _check_pillar_minions(self, expr, delimiter, greedy, minions=None): """ Return the minions found by looking via pillar """ - return self._check_cache_minions(expr, delimiter, greedy, "pillar") + return self._check_cache_minions( + expr, delimiter, greedy, "pillar", minions=minions + ) - def _check_pillar_pcre_minions(self, expr, delimiter, greedy): + def _check_pillar_pcre_minions(self, expr, delimiter, greedy, minions=None): """ Return the minions found by looking via pillar with PCRE """ return self._check_cache_minions( - expr, delimiter, greedy, "pillar", regex_match=True + expr, delimiter, greedy, "pillar", regex_match=True, minions=minions ) - def _check_pillar_exact_minions(self, expr, delimiter, greedy): + def _check_pillar_exact_minions(self, expr, delimiter, greedy, minions=None): """ Return the minions found by looking via pillar """ return self._check_cache_minions( - expr, delimiter, greedy, "pillar", exact_match=True + expr, delimiter, greedy, "pillar", exact_match=True, 
minions=minions ) - def _check_ipcidr_minions(self, expr, greedy): + def _check_ipcidr_minions(self, expr, greedy, minions=None): """ Return the minions found by looking via ipcidr """ cache_enabled = self.opts.get("minion_data_cache", False) if greedy: - minions = self._pki_minions() + if not minions: + minions = self._pki_minions() elif cache_enabled: minions = self.cache.list("minions") else: @@ -446,11 +469,10 @@ def _check_range_minions(self, expr, greedy): except seco.range.RangeException as exc: log.error("Range exception in compound match: %s", exc) cache_enabled = self.opts.get("minion_data_cache", False) + if greedy: mlist = [] - for fn_ in salt.utils.data.sorted_ignorecase( - os.listdir(os.path.join(self.pki_dir, self.acc)) - ): + for fn_ in self._pki_minions(): if not fn_.startswith("."): mlist.append(fn_) return {"minions": mlist, "missing": []} @@ -459,16 +481,20 @@ def _check_range_minions(self, expr, greedy): else: return {"minions": [], "missing": []} - def _check_compound_pillar_exact_minions(self, expr, delimiter, greedy): + def _check_compound_pillar_exact_minions( + self, expr, delimiter, greedy, minions=None + ): """ Return the minions found by looking via compound matcher Disable pillar glob matching """ - return self._check_compound_minions(expr, delimiter, greedy, pillar_exact=True) + return self._check_compound_minions( + expr, delimiter, greedy, pillar_exact=True, minions=minions + ) def _check_compound_minions( - self, expr, delimiter, greedy, pillar_exact=False + self, expr, delimiter, greedy, pillar_exact=False, minions=None ): # pylint: disable=unused-argument """ Return the minions found by looking via compound matcher @@ -476,8 +502,18 @@ def _check_compound_minions( if not isinstance(expr, str) and not isinstance(expr, (list, tuple)): log.error("Compound target that is neither string, list nor tuple") return {"minions": [], "missing": []} - minions = set(self._pki_minions()) - log.debug("minions: %s", minions) + + # we wrap this in a dict so we can assign to it from within the eval at + # the end of this method + _deferred_minions_scope = {"minions": minions} + + def _deferred_minions(): + if not _deferred_minions_scope["minions"]: + _deferred_minions_scope["minions"] = self._pki_minions() + + return _deferred_minions_scope["minions"] + + log.debug("expr: %s, delimiter: %s, minions: %s", expr, delimiter, minions) nodegroups = self.opts.get("nodegroups", {}) @@ -522,7 +558,7 @@ def _check_compound_minions( if not results[-1] in ("&", "|", "("): results.append("&") results.append("(") - results.append(str(set(minions))) + results.append("set(all_minions())") results.append("-") unmatched.append("-") elif word == "and": @@ -552,7 +588,7 @@ def _check_compound_minions( # seq start with oper, fail if word == "not": results.append("(") - results.append(str(set(minions))) + results.append("set(all_minions())") results.append("-") unmatched.append("-") elif word == "(": @@ -592,7 +628,7 @@ def _check_compound_minions( # a 'not' if "L" == target_info["engine"]: engine_args.append(results and results[-1] == "-") - _results = engine(*engine_args) + _results = engine(*engine_args, minions=minions) results.append(str(set(_results["minions"]))) missing.extend(_results["missing"]) if unmatched and unmatched[-1] == "-": @@ -600,8 +636,7 @@ def _check_compound_minions( unmatched.pop() else: - # The match is not explicitly defined, evaluate as a glob - _results = self._check_glob_minions(word, True) + _results = self._check_glob_minions(word, True, minions=minions) 
results.append(str(set(_results["minions"]))) if unmatched and unmatched[-1] == "-": results.append(")") @@ -613,7 +648,15 @@ def _check_compound_minions( results = " ".join(results) log.debug("Evaluating final compound matching expr: %s", results) try: - minions = list(eval(results)) # pylint: disable=W0123 + minions = list( + eval( # pylint: disable=eval-used + results, + { + "all_minions": _deferred_minions, + "_deferred_minions_scope": _deferred_minions_scope, + }, + ) + ) # pylint: disable=W0123 return {"minions": minions, "missing": missing} except Exception: # pylint: disable=broad-except log.error("Invalid compound target: %s", expr) @@ -673,17 +716,14 @@ def connected_ids(self, subset=None, show_ip=False): break return minions - def _all_minions(self, expr=None): + def _all_minions(self, expr=None, minions=None): """ Return a list of all minions that have auth'd """ - mlist = [] - for fn_ in salt.utils.data.sorted_ignorecase( - os.listdir(os.path.join(self.pki_dir, self.acc)) - ): - if not fn_.startswith("."): - mlist.append(fn_) - return {"minions": mlist, "missing": []} + if not minions: + minions = self._pki_minions() + + return {"minions": minions, "missing": []} def check_minions( self, expr, tgt_type="glob", delimiter=DEFAULT_TARGET_DELIM, greedy=True @@ -694,7 +734,6 @@ def check_minions( match the regex, this will then be used to parse the returns to make sure everyone has checked back in. """ - try: if expr is None: expr = "" @@ -741,6 +780,14 @@ def validate_tgt(self, valid, expr, tgt_type, minions=None, expr_form=None): otherwise return False. """ + # save some cpu cycles, also avoid a masterminion no-cache bug + if valid == "*": + return True + + # No minions can ever match a @runner,@master,etc + if valid.startswith("@"): + return False + v_minions = set(self.check_minions(valid, "compound").get("minions", [])) if minions is None: _res = self.check_minions(expr, tgt_type) diff --git a/salt/wheel/key.py b/salt/wheel/key.py index 76156b7e29e3..215b3bb2e5f7 100644 --- a/salt/wheel/key.py +++ b/salt/wheel/key.py @@ -26,21 +26,17 @@ using the :mod:`saltutil execution module `. """ -import hashlib import logging import os +import salt.cache import salt.crypt import salt.key import salt.utils.crypt -import salt.utils.files -import salt.utils.platform +import salt.utils.versions from salt.utils.sanitizers import clean -__func_alias__ = { - "list_": "list", - "key_str": "print", -} +__func_alias__ = {"list_": "list", "key_str": "print"} log = logging.getLogger(__name__) @@ -82,11 +78,23 @@ def list_all(): def name_match(match): + """ + Alias to glob_match + """ + salt.utils.versions.warn_until( + 3010, + "'wheel.key.name_match' has been renamed to 'wheel.key.glob_match', and will be removed in the Calcium release." + "Please update your workflows to use glob_match instead.", + ) + return glob_match(match) + + +def glob_match(match): """ List all the keys based on a glob match """ with salt.key.get_key(__opts__) as skey: - return skey.name_match(match) + return skey.glob_match(match) def accept(match, include_rejected=False, include_denied=False): @@ -290,11 +298,8 @@ def master_key_str(): ... 
TWugEQpPt\niQIDAQAB\n-----END PUBLIC KEY-----'}} """ - keyname = "master.pub" - path_to_pubkey = os.path.join(__opts__["pki_dir"], keyname) - with salt.utils.files.fopen(path_to_pubkey, "r") as fp_: - keyvalue = salt.utils.stringutils.to_unicode(fp_.read()) - return {"local": {keyname: keyvalue}} + master_key = salt.crypt.MasterKeys(__opts__, autocreate=False) + return {"local": {f"{master_key.master_id}.pub": master_key.get_pub_str()}} def finger(match, hash_type=None): @@ -370,26 +375,8 @@ def gen(id_=None, keysize=2048): -----END RSA PRIVATE KEY-----'} """ - if id_ is None: - id_ = hashlib.sha512(os.urandom(32)).hexdigest() - else: - id_ = clean.filename(id_) - ret = {"priv": "", "pub": ""} - priv = salt.crypt.gen_keys(__opts__["pki_dir"], id_, keysize) - pub = "{}.pub".format(priv[: priv.rindex(".")]) - with salt.utils.files.fopen(priv) as fp_: - ret["priv"] = salt.utils.stringutils.to_unicode(fp_.read()) - with salt.utils.files.fopen(pub) as fp_: - ret["pub"] = salt.utils.stringutils.to_unicode(fp_.read()) - - # The priv key is given the Read-Only attribute. The causes `os.remove` to - # fail in Windows. - if salt.utils.platform.is_windows(): - os.chmod(priv, 128) - - os.remove(priv) - os.remove(pub) - return ret + priv, pub = salt.crypt.gen_keys(keysize) + return {"priv": priv, "pub": pub} def gen_accept(id_, keysize=2048, force=False): @@ -434,11 +421,14 @@ def gen_accept(id_, keysize=2048, force=False): """ id_ = clean.id(id_) ret = gen(id_, keysize) - acc_path = os.path.join(__opts__["pki_dir"], "minions", id_) - if os.path.isfile(acc_path) and not force: + + cache = salt.cache.Cache(__opts__, driver=__opts__["keys.cache_driver"]) + key = cache.fetch("keys", id_) + + if key and not force: return {} - with salt.utils.files.fopen(acc_path, "w+") as fp_: - fp_.write(salt.utils.stringutils.to_str(ret["pub"])) + + cache.store("keys", id_, {"pub": ret["pub"], "state": "accepted"}) return ret diff --git a/tests/pytests/functional/cache/test_localfs_key.py b/tests/pytests/functional/cache/test_localfs_key.py new file mode 100644 index 000000000000..eb2cb0220fdc --- /dev/null +++ b/tests/pytests/functional/cache/test_localfs_key.py @@ -0,0 +1,224 @@ +import logging +import os +import time + +import pytest + +import salt.cache +from salt.exceptions import SaltCacheError +from salt.utils.files import fopen + +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def cache(minion_opts): + opts = minion_opts.copy() + opts["cache"] = "localfs_key" + cache = salt.cache.factory(opts) + try: + yield cache + finally: + for minion in ["minion_a", "minion_x", "minion_y", "minion_z", "minion_denied"]: + cache.flush("keys", minion) + cache.flush("denied_keys", minion) + + +# TODO: test user +def test_key_lifecycle(cache): + pki_dir = cache.opts["pki_dir"] + + # key is put into pending + cache.store("keys", "minion_a", {"state": "pending", "pub": "RSAKEY_minion_a"}) + + assert os.path.exists( + os.path.join(pki_dir, "minions_pre", "minion_a") + ), "key was created" + assert ( + fopen(os.path.join(pki_dir, "minions_pre", "minion_a"), "rb").read() + == b"RSAKEY_minion_a" + ), "key serialized to right content" + + assert cache.fetch("keys", "minion_a") == { + "state": "pending", + "pub": "RSAKEY_minion_a", + }, "key fetched as expected" + + # key is moved to rejected from pending + cache.store("keys", "minion_a", {"state": "rejected", "pub": "RSAKEY_minion_a"}) + + assert not os.path.exists( + os.path.join(pki_dir, "minions", "minion_a") + ), "key was removed from created" + assert 
os.path.exists(
+        os.path.join(pki_dir, "minions_rejected", "minion_a")
+    ), "key was added to rejected"
+    assert (
+        fopen(os.path.join(pki_dir, "minions_rejected", "minion_a"), "rb").read()
+        == b"RSAKEY_minion_a"
+    ), "key serialized as expected"
+
+    assert cache.fetch("keys", "minion_a") == {
+        "state": "rejected",
+        "pub": "RSAKEY_minion_a",
+    }, "key fetched as expected"
+
+    # key is moved from rejected to accepted
+    cache.store("keys", "minion_a", {"state": "accepted", "pub": "RSAKEY_minion_a"})
+
+    assert not os.path.exists(
+        os.path.join(pki_dir, "minions_rejected", "minion_a")
+    ), "key was removed from rejected"
+    assert os.path.exists(
+        os.path.join(pki_dir, "minions", "minion_a")
+    ), "key was added to minions"
+    assert (
+        fopen(os.path.join(pki_dir, "minions", "minion_a"), "rb").read()
+        == b"RSAKEY_minion_a"
+    ), "key serialized as expected"
+    assert cache.fetch("keys", "minion_a") == {
+        "state": "accepted",
+        "pub": "RSAKEY_minion_a",
+    }, "key fetched as expected"
+
+    # key is moved to denied
+    cache.store("denied_keys", "minion_a", ["RSAKEY_minion_b"])
+    assert os.path.exists(
+        os.path.join(pki_dir, "minions", "minion_a")
+    ), "key remained in minions"
+    assert os.path.exists(
+        os.path.join(pki_dir, "minions_denied", "minion_a")
+    ), "key remained in minions"
+    assert (
+        fopen(os.path.join(pki_dir, "minions_denied", "minion_a"), "rb").read()
+        == b"RSAKEY_minion_b"
+    ), "key serialized as expected"
+    assert cache.fetch("denied_keys", "minion_a") == [
+        "RSAKEY_minion_b"
+    ], "key fetched as expected"
+
+
+def test_updated(cache):
+    now = time.time()
+
+    cache.store("keys", "minion_a", {"state": "accepted", "pub": "RSAKEY_minion_a"})
+    updated = cache.updated("keys", "minion_a")
+
+    # add some buffer just in case
+    assert updated - int(now) <= 1
+
+    assert cache.updated("keys", "nonexistent") is None
+
+
+def test_minion_id_validity(cache):
+    with pytest.raises(SaltCacheError, match="not a valid minion_id"):
+        cache.store("keys", "foo/bar/..", {})
+
+    with pytest.raises(SaltCacheError, match="not a valid minion_id"):
+        cache.fetch("keys", "foo/bar/..")
+
+    with pytest.raises(SaltCacheError, match="not a valid minion_id"):
+        cache.updated("keys", "foo/bar/..")
+
+    with pytest.raises(SaltCacheError, match="not a valid minion_id"):
+        cache.contains("keys", "foo/bar/..")
+
+    with pytest.raises(SaltCacheError, match="not a valid minion_id"):
+        cache.flush("keys", "foo/bar/..")
+
+
+def test_fetch(cache):
+    with pytest.raises(SaltCacheError, match="bug at call-site"):
+        cache.fetch("keys", ".key_cache")
+
+    with fopen(
+        os.path.join(cache.opts["pki_dir"], "minions_rejected", "minion_x"), "w+b"
+    ) as fh_:
+        fh_.write(b"RSAKEY_minion_x")
+
+    with fopen(
+        os.path.join(cache.opts["pki_dir"], "minions_pre", "minion_y"), "w+b"
+    ) as fh_:
+        fh_.write(b"RSAKEY_minion_y")
+
+    with fopen(
+        os.path.join(cache.opts["pki_dir"], "minions", "minion_z"), "w+b"
+    ) as fh_:
+        fh_.write(b"RSAKEY_minion_z")
+
+    # minions_denied does not get created automatically
+    if not os.path.exists(os.path.join(cache.opts["pki_dir"], "minions_denied")):
+        os.makedirs(os.path.join(cache.opts["pki_dir"], "minions_denied"))
+
+    with fopen(
+        os.path.join(cache.opts["pki_dir"], "minions_denied", "minion_denied"), "w+b"
+    ) as fh_:
+        fh_.write(b"RSAKEY_minion_denied")
+
+    assert cache.fetch("keys", "minion_x") == {
+        "state": "rejected",
+        "pub": "RSAKEY_minion_x",
+    }
+    assert cache.fetch("keys", "minion_y") == {
+        "state": "pending",
+        "pub": "RSAKEY_minion_y",
+    }
+    assert cache.fetch("keys",
"minion_z") == { + "state": "accepted", + "pub": "RSAKEY_minion_z", + } + assert cache.fetch("denied_keys", "minion_denied") == ["RSAKEY_minion_denied"] + + +def test_flush_contains(cache): + # set up test state + cache.store("keys", "minion_x", {"state": "pending", "pub": "RSAKEY_minion_x"}) + cache.store("keys", "minion_y", {"state": "accepted", "pub": "RSAKEY_minion_y"}) + cache.store("keys", "minion_z", {"state": "pending", "pub": "RSAKEY_minion_z"}) + cache.store("denied_keys", "minion_a", ["RSAKEY_minion_a"]) + + # assert contains works as expected + assert cache.contains("keys", "minion_x") + assert cache.contains("keys", "minion_y") + assert cache.contains("keys", "minion_z") + assert cache.contains("denied_keys", "minion_a") + + # flush test state + cache.flush("keys", "minion_x") + cache.flush("keys", "minion_y") + cache.flush("keys", "minion_z") + cache.flush("denied_keys", "minion_a") + + # assert files on disk no longer exist mapping to the expected keys + assert not os.path.exists( + os.path.join(cache.opts["pki_dir"], "minions_pre", "minion_x") + ) + assert not os.path.exists( + os.path.join(cache.opts["pki_dir"], "minions_pre", "minion_y") + ) + assert not os.path.exists( + os.path.join(cache.opts["pki_dir"], "minions_", "minion_z") + ) + assert not os.path.exists( + os.path.join(cache.opts["pki_dir"], "minions_denied", "minion_a") + ) + + # assert contains no longer returns true + assert not cache.contains("keys", "minion_x") + assert not cache.contains("keys", "minion_y") + assert not cache.contains("keys", "minion_z") + assert not cache.contains("denied_keys", "minion_a") + + +def test_list(cache): + # set up test state + cache.store("keys", "minion_x", {"state": "pending", "pub": "RSAKEY_minion_x"}) + cache.store("keys", "minion_y", {"state": "accepted", "pub": "RSAKEY_minion_y"}) + cache.store("keys", "minion_z", {"state": "pending", "pub": "RSAKEY_minion_z"}) + cache.store("denied_keys", "minion_a", ["RSAKEY_minion_a"]) + + # assert contains works as expected + assert sorted(cache.list("keys")) == ["minion_x", "minion_y", "minion_z"] + # + # assert contains works as expected + assert cache.list("denied_keys") == ["minion_a"] diff --git a/tests/pytests/functional/channel/test_server.py b/tests/pytests/functional/channel/test_server.py index 32f71068ac00..2b56cd5bd348 100644 --- a/tests/pytests/functional/channel/test_server.py +++ b/tests/pytests/functional/channel/test_server.py @@ -16,6 +16,7 @@ import salt.channel.client import salt.channel.server import salt.config +import salt.crypt import salt.master import salt.utils.platform import salt.utils.process @@ -86,7 +87,7 @@ def master_config(root_dir, transport): ), ) os.makedirs(master_conf["pki_dir"]) - salt.crypt.gen_keys(master_conf["pki_dir"], "master", 4096) + master_keys = salt.crypt.MasterKeys(master_conf) minions_keys = os.path.join(master_conf["pki_dir"], "minions") os.makedirs(minions_keys) yield master_conf @@ -113,7 +114,7 @@ def minion_config(master_config, channel_minion_id): signing_algorithm="PKCS1v15-SHA224" if FIPS_TESTRUN else "PKCS1v15-SHA1", ) os.makedirs(minion_conf["pki_dir"]) - salt.crypt.gen_keys(minion_conf["pki_dir"], "minion", 4096) + salt.crypt.AsyncAuth(minion_conf).get_keys() # generate minion.pem/pub minion_pub = os.path.join(minion_conf["pki_dir"], "minion.pub") pub_on_master = os.path.join(master_config["pki_dir"], "minions", channel_minion_id) shutil.copyfile(minion_pub, pub_on_master) diff --git a/tests/pytests/functional/test_crypt.py b/tests/pytests/functional/test_crypt.py 
index b0cf862d641e..38249c905b84 100644 --- a/tests/pytests/functional/test_crypt.py +++ b/tests/pytests/functional/test_crypt.py @@ -6,8 +6,11 @@ @pytest.mark.windows_whitelisted -def test_generated_keys(tmp_path): - priv = pathlib.Path(salt.crypt.gen_keys(tmp_path, "aaa", 2048)) +def test_generated_keys(master_opts, tmp_path): + master_opts["pki_dir"] = str(tmp_path) + master_keys = salt.crypt.MasterKeys(master_opts) + master_keys.find_or_create_keys(name="aaa", keysize=2048) + priv = pathlib.Path(master_keys.opts["pki_dir"]) / "aaa.pem" pub = priv.with_suffix(".pub") assert "\r" not in priv.read_text(encoding="utf-8") assert "\r" not in pub.read_text(encoding="utf-8") diff --git a/tests/pytests/integration/cli/test_salt_key.py b/tests/pytests/integration/cli/test_salt_key.py index 173107e85eb0..73b19e2b1c40 100644 --- a/tests/pytests/integration/cli/test_salt_key.py +++ b/tests/pytests/integration/cli/test_salt_key.py @@ -207,8 +207,6 @@ def test_list_all_no_check_files( "-L", ) assert ret.returncode == 0 - # The directory will show up since there is no file check - expected["minions"].insert(0, "dir1") assert ret.data == expected diff --git a/tests/pytests/integration/minion/test_return_retries.py b/tests/pytests/integration/minion/test_return_retries.py index fcd73b0a46e1..507c8e8fcf41 100644 --- a/tests/pytests/integration/minion/test_return_retries.py +++ b/tests/pytests/integration/minion/test_return_retries.py @@ -125,7 +125,7 @@ def test_pillar_timeout(salt_master_factory, tmp_path): cmd = 'import time; time.sleep(6); print(\'{"foo": "bang"}\');\n' with salt.utils.files.fopen(tmp_path / "script.py", "w") as fp: fp.write(cmd) - proc = cli.run("state.sls", sls_name, minion_tgt="*") + proc = cli.run("state.sls", sls_name, minion_tgt="*", _timeout=60) # At least one minion should have a Pillar timeout assert proc.returncode == 1 minion_timed_out = False diff --git a/tests/pytests/integration/netapi/test_client.py b/tests/pytests/integration/netapi/test_client.py index 35032b81e709..3c2d9ad045e6 100644 --- a/tests/pytests/integration/netapi/test_client.py +++ b/tests/pytests/integration/netapi/test_client.py @@ -100,7 +100,10 @@ def test_wheel(client, auth_creds): assert "tag" in ret["data"] assert "return" in ret["data"] assert "local" in ret["data"]["return"] - assert {"master.pem", "master.pub"}.issubset(set(ret["data"]["return"]["local"])) + master_id = client.opts["id"].removesuffix("_master") + assert {f"{master_id}.pem", f"{master_id}.pub"}.issubset( + set(ret["data"]["return"]["local"]) + ) @pytest.mark.slow_test diff --git a/tests/pytests/integration/wheel/test_client.py b/tests/pytests/integration/wheel/test_client.py index 40363ead10a4..b5b470e78746 100644 --- a/tests/pytests/integration/wheel/test_client.py +++ b/tests/pytests/integration/wheel/test_client.py @@ -26,7 +26,8 @@ def test_master_call(client, auth_creds, salt_auto_account): assert data["user"] == salt_auto_account.username assert data["fun"] == "wheel.key.list_all" assert data["return"] - assert data["return"]["local"] == ["master.pem", "master.pub"] + master_id = client.opts["id"].removesuffix("_master") + assert data["return"]["local"] == [f"{master_id}.pem", f"{master_id}.pub"] def test_token(client, client_config, auth_creds, salt_auto_account): @@ -54,7 +55,8 @@ def test_token(client, client_config, auth_creds, salt_auto_account): assert data["user"] == salt_auto_account.username assert data["fun"] == "wheel.key.list_all" assert data["return"] - assert data["return"]["local"] == ["master.pem", 
"master.pub"] + master_id = client.opts["id"].removesuffix("_master") + assert data["return"]["local"] == [f"{master_id}.pem", f"{master_id}.pub"] def test_cmd_sync(client, auth_creds, salt_auto_account): @@ -68,7 +70,8 @@ def test_cmd_sync(client, auth_creds, salt_auto_account): assert data["user"] == salt_auto_account.username assert data["fun"] == "wheel.key.list_all" assert data["return"] - assert data["return"]["local"] == ["master.pem", "master.pub"] + master_id = client.opts["id"].removesuffix("_master") + assert data["return"]["local"] == [f"{master_id}.pem", f"{master_id}.pub"] # Remove this skipIf when https://github.com/saltstack/salt/issues/39616 is resolved @@ -100,8 +103,9 @@ def test_cmd_sync_w_arg(client, auth_creds, salt_auto_account): assert data["fun"] == "wheel.key.finger" assert data["return"] assert data["return"]["local"] - assert "master.pem" in data["return"]["local"] - assert "master.pub" in data["return"]["local"] + master_id = client.opts["id"].removesuffix("_master") + assert f"{master_id}.pem" in data["return"]["local"] + assert f"{master_id}.pub" in data["return"]["local"] def test_wildcard_auth(client): @@ -128,4 +132,5 @@ def test_wildcard_auth(client): assert data["user"] == username assert data["fun"] == "wheel.key.list_all" assert data["return"] - assert data["return"]["local"] == ["master.pem", "master.pub"] + master_id = client.opts["id"].removesuffix("_master") + assert data["return"]["local"] == [f"{master_id}.pem", f"{master_id}.pub"] diff --git a/tests/pytests/integration/wheel/test_key.py b/tests/pytests/integration/wheel/test_key.py index a56e4bba389d..e03c5ea72c5c 100644 --- a/tests/pytests/integration/wheel/test_key.py +++ b/tests/pytests/integration/wheel/test_key.py @@ -34,5 +34,6 @@ def test_master_key_str(client): assert ret assert "local" in ret data = ret["local"] - assert "master.pub" in data - assert data["master.pub"].startswith("-----BEGIN PUBLIC KEY-----") + assert data[f"{client.opts['id'].removesuffix('_master')}.pub"].startswith( + "-----BEGIN PUBLIC KEY-----" + ) diff --git a/tests/pytests/scenarios/cluster/test_cluster.py b/tests/pytests/scenarios/cluster/test_cluster.py index 107f25661815..8825170f6114 100644 --- a/tests/pytests/scenarios/cluster/test_cluster.py +++ b/tests/pytests/scenarios/cluster/test_cluster.py @@ -1,5 +1,5 @@ """ -Cluster scinarios. +Cluster scenarios. """ import getpass diff --git a/tests/pytests/unit/crypt/test_crypt.py b/tests/pytests/unit/crypt/test_crypt.py index 349e820701c7..d936ab04d1a7 100644 --- a/tests/pytests/unit/crypt/test_crypt.py +++ b/tests/pytests/unit/crypt/test_crypt.py @@ -5,16 +5,19 @@ Unit tests for salt's crypt module """ +import os.path import uuid import pytest +import salt.cache import salt.crypt import salt.master import salt.payload import salt.utils.files from tests.conftest import FIPS_TESTRUN from tests.support.helpers import dedent +from tests.support.mock import ANY, MagicMock, call from . 
import PRIV_KEY, PRIV_KEY2, PUB_KEY, PUB_KEY2
@@ -114,16 +117,28 @@ def test_master_keys_without_cluster_id(tmp_path, master_opts):
     master_opts["pki_dir"] = str(tmp_path)
     assert master_opts["cluster_id"] is None
     assert master_opts["cluster_pki_dir"] is None
-    mkeys = salt.crypt.MasterKeys(master_opts)
+
+    # __init__ autocreates keys by default, but we turn it off to test more easily
+    mkeys = salt.crypt.MasterKeys(master_opts, autocreate=False)
+    original_store = mkeys.cache.store
+    mkeys.cache.store = store_mock = MagicMock(wraps=original_store)
+
+    mkeys._setup_keys()
+
     expected_master_pub = str(tmp_path / "master.pub")
     expected_master_rsa = str(tmp_path / "master.pem")
-    assert expected_master_pub == mkeys.master_pub_path
-    assert expected_master_rsa == mkeys.master_rsa_path
-    assert mkeys.cluster_pub_path is None
-    assert mkeys.cluster_rsa_path is None
-    assert mkeys.pub_path == expected_master_pub
-    assert mkeys.rsa_path == expected_master_rsa
-    assert mkeys.key == mkeys.master_key
+
+    assert os.path.exists(expected_master_pub)
+    assert os.path.exists(expected_master_rsa)
+
+    expected_calls = [
+        call("master_keys", "master.pem", ANY),
+        call("master_keys", "master.pub", ANY),
+        call("master_keys", master_opts["id"].removesuffix("_master") + ".pem", ANY),
+        call("master_keys", master_opts["id"].removesuffix("_master") + ".pub", ANY),
+    ]
+    # Assert all calls match the pattern
+    store_mock.assert_has_calls(expected_calls, any_order=False)


 def test_master_keys_with_cluster_id(tmp_path, master_opts):
@@ -138,19 +153,40 @@ def test_master_keys_with_cluster_id(tmp_path, master_opts):
     master_opts["cluster_id"] = "cluster1"
     master_opts["cluster_pki_dir"] = str(cluster_pki_path)

-    mkeys = salt.crypt.MasterKeys(master_opts)
-
     expected_master_pub = str(master_pki_path / "master.pub")
     expected_master_rsa = str(master_pki_path / "master.pem")
     expected_cluster_pub = str(cluster_pki_path / "cluster.pub")
     expected_cluster_rsa = str(cluster_pki_path / "cluster.pem")
-    assert expected_master_pub == mkeys.master_pub_path
-    assert expected_master_rsa == mkeys.master_rsa_path
-    assert expected_cluster_pub == mkeys.cluster_pub_path
-    assert expected_cluster_rsa == mkeys.cluster_rsa_path
-    assert mkeys.pub_path == expected_cluster_pub
-    assert mkeys.rsa_path == expected_cluster_rsa
-    assert mkeys.key == mkeys.cluster_key
+
+    # __init__ autocreates keys by default, but we turn it off to test more easily
+    mkeys = salt.crypt.MasterKeys(master_opts, autocreate=False)
+    original_store = mkeys.cache.store
+    original_flush = mkeys.cache.flush
+    mkeys.cache.store = store_mock = MagicMock(wraps=original_store)
+    mkeys.cache.flush = flush_mock = MagicMock(wraps=original_flush)
+
+    mkeys._setup_keys()
+
+    assert os.path.exists(expected_master_pub)
+    assert os.path.exists(expected_master_rsa)
+    assert os.path.exists(expected_cluster_pub)
+    assert os.path.exists(expected_cluster_rsa)
+
+    expected_calls = [
+        call("master_keys", "master.pem", ANY),
+        call("master_keys", "master.pub", ANY),
+        call("master_keys", master_opts["id"].removesuffix("_master") + ".pem", ANY),
+        call("master_keys", master_opts["id"].removesuffix("_master") + ".pub", ANY),
+        call(
+            "master_keys",
+            os.path.join("peers", master_opts["id"].removesuffix("_master") + ".pub"),
+            ANY,
+        ),
+        call("master_keys", "cluster.pem", ANY),
+        call("master_keys", "cluster.pub", ANY),
+    ]
+    # Assert all calls match the pattern
+    store_mock.assert_has_calls(expected_calls, any_order=False)


 def test_pwdata_decrypt():
diff --git
a/tests/pytests/unit/crypt/test_crypt_cryptodome.py b/tests/pytests/unit/crypt/test_crypt_cryptodome.py deleted file mode 100644 index 8a27e36a1d86..000000000000 --- a/tests/pytests/unit/crypt/test_crypt_cryptodome.py +++ /dev/null @@ -1,57 +0,0 @@ -import logging -import os - -import pytest - -import salt.crypt -from tests.support.mock import MagicMock, MockCall, mock_open, patch - -RSA = pytest.importorskip("Cryptodome.PublicKey.RSA") - -try: - import M2Crypto # pylint: disable=unused-import - - HAS_M2 = True -except ImportError: - HAS_M2 = False - -log = logging.getLogger(__name__) - -pytestmark = [ - pytest.mark.skipif(HAS_M2, reason="m2crypto is used by salt.crypt if installed"), -] - - -@pytest.mark.slow_test -def test_gen_keys(tmp_path): - key_path = str(tmp_path / "keydir") - open_priv_wb = MockCall(os.path.join(key_path, "keyname.pem"), "wb+") - open_pub_wb = MockCall(os.path.join(key_path, "keyname.pub"), "wb+") - - real_is_file = os.path.isfile - - def is_file(path): - if path.startswith(str(tmp_path)): - return False - return real_is_file(path) - - with patch.multiple( - os, - umask=MagicMock(), - chmod=MagicMock(), - access=MagicMock(return_value=True), - ): - with patch("salt.utils.files.fopen", mock_open()) as m_open, patch( - "os.path.isfile", return_value=True - ): - result = salt.crypt.gen_keys(key_path, "keyname", 2048) - assert result == os.path.join(key_path, "keyname.pem") - assert open_priv_wb not in m_open.calls - assert open_pub_wb not in m_open.calls - - with patch("salt.utils.files.fopen", mock_open()) as m_open, patch( - "os.path.isfile", is_file - ): - salt.crypt.gen_keys(key_path, "keyname", 2048) - assert open_priv_wb in m_open.calls - assert open_pub_wb in m_open.calls diff --git a/tests/pytests/unit/crypt/test_crypt_cryptography.py b/tests/pytests/unit/crypt/test_crypt_cryptography.py index 9a641b292d55..8f5db5d12f5f 100644 --- a/tests/pytests/unit/crypt/test_crypt_cryptography.py +++ b/tests/pytests/unit/crypt/test_crypt_cryptography.py @@ -1,11 +1,13 @@ import hashlib import hmac import os +from pathlib import Path import pytest from cryptography.hazmat.backends.openssl import backend from cryptography.hazmat.primitives import serialization +import salt.config import salt.crypt as crypt import salt.utils.files import salt.utils.stringutils @@ -113,9 +115,13 @@ def signature(): def private_key(passphrase, tmp_path): keypath = tmp_path / "keys" keypath.mkdir() + opts = salt.config.master_config(None) + opts["pki_dir"] = keypath + mk = crypt.MasterKeys(opts, autocreate=False) keyname = "test" keysize = 2048 - return crypt.gen_keys(str(keypath), keyname, keysize, passphrase=passphrase) + mk.find_or_create_keys(name=keyname, keysize=keysize, passphrase=passphrase) + return str(Path(opts["pki_dir"]) / f"{keyname}.pem") def test_fips_mode(): @@ -139,21 +145,15 @@ def test_gen_keys_legacy(tmp_path): assert keybytes.startswith(b"-----BEGIN PUBLIC KEY-----\n") -def test_gen_keys(tmp_path): - keypath = tmp_path / "keys" - keypath.mkdir() - passphrase = "pass1234" - keyname = "test" - keysize = 2048 - ret = crypt.gen_keys(str(keypath), keyname, keysize, passphrase=passphrase) - with salt.utils.files.fopen(ret, "rb") as fp: +def test_gen_keys(private_key, passphrase): + with salt.utils.files.fopen(private_key, "rb") as fp: keybytes = fp.read() if FIPS_TESTRUN: assert keybytes.startswith(b"-----BEGIN ENCRYPTED PRIVATE KEY-----\n") else: assert keybytes.startswith(b"-----BEGIN RSA PRIVATE KEY-----\n") priv = serialization.load_pem_private_key(keybytes, 
passphrase.encode()) - with salt.utils.files.fopen(ret.replace(".pem", ".pub"), "rb") as fp: + with salt.utils.files.fopen(private_key.replace(".pem", ".pub"), "rb") as fp: keybytes = fp.read() assert keybytes.startswith(b"-----BEGIN PUBLIC KEY-----\n") @@ -164,14 +164,14 @@ def test_legacy_private_key_loading(private_key, passphrase): def test_private_key_loading(private_key, passphrase): - priv = crypt.PrivateKey(private_key, passphrase) + priv = crypt.PrivateKey.from_file(private_key, passphrase) assert priv.key @pytest.mark.skipif(FIPS_TESTRUN, reason="Legacy key can not be loaded in FIPS mode") def test_private_key_signing(private_key, passphrase): lpriv = LegacyPrivateKey(private_key.encode(), passphrase.encode()) - priv = crypt.PrivateKey(private_key, passphrase) + priv = crypt.PrivateKey.from_file(private_key, passphrase) data = b"meh" signature = priv.sign(data) lsignature = lpriv.sign(data) @@ -180,7 +180,7 @@ def test_private_key_signing(private_key, passphrase): @pytest.mark.skipif(FIPS_TESTRUN, reason="Legacy key can not be loaded in FIPS mode") def test_legacy_public_key_verify(private_key, passphrase): - lpriv = crypt.PrivateKey(private_key, passphrase) + lpriv = crypt.PrivateKey.from_file(private_key, passphrase) data = b"meh" signature = lpriv.sign(data) pubkey = LegacyPublicKey(private_key.replace(".pem", ".pub")) @@ -192,13 +192,13 @@ def test_public_key_verify(private_key, passphrase): lpriv = LegacyPrivateKey(private_key.encode(), passphrase.encode()) data = b"meh" signature = lpriv.sign(data) - pubkey = crypt.PublicKey(private_key.replace(".pem", ".pub")) + pubkey = crypt.PublicKey.from_file(private_key.replace(".pem", ".pub")) assert pubkey.verify(data, signature) @pytest.mark.skipif(FIPS_TESTRUN, reason="Legacy key can not be loaded in FIPS mode") def test_public_key_encrypt(private_key, passphrase): - pubkey = crypt.PublicKey(private_key.replace(".pem", ".pub")) + pubkey = crypt.PublicKey.from_file(private_key.replace(".pem", ".pub")) data = b"meh" enc = pubkey.encrypt(data) @@ -217,7 +217,7 @@ def test_private_key_decrypt(private_key, passphrase): lpubkey = LegacyPublicKey(private_key.replace(".pem", ".pub")) data = b"meh" enc = lpubkey.encrypt(data) - priv = crypt.PrivateKey(private_key, passphrase) + priv = crypt.PrivateKey.from_file(private_key, passphrase) dec = priv.key.decrypt( enc, crypt.padding.OAEP( @@ -289,32 +289,31 @@ def test_aes_encrypt(): def test_encrypt_decrypt(private_key, passphrase, encryption_algorithm): - pubkey = crypt.PublicKey(private_key.replace(".pem", ".pub")) + pubkey = crypt.PublicKey.from_file(private_key.replace(".pem", ".pub")) enc = pubkey.encrypt(b"meh", algorithm=encryption_algorithm) - privkey = crypt.PrivateKey(private_key, passphrase) + privkey = crypt.PrivateKey.from_file(private_key, passphrase) assert privkey.decrypt(enc, algorithm=encryption_algorithm) == b"meh" -def test_sign_message(signature, signing_algorithm): +def test_sign_message(private_key, signature, signing_algorithm): key = salt.crypt.serialization.load_pem_private_key(PRIVKEY_DATA.encode(), None) - with patch("salt.crypt.get_rsa_key", return_value=key): + stub_key = salt.crypt.PrivateKey.__new__(salt.crypt.PrivateKey) + stub_key.key = key + with patch("salt.crypt.PrivateKey.from_file", return_value=stub_key): assert ( - salt.crypt.sign_message( - "/keydir/keyname.pem", MSG, algorithm=signing_algorithm - ) + salt.crypt.sign_message(private_key, MSG, algorithm=signing_algorithm) == signature ) def test_sign_message_with_passphrase(signature, 
signing_algorithm): key = salt.crypt.serialization.load_pem_private_key(PRIVKEY_DATA.encode(), None) - with patch("salt.crypt.get_rsa_key", return_value=key): + stub_key = salt.crypt.PrivateKey.__new__(salt.crypt.PrivateKey) + stub_key.key = key + with patch("salt.crypt.PrivateKey.from_file", return_value=stub_key): assert ( salt.crypt.sign_message( - "/keydir/keyname.pem", - MSG, - passphrase="password", - algorithm=signing_algorithm, + "/keydir/keyname.pem", MSG, algorithm=signing_algorithm ) == signature ) @@ -342,7 +341,7 @@ def test_loading_encrypted_openssl_format(openssl_encrypted_key, passphrase, tmp @pytest.mark.skipif(not FIPS_TESTRUN, reason="Only valid when in FIPS mode") def test_fips_bad_signing_algo(private_key, passphrase): - key = salt.crypt.PrivateKey(private_key, passphrase) + key = salt.crypt.PrivateKey.from_file(private_key, passphrase) with pytest.raises(salt.exceptions.UnsupportedAlgorithm): key.sign("meh", salt.crypt.PKCS1v15_SHA1) @@ -352,14 +351,14 @@ def test_fips_bad_signing_algo_verification(private_key, passphrase): lpriv = LegacyPrivateKey(private_key.encode(), passphrase.encode()) data = b"meh" signature = lpriv.sign(data) - pubkey = salt.crypt.PublicKey(private_key.replace(".pem", ".pub")) + pubkey = salt.crypt.PublicKey.from_file(private_key.replace(".pem", ".pub")) # cryptogrpahy silently returns False on unsuppoted algorithm assert pubkey.verify(signature, salt.crypt.PKCS1v15_SHA1) is False @pytest.mark.skipif(not FIPS_TESTRUN, reason="Only valid when in FIPS mode") def test_fips_bad_encryption_algo(private_key, passphrase): - key = salt.crypt.PublicKey(private_key.replace(".pem", ".pub")) + key = salt.crypt.PublicKey.from_file(private_key.replace(".pem", ".pub")) with pytest.raises(salt.exceptions.UnsupportedAlgorithm): key.encrypt("meh", salt.crypt.OAEP_SHA1) @@ -368,6 +367,6 @@ def test_fips_bad_encryption_algo(private_key, passphrase): def test_fips_bad_decryption_algo(private_key, passphrase): pubkey = LegacyPublicKey(private_key.replace(".pem", ".pub")) data = pubkey.encrypt("meh") - key = salt.crypt.PrivateKey(private_key, passphrase) + key = salt.crypt.PrivateKey.from_file(private_key, passphrase) with pytest.raises(salt.exceptions.UnsupportedAlgorithm): key.decrypt(data) diff --git a/tests/pytests/unit/pillar/test_file_tree.py b/tests/pytests/unit/pillar/test_file_tree.py index 214a0605cc84..840851600386 100644 --- a/tests/pytests/unit/pillar/test_file_tree.py +++ b/tests/pytests/unit/pillar/test_file_tree.py @@ -93,6 +93,8 @@ def configure_loader_modules(tmp_path, minion_id, pillar_path): "renderer": "yaml_jinja", "renderer_blacklist": [], "renderer_whitelist": [], + "keys.cache_driver": "localfs_key", + "__role": "master", } } } diff --git a/tests/pytests/unit/pillar/test_nodegroups.py b/tests/pytests/unit/pillar/test_nodegroups.py index 2c4d1bf24311..74a5bc38ec2a 100644 --- a/tests/pytests/unit/pillar/test_nodegroups.py +++ b/tests/pytests/unit/pillar/test_nodegroups.py @@ -28,6 +28,8 @@ def configure_loader_modules(fake_minion_id, fake_nodegroups): "cache": "localfs", "nodegroups": fake_nodegroups, "id": fake_minion_id, + "keys.cache_driver": "localfs_key", + "__role": "master", } return {nodegroups: {"__opts__": fake_opts}} diff --git a/tests/pytests/unit/runners/test_cache.py b/tests/pytests/unit/runners/test_cache.py index eaa82fbd6497..8f416c069d8b 100644 --- a/tests/pytests/unit/runners/test_cache.py +++ b/tests/pytests/unit/runners/test_cache.py @@ -4,6 +4,7 @@ import pytest +import salt.config import salt.runners.cache as cache import 
salt.utils.master from tests.support.mock import patch @@ -11,15 +12,17 @@ @pytest.fixture def configure_loader_modules(tmp_path): - return { - cache: { - "__opts__": { - "cache": "localfs", - "pki_dir": str(tmp_path), - "key_cache": True, - } + master_config = salt.config.master_config(None) + master_config.update( + { + "cache": "localfs", + "pki_dir": str(tmp_path), + "key_cache": True, + "keys.cache_driver": "localfs_key", + "__role": "master", } - } + ) + return {cache: {"__opts__": master_config}} def test_grains(): diff --git a/tests/pytests/unit/runners/test_pillar.py b/tests/pytests/unit/runners/test_pillar.py index 02efc67ae9cc..8dc7fa767daf 100644 --- a/tests/pytests/unit/runners/test_pillar.py +++ b/tests/pytests/unit/runners/test_pillar.py @@ -25,6 +25,8 @@ def configure_loader_modules(): "pillar_cache": True, "pillar_cache_backend": "disk", "pillar_cache_ttl": 30, + "keys.cache_driver": "localfs_key", + "__role": "master", } } } diff --git a/tests/pytests/unit/test_master.py b/tests/pytests/unit/test_master.py index d4e959854edb..d158ba4f0efc 100644 --- a/tests/pytests/unit/test_master.py +++ b/tests/pytests/unit/test_master.py @@ -74,6 +74,8 @@ def encrypted_requests(tmp_path): "conf_file": str(tmp_path / "config.conf"), "fileserver_backend": "local", "master_job_cache": False, + "keys.cache_driver": "localfs_key", + "__role": "master", } ) @@ -118,6 +120,8 @@ def test_maintenance_duration(): "master_job_cache": "", "pki_dir": "/tmp", "eauth_tokens": "", + "keys.cache_driver": "localfs_key", + "__role": "master", } mp = salt.master.Maintenance(opts) with patch("salt.utils.verify.check_max_open_files") as check_files, patch.object( diff --git a/tests/pytests/unit/test_pillar.py b/tests/pytests/unit/test_pillar.py index 1b29c26248dd..bcd172e25947 100644 --- a/tests/pytests/unit/test_pillar.py +++ b/tests/pytests/unit/test_pillar.py @@ -1012,17 +1012,20 @@ def test_get_opts_in_pillar_override_call(minion_opts, grains): def test_multiple_keys_in_opts_added_to_pillar(grains, tmp_pki): - opts = { - "pki_dir": tmp_pki, - "id": "minion", - "master_uri": "tcp://127.0.0.1:4505", - "__role": "minion", - "keysize": 2048, - "renderer": "json", - "path_to_add": "fake_data", - "path_to_add2": {"fake_data2": ["fake_data3", "fake_data4"]}, - "pass_to_ext_pillars": ["path_to_add", "path_to_add2"], - } + opts = salt.config.minion_config(None) + opts.update( + { + "pki_dir": tmp_pki, + "id": "minion", + "master_uri": "tcp://127.0.0.1:4505", + "__role": "minion", + "keysize": 2048, + "renderer": "json", + "path_to_add": "fake_data", + "path_to_add2": {"fake_data2": ["fake_data3", "fake_data4"]}, + "pass_to_ext_pillars": ["path_to_add", "path_to_add2"], + } + ) pillar = salt.pillar.RemotePillar(opts, grains, "mocked-minion", "dev") assert pillar.extra_minion_data == { "path_to_add": "fake_data", @@ -1068,16 +1071,19 @@ def test_pillar_file_client_master_remote(tmp_pki, grains): returns a remote file client. 
""" mocked_minion = MagicMock() - opts = { - "pki_dir": tmp_pki, - "id": "minion", - "master_uri": "tcp://127.0.0.1:4505", - "__role": "minion", - "keysize": 2048, - "file_client": "local", - "use_master_when_local": True, - "pillar_cache": None, - } + opts = salt.config.minion_config(None) + opts.update( + { + "pki_dir": tmp_pki, + "id": "minion", + "master_uri": "tcp://127.0.0.1:4505", + "__role": "minion", + "keysize": 2048, + "file_client": "local", + "use_master_when_local": True, + "pillar_cache": None, + } + ) pillar = salt.pillar.get_pillar(opts, grains, mocked_minion) assert type(pillar) == salt.pillar.RemotePillar assert type(pillar) != salt.pillar.PillarCache @@ -1262,15 +1268,18 @@ def test_compile_pillar_disk_cache(master_opts, grains): def test_remote_pillar_bad_return(grains, tmp_pki): - opts = { - "pki_dir": tmp_pki, - "id": "minion", - "master_uri": "tcp://127.0.0.1:4505", - "__role": "minion", - "keysize": 2048, - "saltenv": "base", - "pillarenv": "base", - } + opts = salt.config.minion_config(None) + opts.update( + { + "pki_dir": tmp_pki, + "id": "minion", + "master_uri": "tcp://127.0.0.1:4505", + "__role": "minion", + "keysize": 2048, + "saltenv": "base", + "pillarenv": "base", + } + ) pillar = salt.pillar.RemotePillar(opts, grains, "mocked-minion", "dev") async def crypted_transfer_mock(): @@ -1282,15 +1291,18 @@ async def crypted_transfer_mock(): async def test_async_remote_pillar_bad_return(grains, tmp_pki): - opts = { - "pki_dir": tmp_pki, - "id": "minion", - "master_uri": "tcp://127.0.0.1:4505", - "__role": "minion", - "keysize": 2048, - "saltenv": "base", - "pillarenv": "base", - } + opts = salt.config.minion_config(None) + opts.update( + { + "pki_dir": tmp_pki, + "id": "minion", + "master_uri": "tcp://127.0.0.1:4505", + "__role": "minion", + "keysize": 2048, + "saltenv": "base", + "pillarenv": "base", + } + ) pillar = salt.pillar.AsyncRemotePillar(opts, grains, "mocked-minion", "dev") async def crypted_transfer_mock(): diff --git a/tests/pytests/unit/test_request_channel.py b/tests/pytests/unit/test_request_channel.py index 746d1bc7398b..4062ae102bc8 100644 --- a/tests/pytests/unit/test_request_channel.py +++ b/tests/pytests/unit/test_request_channel.py @@ -518,7 +518,9 @@ def test_req_server_chan_encrypt_v2( assert "key" in ret assert dictkey in ret - key = salt.crypt.PrivateKey(str(pki_dir.joinpath("minion", "minion.pem"))) + key = salt.crypt.PrivateKey.from_file( + str(pki_dir.joinpath("minion", "minion.pem")) + ) aes = key.decrypt(ret["key"], encryption_algorithm) pcrypt = salt.crypt.Crypticle(master_opts, aes) signed_msg = pcrypt.loads(ret[dictkey]) @@ -570,7 +572,9 @@ def test_req_server_chan_encrypt_v1(pki_dir, encryption_algorithm, master_opts): assert "key" in ret assert dictkey in ret - key = salt.crypt.PrivateKey(str(pki_dir.joinpath("minion", "minion.pem"))) + key = salt.crypt.PrivateKey.from_file( + str(pki_dir.joinpath("minion", "minion.pem")) + ) aes = key.decrypt(ret["key"], encryption_algorithm) pcrypt = salt.crypt.Crypticle(master_opts, aes) data = pcrypt.loads(ret[dictkey]) @@ -938,7 +942,7 @@ def mocksend(msg, timeout=60, tries=3): key = salt.crypt.Crypticle.generate_key_string() pcrypt = salt.crypt.Crypticle(master_opts, key) pubfn = os.path.join(master_opts["pki_dir"], "minions", "minion") - pub = salt.crypt.PublicKey(pubfn) + pub = salt.crypt.PublicKey.from_file(pubfn) ret[dictkey] = pcrypt.dumps(signed_msg) key = salt.utils.stringutils.to_bytes(key) ret["key"] = pub.encrypt(key, minion_opts["encryption_algorithm"]) @@ -1197,7 +1201,7 @@ 
async def test_req_chan_auth_v2_with_master_signing( assert ( pki_dir.joinpath("minion", "minion_master.pub").read_text() - == pki_dir.joinpath("master", "master.pub").read_text() + == pki_dir.joinpath("master", f"{server.master_key.master_id}.pub").read_text() ) client = salt.channel.client.AsyncReqChannel.factory(minion_opts, io_loop=io_loop) @@ -1218,10 +1222,10 @@ async def test_req_chan_auth_v2_with_master_signing( assert "publish_port" in ret # Now create a new master key pair and try auth with it. - mapriv = pki_dir.joinpath("master", "master.pem") + mapriv = pki_dir.joinpath("master", f"{server.master_key.master_id}.pem") mapriv.unlink() mapriv.write_text(MASTER2_PRIV_KEY.strip()) - mapub = pki_dir.joinpath("master", "master.pub") + mapub = pki_dir.joinpath("master", f"{server.master_key.master_id}.pub") mapub.unlink() mapub.write_text(MASTER2_PUB_KEY.strip()) @@ -1245,7 +1249,9 @@ async def test_req_chan_auth_v2_with_master_signing( assert ( pki_dir.joinpath("minion", "minion_master.pub").read_text() - == pki_dir.joinpath("master", "master.pub").read_text() + == pki_dir.joinpath( + "master", f"{server.master_key.master_id}.pub" + ).read_text() ) finally: client.close() @@ -1513,7 +1519,9 @@ def test_req_server_auth_garbage_sig_algo(pki_dir, minion_opts, master_opts, cap master_opts, master_opts["sock_dir"], listen=False ) server.master_key = salt.crypt.MasterKeys(server.opts) - pub = salt.crypt.PublicKey(str(pki_dir.joinpath("master", "master.pub"))) + pub = salt.crypt.PublicKey.from_file( + str(pki_dir.joinpath("master", f"{server.master_key.master_id}.pub")) + ) token = pub.encrypt( salt.utils.stringutils.to_bytes(salt.crypt.Crypticle.generate_key_string()), algorithm=minion_opts["encryption_algorithm"], @@ -1591,7 +1599,7 @@ def test_req_server_auth_unsupported_enc_algo( import tests.pytests.unit.crypt pub = tests.pytests.unit.crypt.LegacyPublicKey( - str(pki_dir.joinpath("master", "master.pub")) + str(pki_dir.joinpath("master", f"{server.master_key.master_id}.pub")) ) token = pub.encrypt( salt.utils.stringutils.to_bytes(salt.crypt.Crypticle.generate_key_string()), @@ -1666,7 +1674,7 @@ def test_req_server_auth_garbage_enc_algo(pki_dir, minion_opts, master_opts, cap import tests.pytests.unit.crypt pub = tests.pytests.unit.crypt.LegacyPublicKey( - str(pki_dir.joinpath("master", "master.pub")) + str(pki_dir.joinpath("master", f"{server.master_key.master_id}.pub")) ) token = pub.encrypt( salt.utils.stringutils.to_bytes(salt.crypt.Crypticle.generate_key_string()), diff --git a/tests/pytests/unit/transport/test_tcp.py b/tests/pytests/unit/transport/test_tcp.py index f66733c5be36..b5d1cb56bdf2 100644 --- a/tests/pytests/unit/transport/test_tcp.py +++ b/tests/pytests/unit/transport/test_tcp.py @@ -437,10 +437,11 @@ async def test_when_async_req_channel_with_syndic_role_should_use_syndic_master_ "acceptance_wait_time": 30, "acceptance_wait_time_max": 30, "signing_algorithm": "MOCK", + "keys.cache_driver": "localfs_key", } client = salt.channel.client.ReqChannel.factory(opts, io_loop=mockloop) assert client.master_pubkey_path == expected_pubkey_path - with patch("salt.crypt.PublicKey", return_value=MagicMock()) as mock: + with patch("salt.crypt.PublicKey.from_file", return_value=MagicMock()) as mock: client.verify_signature("mockdata", "mocksig") assert mock.call_args_list[0][0][0] == expected_pubkey_path @@ -461,6 +462,7 @@ async def test_mixin_should_use_correct_path_when_syndic(): "keysize": 4096, "sign_pub_messages": True, "transport": "tcp", + "keys.cache_driver": "localfs_key", } 
client = salt.channel.client.AsyncPubChannel.factory(opts, io_loop=mockloop) client.master_pubkey_path = expected_pubkey_path diff --git a/tests/pytests/unit/transport/test_zeromq.py b/tests/pytests/unit/transport/test_zeromq.py index fbf124fd7654..f851ed0f8e17 100644 --- a/tests/pytests/unit/transport/test_zeromq.py +++ b/tests/pytests/unit/transport/test_zeromq.py @@ -549,7 +549,7 @@ def test_req_server_chan_encrypt_v2( assert "key" in ret assert dictkey in ret - key = salt.crypt.PrivateKey(str(pki_dir.joinpath("minion", "minion.pem"))) + key = salt.crypt.PrivateKey.from_file(str(pki_dir.joinpath("minion", "minion.pem"))) aes = key.decrypt(ret["key"], encryption_algorithm) pcrypt = salt.crypt.Crypticle(master_opts, aes) signed_msg = pcrypt.loads(ret[dictkey]) @@ -598,7 +598,7 @@ def test_req_server_chan_encrypt_v1(pki_dir, encryption_algorithm, master_opts): assert "key" in ret assert dictkey in ret - key = salt.crypt.PrivateKey(str(pki_dir.joinpath("minion", "minion.pem"))) + key = salt.crypt.PrivateKey.from_file(str(pki_dir.joinpath("minion", "minion.pem"))) aes = key.decrypt(ret["key"], encryption_algorithm) pcrypt = salt.crypt.Crypticle(master_opts, aes) data = pcrypt.loads(ret[dictkey]) @@ -946,7 +946,7 @@ def mocksend(msg, timeout=60, tries=3): key = salt.crypt.Crypticle.generate_key_string() pcrypt = salt.crypt.Crypticle(master_opts, key) pubfn = os.path.join(master_opts["pki_dir"], "minions", "minion") - pub = salt.crypt.PublicKey(pubfn) + pub = salt.crypt.PublicKey.from_file(pubfn) ret[dictkey] = pcrypt.dumps(signed_msg) key = salt.utils.stringutils.to_bytes(key) ret["key"] = pub.encrypt(key, minion_opts["encryption_algorithm"]) @@ -1205,7 +1205,7 @@ async def test_req_chan_auth_v2_with_master_signing( assert ( pki_dir.joinpath("minion", "minion_master.pub").read_text() - == pki_dir.joinpath("master", "master.pub").read_text() + == pki_dir.joinpath("master", f"{server.master_key.master_id}.pub").read_text() ) client = salt.channel.client.AsyncReqChannel.factory(minion_opts, io_loop=io_loop) @@ -1226,10 +1226,10 @@ async def test_req_chan_auth_v2_with_master_signing( assert "publish_port" in ret # Now create a new master key pair and try auth with it. 
- mapriv = pki_dir.joinpath("master", "master.pem") + mapriv = pki_dir.joinpath("master", f"{server.master_key.master_id}.pem") mapriv.unlink() mapriv.write_text(MASTER2_PRIV_KEY.strip()) - mapub = pki_dir.joinpath("master", "master.pub") + mapub = pki_dir.joinpath("master", f"{server.master_key.master_id}.pub") mapub.unlink() mapub.write_text(MASTER2_PUB_KEY.strip()) @@ -1252,7 +1252,7 @@ async def test_req_chan_auth_v2_with_master_signing( assert ( pki_dir.joinpath("minion", "minion_master.pub").read_text() - == pki_dir.joinpath("master", "master.pub").read_text() + == pki_dir.joinpath("master", f"{server.master_key.master_id}.pub").read_text() ) diff --git a/tests/pytests/unit/utils/test_minions.py b/tests/pytests/unit/utils/test_minions.py index 8d17804071ab..e6fb63250201 100644 --- a/tests/pytests/unit/utils/test_minions.py +++ b/tests/pytests/unit/utils/test_minions.py @@ -1,5 +1,6 @@ import pytest +import salt.config import salt.utils.minions import salt.utils.network from tests.support.mock import patch @@ -10,11 +11,15 @@ def test_connected_ids(): test ckminion connected_ids when local_port_tcp returns 127.0.0.1 """ - opts = { - "publish_port": 4505, - "detect_remote_minions": False, - "minion_data_cache": True, - } + opts = salt.config.minion_config(None) + opts.update( + { + "publish_port": 4505, + "detect_remote_minions": False, + "minion_data_cache": True, + "__role": "minion", + } + ) minion = "minion" ips = {"203.0.113.1", "203.0.113.2", "127.0.0.1"} mdata = {"grains": {"ipv4": ips, "ipv6": []}} @@ -32,12 +37,16 @@ def test_connected_ids_remote_minions(): test ckminion connected_ids when detect_remote_minions is set """ - opts = { - "publish_port": 4505, - "detect_remote_minions": True, - "remote_minions_port": 22, - "minion_data_cache": True, - } + opts = salt.config.minion_config(None) + opts.update( + { + "publish_port": 4505, + "detect_remote_minions": True, + "remote_minions_port": 22, + "minion_data_cache": True, + "__role": "master", + } + ) minion = "minion" minion2 = "minion2" minion2_ip = "192.168.2.10" @@ -64,7 +73,8 @@ def test_validate_tgt_returns_true_when_no_valid_minions_have_been_found(): CKMinions is only able to check against minions the master knows about. If no minion keys have been accepted it will return True. 
""" - ckminions = salt.utils.minions.CkMinions(opts={}) + opts = salt.config.master_config(None) + ckminions = salt.utils.minions.CkMinions(opts=opts) with patch( "salt.utils.minions.CkMinions.check_minions", autospec=True, return_value={} ): @@ -83,7 +93,8 @@ def test_validate_tgt_returns_true_when_no_valid_minions_have_been_found(): def test_validate_tgt_should_return_false_when_minions_have_minions_not_in_valid_minions( valid_minions, target_minions ): - ckminions = salt.utils.minions.CkMinions(opts={}) + opts = salt.config.master_config(None) + ckminions = salt.utils.minions.CkMinions(opts=opts) with patch( "salt.utils.minions.CkMinions.check_minions", autospec=True, @@ -106,7 +117,8 @@ def test_validate_tgt_should_return_false_when_minions_have_minions_not_in_valid def test_validate_tgt_should_return_true_when_all_minions_are_found_in_valid_minions( valid_minions, target_minions ): - ckminions = salt.utils.minions.CkMinions(opts={}) + opts = salt.config.master_config(None) + ckminions = salt.utils.minions.CkMinions(opts=opts) with patch( "salt.utils.minions.CkMinions.check_minions", autospec=True, diff --git a/tests/unit/netapi/rest_tornado/test_saltnado.py b/tests/unit/netapi/rest_tornado/test_saltnado.py index 8c14aedf9f84..67015c1a6bf4 100644 --- a/tests/unit/netapi/rest_tornado/test_saltnado.py +++ b/tests/unit/netapi/rest_tornado/test_saltnado.py @@ -1,6 +1,7 @@ import tornado import tornado.testing +import salt.config import salt.netapi.rest_tornado.saltnado as saltnado from tests.support.mock import MagicMock, patch @@ -604,15 +605,20 @@ class TestDisbatchLocal(tornado.testing.AsyncTestCase): def setUp(self): super().setUp() self.mock = MagicMock() - self.mock.opts = { - "syndic_wait": 0.1, - "cachedir": "/tmp/testing/cachedir", - "sock_dir": "/tmp/testing/sock_drawer", - "transport": "zeromq", - "extension_modules": "/tmp/testing/moduuuuules", - "order_masters": False, - "gather_job_timeout": 10.001, - } + self.mock.opts = salt.config.master_config(None) + self.mock.opts.update( + { + "syndic_wait": 0.1, + "cachedir": "/tmp/testing/cachedir", + "sock_dir": "/tmp/testing/sock_drawer", + "transport": "zeromq", + "extension_modules": "/tmp/testing/moduuuuules", + "order_masters": False, + "gather_job_timeout": 10.001, + "keys.cache_driver": "localfs_key", + "__role": "master", + } + ) self.handler = saltnado.SaltAPIHandler(self.mock, self.mock) @tornado.testing.gen_test diff --git a/tests/unit/utils/test_minions.py b/tests/unit/utils/test_minions.py index 38a2c52c12bf..bcc4e8ac0f0f 100644 --- a/tests/unit/utils/test_minions.py +++ b/tests/unit/utils/test_minions.py @@ -1,3 +1,4 @@ +import salt.config import salt.utils.minions from tests.support.mock import MagicMock, patch from tests.support.unit import TestCase @@ -70,7 +71,12 @@ class CkMinionsTestCase(TestCase): """ def setUp(self): - self.ckminions = salt.utils.minions.CkMinions({"minion_data_cache": True}) + opts = salt.config.master_config(None) + opts["minion_data_cache"] = True + self.ckminions = salt.utils.minions.CkMinions(opts) + + def tearDown(self): + del self.ckminions def test_spec_check(self): # Test spec-only rule @@ -255,8 +261,8 @@ def test_spec_check(self): self.assertFalse(ret) @patch( - "salt.utils.minions.CkMinions._pki_minions", - MagicMock(return_value=["alpha", "beta", "gamma"]), + "salt.key.Key.list_keys", + MagicMock(return_value={"minions": ["alpha", "beta", "gamma"]}), ) def test_auth_check(self): # Test function-only rule