From 18469dbb3e3bf92492c141e8f685fc495f05111a Mon Sep 17 00:00:00 2001 From: dkjii Date: Fri, 2 Apr 2021 09:50:36 -0400 Subject: [PATCH 01/25] ansible.posix.mount: add absent_from_fstab option --- plugins/modules/mount.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/plugins/modules/mount.py b/plugins/modules/mount.py index e7ce7ee..31ea269 100644 --- a/plugins/modules/mount.py +++ b/plugins/modules/mount.py @@ -78,9 +78,12 @@ options: if I(opts) is set, and the remount command fails, the module will error to prevent unexpected mount changes. Try using C(mounted) instead to work around this issue. + - C(absent_from_fstab) specifies that the device mount's entry will be + removed from I(fstab). This option does not unmount it or delete the + mountpoint. type: str required: true - choices: [ absent, mounted, present, unmounted, remounted ] + choices: [ absent, absent_from_fstab, mounted, present, unmounted, remounted ] fstab: description: - File to use instead of C(/etc/fstab). 
@@ -651,7 +654,7 @@ def main(): passno=dict(type='str', no_log=False), src=dict(type='path'), backup=dict(type='bool', default=False), - state=dict(type='str', required=True, choices=['absent', 'mounted', 'present', 'unmounted', 'remounted']), + state=dict(type='str', required=True, choices=['absent', 'absent_from_fstab', 'mounted', 'present', 'unmounted', 'remounted']), ), supports_check_mode=True, required_if=( @@ -734,7 +737,9 @@ def main(): name = module.params['path'] changed = False - if state == 'absent': + if state == 'absent_from_fstab': + name, changed = unset_mount(module, args) + elif state == 'absent': name, changed = unset_mount(module, args) if changed and not module.check_mode: From 20e294e0264647f4aa2078ad0eaf320ba2ee1879 Mon Sep 17 00:00:00 2001 From: dkjii Date: Fri, 2 Apr 2021 12:33:58 -0400 Subject: [PATCH 02/25] add changelog --- changelogs/fragments/166_mount_absent_fstab.yml | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 changelogs/fragments/166_mount_absent_fstab.yml diff --git a/changelogs/fragments/166_mount_absent_fstab.yml b/changelogs/fragments/166_mount_absent_fstab.yml new file mode 100644 index 0000000..2f7400c --- /dev/null +++ b/changelogs/fragments/166_mount_absent_fstab.yml @@ -0,0 +1,2 @@ +minor_changes: +- mount - Add absent_from_fstab state From 553b0ea4f74c2c4e0d0d3252bc57b1dabed54913 Mon Sep 17 00:00:00 2001 From: dkjii-g <41760646+dkjii-g@users.noreply.github.com> Date: Fri, 2 Apr 2021 21:00:51 -0400 Subject: [PATCH 03/25] Update changelogs/fragments/166_mount_absent_fstab.yml Co-authored-by: Amin Vakil --- changelogs/fragments/166_mount_absent_fstab.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelogs/fragments/166_mount_absent_fstab.yml b/changelogs/fragments/166_mount_absent_fstab.yml index 2f7400c..be11324 100644 --- a/changelogs/fragments/166_mount_absent_fstab.yml +++ b/changelogs/fragments/166_mount_absent_fstab.yml @@ -1,2 +1,2 @@ minor_changes: -- mount - Add 
absent_from_fstab state + - mount - Add ``absent_from_fstab`` state (https://github.com/ansible-collections/ansible.posix/pull/166). From d7c7d1d2c89f961e1aa85c6d1bc136687d4dd99d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Jun 2022 18:39:01 -0400 Subject: [PATCH 04/25] More complete missing lib msg adds 'exact' python used by module and hostname to avoid confusion --- plugins/module_utils/firewalld.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/module_utils/firewalld.py b/plugins/module_utils/firewalld.py index c79a126..6a76c32 100644 --- a/plugins/module_utils/firewalld.py +++ b/plugins/module_utils/firewalld.py @@ -5,6 +5,7 @@ from __future__ import absolute_import, division, print_function from ansible_collections.ansible.posix.plugins.module_utils.version import LooseVersion +from ansible.module_utils.basic import missing_required_lib __metaclass__ = type @@ -314,6 +315,5 @@ class FirewallTransaction(object): if import_failure: module.fail_json( - msg='Python Module not found: firewalld and its python module are required for this module, \ - version 0.2.11 or newer required (0.3.9 or newer for offline operations)' + msg=missing_required_lib('firewall') + '. 
Version 0.2.11 or newer required (0.3.9 or newer for offline operations)' ) From 17fc3bcce6ac18d8a1c6ab7baa7d9a29a43990f9 Mon Sep 17 00:00:00 2001 From: Hideki Saito Date: Wed, 15 Jun 2022 18:26:15 +0900 Subject: [PATCH 05/25] Bump AZP container version - https://github.com/ansible-collections/news-for-maintainers/issues/18 Signed-off-by: Hideki Saito --- .azure-pipelines/azure-pipelines.yml | 2 +- changelogs/fragments/375_update_azp_container.yml | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/375_update_azp_container.yml diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 078109b..4b1b3a9 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -36,7 +36,7 @@ variables: resources: containers: - container: default - image: quay.io/ansible/azure-pipelines-test-container:1.9.0 + image: quay.io/ansible/azure-pipelines-test-container:3.0.0 pool: Standard diff --git a/changelogs/fragments/375_update_azp_container.yml b/changelogs/fragments/375_update_azp_container.yml new file mode 100644 index 0000000..6d02987 --- /dev/null +++ b/changelogs/fragments/375_update_azp_container.yml @@ -0,0 +1,3 @@ +--- +trivial: + - CI - AZP test container to 3.0.0 (https://github.com/ansible-collections/news-for-maintainers/issues/18). 
From c4be75114b876ff61478a4173a352bacf5ea499d Mon Sep 17 00:00:00 2001 From: Sean Cavanaugh Date: Fri, 15 Jul 2022 14:04:06 -0400 Subject: [PATCH 06/25] Update profile_tasks.py removing contentious terminology to match reference documentation https://docs.ansible.com/ansible/latest/reference_appendices/config.html --- plugins/callback/profile_tasks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/callback/profile_tasks.py b/plugins/callback/profile_tasks.py index c6118df..b7fc3ac 100644 --- a/plugins/callback/profile_tasks.py +++ b/plugins/callback/profile_tasks.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' - It also lists the top/bottom time consuming tasks in the summary (configurable) - Before 2.4 only the environment variables were available for configuration. requirements: - - whitelisting in configuration - see examples section below for details. + - enable in configuration - see examples section below for details. options: output_limit: description: Number of tasks to display in the summary @@ -46,7 +46,7 @@ EXAMPLES = ''' example: > To enable, add this to your ansible.cfg file in the defaults block [defaults] - callback_whitelist = ansible.posix.profile_tasks + callbacks_enabled=ansible.posix.profile_tasks sample output: > # # TASK: [ensure messaging security group exists] ******************************** From f7fac900098cfa117c5035091e456b617f04ea02 Mon Sep 17 00:00:00 2001 From: Hideki Saito Date: Fri, 19 Aug 2022 12:53:51 +0900 Subject: [PATCH 07/25] Removing Fedora 35 and FreeBSD 12.3 from CI tests Signed-off-by: Hideki Saito --- .azure-pipelines/azure-pipelines.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 4b1b3a9..cd21121 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -53,8 +53,6 @@ stages: targets: - name: CentOS 7 test: centos7 - - name: Fedora 35 - test: fedora35 - name: Fedora 36 
test: fedora36 - name: openSUSE 15 py3 @@ -197,8 +195,6 @@ stages: test: rhel/8.6 - name: RHEL 9.0 test: rhel/9.0 - - name: FreeBSD 12.3 - test: freebsd/12.3 - name: FreeBSD 13.1 test: freebsd/13.1 - stage: Remote_2_13 From abfe36c62f2e58d6660dce6559a8c38ea37f6b1c Mon Sep 17 00:00:00 2001 From: Hideki Saito Date: Wed, 7 Sep 2022 11:05:24 +0900 Subject: [PATCH 08/25] Fix to follow pylint check in ansible-test-sanity-docker-devel Signed-off-by: Hideki Saito --- changelogs/fragments/386_follow_ci_testing_rules.yml | 3 +++ plugins/modules/authorized_key.py | 2 ++ plugins/modules/mount.py | 2 +- .../targets/authorized_key/tasks/setup_steps.yml | 9 +++++++++ 4 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/386_follow_ci_testing_rules.yml diff --git a/changelogs/fragments/386_follow_ci_testing_rules.yml b/changelogs/fragments/386_follow_ci_testing_rules.yml new file mode 100644 index 0000000..f59e82a --- /dev/null +++ b/changelogs/fragments/386_follow_ci_testing_rules.yml @@ -0,0 +1,3 @@ +--- +trivial: + - CI - following the new CI testing rule ansible-test-sanity-docker-devel. diff --git a/plugins/modules/authorized_key.py b/plugins/modules/authorized_key.py index e11b416..5e37c28 100644 --- a/plugins/modules/authorized_key.py +++ b/plugins/modules/authorized_key.py @@ -347,6 +347,8 @@ def keyfile(module, user, write=False, path=None, manage_dir=True, follow=False) basedir = os.path.dirname(keysfile) if not os.path.exists(basedir): os.makedirs(basedir) + + f = None try: f = open(keysfile, "w") # touches file so we can set ownership and perms finally: diff --git a/plugins/modules/mount.py b/plugins/modules/mount.py index 2021464..58b49bc 100644 --- a/plugins/modules/mount.py +++ b/plugins/modules/mount.py @@ -226,7 +226,7 @@ def _escape_fstab(v): if isinstance(v, int): return v else: - return( + return ( v. replace('\\', '\\134'). replace(' ', '\\040'). 
diff --git a/tests/integration/targets/authorized_key/tasks/setup_steps.yml b/tests/integration/targets/authorized_key/tasks/setup_steps.yml index a3c21dc..2144b7a 100644 --- a/tests/integration/targets/authorized_key/tasks/setup_steps.yml +++ b/tests/integration/targets/authorized_key/tasks/setup_steps.yml @@ -1,5 +1,14 @@ # ------------------------------------------------------------- # Setup steps +- name: Clean up the working directory and files + file: + path: '{{ output_dir }}' + state: absent + +- name: Create the working directory + file: + path: '{{ output_dir }}' + state: directory - name: copy an existing file in place with comments copy: From acd5a2b17e8d27608a45ae5113852b05d3f11379 Mon Sep 17 00:00:00 2001 From: Hideki Saito Date: Mon, 12 Sep 2022 13:47:55 +0900 Subject: [PATCH 09/25] Add changelog fragments to PR #380 Signed-off-by: Hideki Saito --- changelogs/fragments/380_update_usage_profile_tasks.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 changelogs/fragments/380_update_usage_profile_tasks.yml diff --git a/changelogs/fragments/380_update_usage_profile_tasks.yml b/changelogs/fragments/380_update_usage_profile_tasks.yml new file mode 100644 index 0000000..5b23d40 --- /dev/null +++ b/changelogs/fragments/380_update_usage_profile_tasks.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Removed contentious terminology to match reference documentation in profile_tasks. 
From cc20deaad240e24e0083b3d1c6589ad833c040f6 Mon Sep 17 00:00:00 2001 From: Hideki Saito Date: Fri, 16 Sep 2022 16:37:23 +0900 Subject: [PATCH 10/25] Add changelog fragment file to PR #373 Signed-off-by: Hideki Saito --- .../fragments/373_firewall_fix_missing_library_message.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 changelogs/fragments/373_firewall_fix_missing_library_message.yml diff --git a/changelogs/fragments/373_firewall_fix_missing_library_message.yml b/changelogs/fragments/373_firewall_fix_missing_library_message.yml new file mode 100644 index 0000000..a5faea8 --- /dev/null +++ b/changelogs/fragments/373_firewall_fix_missing_library_message.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - firewall - Fixed to output a more complete missing library message. From 6c9616291ee6531752a15e2a468f0a5f73e53d0e Mon Sep 17 00:00:00 2001 From: Hideki Saito Date: Tue, 27 Sep 2022 20:43:22 +0900 Subject: [PATCH 11/25] Add stable-2.14 branch to AZP * Fixes #388 Signed-off-by: Hideki Saito --- .azure-pipelines/azure-pipelines.yml | 38 +++++++++++++++++++ .../fragments/389_ci_add_stable_214.yml | 3 ++ tests/sanity/ignore-2.15.txt | 8 ++++ 3 files changed, 49 insertions(+) create mode 100644 changelogs/fragments/389_ci_add_stable_214.yml create mode 100644 tests/sanity/ignore-2.15.txt diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index cd21121..9aef200 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -61,6 +61,24 @@ stages: test: ubuntu2004 - name: Ubuntu 22.04 test: ubuntu2204 + - stage: Docker_2_14 + displayName: Docker 2.14 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.14/linux/{0}/1 + targets: + - name: CentOS 7 + test: centos7 + - name: Fedora 36 + test: fedora36 + - name: openSUSE 15 py3 + test: opensuse15 + - name: Ubuntu 20.04 + test: ubuntu2004 + - name: Ubuntu 22.04 + test: ubuntu2204 - stage: Docker_2_13 displayName: 
Docker 2.13 dependsOn: [] @@ -197,6 +215,24 @@ stages: test: rhel/9.0 - name: FreeBSD 13.1 test: freebsd/13.1 + - stage: Remote_2_14 + displayName: Remote 2.14 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.14/{0}/1 + targets: + - name: MacOS 12.0 + test: macos/12.0 + - name: RHEL 7.9 + test: rhel/7.9 + - name: RHEL 8.6 + test: rhel/8.6 + - name: RHEL 9.0 + test: rhel/9.0 + - name: FreeBSD 13.1 + test: freebsd/13.1 - stage: Remote_2_13 displayName: Remote 2.13 dependsOn: [] @@ -293,6 +329,8 @@ stages: - Docker_2_12 - Remote_2_13 - Docker_2_13 + - Remote_2_14 + - Docker_2_14 - Remote_devel - Docker_devel jobs: diff --git a/changelogs/fragments/389_ci_add_stable_214.yml b/changelogs/fragments/389_ci_add_stable_214.yml new file mode 100644 index 0000000..6a174fd --- /dev/null +++ b/changelogs/fragments/389_ci_add_stable_214.yml @@ -0,0 +1,3 @@ +--- +trivial: +- CI - Add stable-2.14 to AZP (https://github.com/ansible-collections/ansible.posix/issues/388). diff --git a/tests/sanity/ignore-2.15.txt b/tests/sanity/ignore-2.15.txt new file mode 100644 index 0000000..0b6905e --- /dev/null +++ b/tests/sanity/ignore-2.15.txt @@ -0,0 +1,8 @@ +plugins/modules/synchronize.py pylint:disallowed-name +plugins/modules/synchronize.py use-argspec-type-path +plugins/modules/synchronize.py validate-modules:doc-default-does-not-match-spec +plugins/modules/synchronize.py validate-modules:nonexistent-parameter-documented +plugins/modules/synchronize.py validate-modules:parameter-type-not-in-doc +plugins/modules/synchronize.py validate-modules:undocumented-parameter +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang From 139e103b0f2f211bcf73b0bdff4307ff3ce9d2dc Mon Sep 17 00:00:00 2001 From: Juan Antonio Valino Garcia Date: Fri, 7 Oct 2022 17:20:20 +0200 Subject: [PATCH 12/25] Fixes ##390. 
Hosts involved must have same password --- plugins/action/synchronize.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/plugins/action/synchronize.py b/plugins/action/synchronize.py index a5752b9..7a330ac 100644 --- a/plugins/action/synchronize.py +++ b/plugins/action/synchronize.py @@ -225,7 +225,6 @@ class ActionModule(ActionBase): # Parameter name needed by the ansible module _tmp_args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync' - _tmp_args['_local_rsync_password'] = task_vars.get('ansible_ssh_pass') or task_vars.get('ansible_password') # rsync thinks that one end of the connection is localhost and the # other is the host we're running the task for (Note: We use @@ -333,8 +332,9 @@ class ActionModule(ActionBase): if src is None or dest is None: return dict(failed=True, msg="synchronize requires both src and dest parameters are set") - # Determine if we need a user@ + # Determine if we need a user@ and a password user = None + password = task_vars.get('ansible_ssh_pass', None) or task_vars.get('ansible_password', None) if not dest_is_local: # Src and dest rsync "path" handling if boolean(_tmp_args.get('set_remote_user', 'yes'), strict=False): @@ -344,10 +344,11 @@ class ActionModule(ActionBase): user = task_vars.get('ansible_user') or self._play_context.remote_user if not user: user = C.DEFAULT_REMOTE_USER - else: user = task_vars.get('ansible_user') or self._play_context.remote_user + user = self._templar.template(user) + # Private key handling # Use the private_key parameter if passed else use context private_key_file _tmp_args['private_key'] = _tmp_args.get('private_key', self._play_context.private_key_file) @@ -361,12 +362,15 @@ class ActionModule(ActionBase): # src is a local path, dest is a remote path: @ src = self._process_origin(src_host, src, user) dest = self._process_remote(_tmp_args, dest_host, dest, user, inv_port in localhost_ports) + + password = 
dest_host_inventory_vars.get('ansible_ssh_pass', None) or dest_host_inventory_vars.get('ansible_password', None) else: # Still need to munge paths (to account for roles) even if we aren't # copying files between hosts src = self._get_absolute_path(path=src) dest = self._get_absolute_path(path=dest) + _tmp_args['_local_rsync_password'] = self._templar.template(password) _tmp_args['src'] = src _tmp_args['dest'] = dest From 297a10fec7eb431c95584eb7317f90434e22dbe3 Mon Sep 17 00:00:00 2001 From: Juan Antonio Valino Garcia Date: Fri, 7 Oct 2022 18:04:09 +0200 Subject: [PATCH 13/25] handle missing templar --- synchronize.py | 434 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 434 insertions(+) create mode 100644 synchronize.py diff --git a/synchronize.py b/synchronize.py new file mode 100644 index 0000000..c70db5f --- /dev/null +++ b/synchronize.py @@ -0,0 +1,434 @@ +# -*- coding: utf-8 -*- + +# (c) 2012-2013, Timothy Appnel +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os.path + +from ansible import constants as C +from ansible.module_utils.six import string_types +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_text +from ansible.module_utils.common._collections_compat import MutableSequence +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.plugins.action import ActionBase +from ansible.plugins.loader import connection_loader + + +DOCKER = ['docker', 'community.general.docker', 'community.docker.docker'] +PODMAN = ['podman', 'ansible.builtin.podman', 'containers.podman.podman'] +BUILDAH = ['buildah', 'containers.podman.buildah'] + + +class ActionModule(ActionBase): + + def _get_absolute_path(self, path): + original_path = path + + # + # Check if we have a local relative path and do not process + # * remote paths (some.server.domain:/some/remote/path/...) + # * URLs (rsync://...) + # * local absolute paths (/some/local/path/...) 
+ # + if ':' in path or path.startswith('/'): + return path + + if self._task._role is not None: + path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path) + else: + path = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', path) + + if original_path and original_path[-1] == '/' and path[-1] != '/': + # make sure the dwim'd path ends in a trailing "/" + # if the original path did + path += '/' + + return path + + def _host_is_ipv6_address(self, host): + return ':' in to_text(host, errors='surrogate_or_strict') + + def _format_rsync_rsh_target(self, host, path, user): + ''' formats rsync rsh target, escaping ipv6 addresses if needed ''' + + user_prefix = '' + + if path.startswith('rsync://'): + return path + + # If using docker or buildah, do not add user information + if self._remote_transport not in DOCKER + PODMAN + BUILDAH and user: + user_prefix = '%s@' % (user, ) + + if self._host_is_ipv6_address(host): + return '[%s%s]:%s' % (user_prefix, host, path) + return '%s%s:%s' % (user_prefix, host, path) + + def _process_origin(self, host, path, user): + + if host not in C.LOCALHOST: + return self._format_rsync_rsh_target(host, path, user) + + path = self._get_absolute_path(path=path) + return path + + def _process_remote(self, task_args, host, path, user, port_matches_localhost_port): + """ + :arg host: hostname for the path + :arg path: file path + :arg user: username for the transfer + :arg port_matches_localhost_port: boolean whether the remote port + matches the port used by localhost's sshd. This is used in + conjunction with seeing whether the host is localhost to know + if we need to have the module substitute the pathname or if it + is a different host (for instance, an ssh tunnelled port or an + alternative ssh port to a vagrant host.) 
+ """ + transport = self._connection.transport + # If we're connecting to a remote host or we're delegating to another + # host or we're connecting to a different ssh instance on the + # localhost then we have to format the path as a remote rsync path + if host not in C.LOCALHOST or transport != "local" or \ + (host in C.LOCALHOST and not port_matches_localhost_port): + # If we're delegating to non-localhost and but the + # inventory_hostname host is localhost then we need the module to + # fix up the rsync path to use the controller's public DNS/IP + # instead of "localhost" + if port_matches_localhost_port and host in C.LOCALHOST: + task_args['_substitute_controller'] = True + return self._format_rsync_rsh_target(host, path, user) + + path = self._get_absolute_path(path=path) + return path + + def _override_module_replaced_vars(self, task_vars): + """ Some vars are substituted into the modules. Have to make sure + that those are correct for localhost when synchronize creates its own + connection to localhost.""" + + # Clear the current definition of these variables as they came from the + # connection to the remote host + if 'ansible_syslog_facility' in task_vars: + del task_vars['ansible_syslog_facility'] + for key in list(task_vars.keys()): + if key.startswith("ansible_") and key.endswith("_interpreter"): + del task_vars[key] + + # Add the definitions from localhost + for host in C.LOCALHOST: + if host in task_vars['hostvars']: + localhost = task_vars['hostvars'][host] + break + if 'ansible_syslog_facility' in localhost: + task_vars['ansible_syslog_facility'] = localhost['ansible_syslog_facility'] + for key in localhost: + if key.startswith("ansible_") and key.endswith("_interpreter"): + task_vars[key] = localhost[key] + + def run(self, tmp=None, task_vars=None): + ''' generates params and passes them on to the rsync module ''' + # When modifying this function be aware of the tricky convolutions + # your thoughts have to go through: + # + # In normal ansible, 
we connect from controller to inventory_hostname + # (playbook's hosts: field) or controller to delegate_to host and run + # a module on one of those hosts. + # + # So things that are directly related to the core of ansible are in + # terms of that sort of connection that always originate on the + # controller. + # + # In synchronize we use ansible to connect to either the controller or + # to the delegate_to host and then run rsync which makes its own + # connection from controller to inventory_hostname or delegate_to to + # inventory_hostname. + # + # That means synchronize needs to have some knowledge of the + # controller to inventory_host/delegate host that ansible typically + # establishes and use those to construct a command line for rsync to + # connect from the inventory_host to the controller/delegate. The + # challenge for coders is remembering which leg of the trip is + # associated with the conditions that you're checking at any one time. + if task_vars is None: + task_vars = dict() + + # We make a copy of the args here because we may fail and be asked to + # retry. If that happens we don't want to pass the munged args through + # to our next invocation. Munged args are single use only. 
+ _tmp_args = self._task.args.copy() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + # Store remote connection type + self._remote_transport = self._connection.transport + use_ssh_args = _tmp_args.pop('use_ssh_args', None) + + if use_ssh_args and self._connection.transport == 'ssh': + ssh_args = [ + self._connection.get_option('ssh_args'), + self._connection.get_option('ssh_common_args'), + self._connection.get_option('ssh_extra_args'), + ] + _tmp_args['ssh_args'] = ' '.join([a for a in ssh_args if a]) + + # Handle docker connection options + if self._remote_transport in DOCKER: + self._docker_cmd = self._connection.docker_cmd + if self._play_context.docker_extra_args: + self._docker_cmd = "%s %s" % (self._docker_cmd, self._play_context.docker_extra_args) + elif self._remote_transport in PODMAN: + self._docker_cmd = self._connection._options['podman_executable'] + if self._connection._options.get('podman_extra_args'): + self._docker_cmd = "%s %s" % (self._docker_cmd, self._connection._options['podman_extra_args']) + + # self._connection accounts for delegate_to so + # remote_transport is the transport ansible thought it would need + # between the controller and the delegate_to host or the controller + # and the remote_host if delegate_to isn't set. + + remote_transport = False + if self._connection.transport != 'local': + remote_transport = True + + try: + delegate_to = self._task.delegate_to + except (AttributeError, KeyError): + delegate_to = None + + # ssh paramiko docker buildah and local are fully supported transports. Anything + # else only works with delegate_to + if delegate_to is None and self._connection.transport not in [ + 'ssh', 'paramiko', 'local'] + DOCKER + PODMAN + BUILDAH: + result['failed'] = True + result['msg'] = ( + "synchronize uses rsync to function. rsync needs to connect to the remote " + "host via ssh, docker client or a direct filesystem " + "copy. 
This remote host is being accessed via %s instead " + "so it cannot work." % self._connection.transport) + return result + + # Parameter name needed by the ansible module + _tmp_args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync' + + # rsync thinks that one end of the connection is localhost and the + # other is the host we're running the task for (Note: We use + # ansible's delegate_to mechanism to determine which host rsync is + # running on so localhost could be a non-controller machine if + # delegate_to is used) + src_host = '127.0.0.1' + inventory_hostname = task_vars.get('inventory_hostname') + dest_host_inventory_vars = task_vars['hostvars'].get(inventory_hostname) + dest_host = dest_host_inventory_vars.get('ansible_host', inventory_hostname) + + dest_host_ids = [hostid for hostid in (dest_host_inventory_vars.get('inventory_hostname'), + dest_host_inventory_vars.get('ansible_host')) + if hostid is not None] + + localhost_ports = set() + for host in C.LOCALHOST: + localhost_vars = task_vars['hostvars'].get(host, {}) + for port_var in C.MAGIC_VARIABLE_MAPPING['port']: + port = localhost_vars.get(port_var, None) + if port: + break + else: + port = C.DEFAULT_REMOTE_PORT + localhost_ports.add(port) + + # dest_is_local tells us if the host rsync runs on is the same as the + # host rsync puts the files on. This is about *rsync's connection*, + # not about the ansible connection to run the module. 
+ dest_is_local = False + if delegate_to is None and remote_transport is False: + dest_is_local = True + elif delegate_to is not None and delegate_to in dest_host_ids: + dest_is_local = True + + # CHECK FOR NON-DEFAULT SSH PORT + inv_port = task_vars.get('ansible_port', None) or C.DEFAULT_REMOTE_PORT + if _tmp_args.get('dest_port', None) is None: + if inv_port is not None: + _tmp_args['dest_port'] = inv_port + + # Set use_delegate if we are going to run rsync on a delegated host + # instead of localhost + use_delegate = False + if delegate_to is not None and delegate_to in dest_host_ids: + # edge case: explicit delegate and dest_host are the same + # so we run rsync on the remote machine targeting its localhost + # (itself) + dest_host = '127.0.0.1' + use_delegate = True + elif delegate_to is not None and remote_transport: + # If we're delegating to a remote host then we need to use the + # delegate_to settings + use_delegate = True + + # Delegate to localhost as the source of the rsync unless we've been + # told (via delegate_to) that a different host is the source of the + # rsync + if not use_delegate and remote_transport: + # Create a connection to localhost to run rsync on + new_stdin = self._connection._new_stdin + + # Unlike port, there can be only one shell + localhost_shell = None + for host in C.LOCALHOST: + localhost_vars = task_vars['hostvars'].get(host, {}) + for shell_var in C.MAGIC_VARIABLE_MAPPING['shell']: + localhost_shell = localhost_vars.get(shell_var, None) + if localhost_shell: + break + if localhost_shell: + break + else: + localhost_shell = os.path.basename(C.DEFAULT_EXECUTABLE) + self._play_context.shell = localhost_shell + + # Unlike port, there can be only one executable + localhost_executable = None + for host in C.LOCALHOST: + localhost_vars = task_vars['hostvars'].get(host, {}) + for executable_var in C.MAGIC_VARIABLE_MAPPING['executable']: + localhost_executable = localhost_vars.get(executable_var, None) + if localhost_executable: + 
break + if localhost_executable: + break + else: + localhost_executable = C.DEFAULT_EXECUTABLE + self._play_context.executable = localhost_executable + + new_connection = connection_loader.get('local', self._play_context, new_stdin) + self._connection = new_connection + # Override _remote_is_local as an instance attribute specifically for the synchronize use case + # ensuring we set local tmpdir correctly + self._connection._remote_is_local = True + self._override_module_replaced_vars(task_vars) + + # SWITCH SRC AND DEST HOST PER MODE + if _tmp_args.get('mode', 'push') == 'pull': + (dest_host, src_host) = (src_host, dest_host) + + # MUNGE SRC AND DEST PER REMOTE_HOST INFO + src = _tmp_args.get('src', None) + dest = _tmp_args.get('dest', None) + if src is None or dest is None: + return dict(failed=True, msg="synchronize requires both src and dest parameters are set") + + # Determine if we need a user@ and a password + user = None + password = task_vars.get('ansible_ssh_pass', None) or task_vars.get('ansible_password', None) + if not dest_is_local: + # Src and dest rsync "path" handling + if boolean(_tmp_args.get('set_remote_user', 'yes'), strict=False): + if use_delegate: + user = task_vars.get('ansible_delegated_vars', dict()).get('ansible_user', None) + if not user: + user = task_vars.get('ansible_user') or self._play_context.remote_user + if not user: + user = C.DEFAULT_REMOTE_USER + else: + user = task_vars.get('ansible_user') or self._play_context.remote_user + + if self._templar is not None: + user = self._templar.template(user) + + # Private key handling + # Use the private_key parameter if passed else use context private_key_file + _tmp_args['private_key'] = _tmp_args.get('private_key', self._play_context.private_key_file) + + # use the mode to define src and dest's url + if _tmp_args.get('mode', 'push') == 'pull': + # src is a remote path: @, dest is a local path + src = self._process_remote(_tmp_args, src_host, src, user, inv_port in localhost_ports) + 
dest = self._process_origin(dest_host, dest, user) + else: + # src is a local path, dest is a remote path: @ + src = self._process_origin(src_host, src, user) + dest = self._process_remote(_tmp_args, dest_host, dest, user, inv_port in localhost_ports) + + password = dest_host_inventory_vars.get('ansible_ssh_pass', None) or dest_host_inventory_vars.get('ansible_password', None) + if self._templar is not None: + password = self._templar.template(password) + else: + # Still need to munge paths (to account for roles) even if we aren't + # copying files between hosts + src = self._get_absolute_path(path=src) + dest = self._get_absolute_path(path=dest) + + _tmp_args['_local_rsync_password'] = password + _tmp_args['src'] = src + _tmp_args['dest'] = dest + + # Allow custom rsync path argument + rsync_path = _tmp_args.get('rsync_path', None) + + # backup original become as we are probably about to unset it + become = self._play_context.become + + if not dest_is_local: + # don't escalate for docker. doing --rsync-path with docker exec fails + # and we can switch directly to the user via docker arguments + if self._play_context.become and not rsync_path and self._remote_transport not in DOCKER + PODMAN: + # If no rsync_path is set, become was originally set, and dest is + # remote then add privilege escalation here. + if self._play_context.become_method == 'sudo': + if self._play_context.become_user: + rsync_path = 'sudo -u %s rsync' % self._play_context.become_user + else: + rsync_path = 'sudo rsync' + # TODO: have to add in the rest of the become methods here + + # We cannot use privilege escalation on the machine running the + # module. Instead we run it on the machine rsync is connecting + # to. 
+ self._play_context.become = False + + _tmp_args['rsync_path'] = rsync_path + + # If launching synchronize against docker container + # use rsync_opts to support container to override rsh options + if self._remote_transport in DOCKER + BUILDAH + PODMAN and not use_delegate: + # Replicate what we do in the module argumentspec handling for lists + if not isinstance(_tmp_args.get('rsync_opts'), MutableSequence): + tmp_rsync_opts = _tmp_args.get('rsync_opts', []) + if isinstance(tmp_rsync_opts, string_types): + tmp_rsync_opts = tmp_rsync_opts.split(',') + elif isinstance(tmp_rsync_opts, (int, float)): + tmp_rsync_opts = [to_text(tmp_rsync_opts)] + _tmp_args['rsync_opts'] = tmp_rsync_opts + + if '--blocking-io' not in _tmp_args['rsync_opts']: + _tmp_args['rsync_opts'].append('--blocking-io') + + if self._remote_transport in DOCKER + PODMAN: + if become and self._play_context.become_user: + _tmp_args['rsync_opts'].append('--rsh=' + shlex_quote('%s exec -u %s -i' % (self._docker_cmd, self._play_context.become_user))) + elif user is not None: + _tmp_args['rsync_opts'].append('--rsh=' + shlex_quote('%s exec -u %s -i' % (self._docker_cmd, user))) + else: + _tmp_args['rsync_opts'].append('--rsh=' + shlex_quote('%s exec -i' % self._docker_cmd)) + elif self._remote_transport in BUILDAH: + _tmp_args['rsync_opts'].append('--rsh=' + shlex_quote('buildah run --')) + + # run the module and store the result + result.update(self._execute_module('ansible.posix.synchronize', module_args=_tmp_args, task_vars=task_vars)) + + return result From 4512e7b1e9d195bd30a535f09f207b322da53177 Mon Sep 17 00:00:00 2001 From: Juan Antonio Valino Garcia Date: Fri, 7 Oct 2022 18:10:45 +0200 Subject: [PATCH 14/25] add changelog fragment --- changelogs/fragments/390_hosts_involved_same_password.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 changelogs/fragments/390_hosts_involved_same_password.yml diff --git a/changelogs/fragments/390_hosts_involved_same_password.yml 
b/changelogs/fragments/390_hosts_involved_same_password.yml new file mode 100644 index 0000000..1169a31 --- /dev/null +++ b/changelogs/fragments/390_hosts_involved_same_password.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - synchronize - Fixed hosts involved in rsync require the same password From 50f87b0d15903cfc3f7cd700d550b1e633310a0b Mon Sep 17 00:00:00 2001 From: Juan Antonio Valino Garcia Date: Fri, 7 Oct 2022 18:30:49 +0200 Subject: [PATCH 15/25] move plugin to correct dir --- plugins/action/synchronize.py | 7 +- synchronize.py | 434 ---------------------------------- 2 files changed, 5 insertions(+), 436 deletions(-) delete mode 100644 synchronize.py diff --git a/plugins/action/synchronize.py b/plugins/action/synchronize.py index 7a330ac..c70db5f 100644 --- a/plugins/action/synchronize.py +++ b/plugins/action/synchronize.py @@ -347,7 +347,8 @@ class ActionModule(ActionBase): else: user = task_vars.get('ansible_user') or self._play_context.remote_user - user = self._templar.template(user) + if self._templar is not None: + user = self._templar.template(user) # Private key handling # Use the private_key parameter if passed else use context private_key_file @@ -364,13 +365,15 @@ class ActionModule(ActionBase): dest = self._process_remote(_tmp_args, dest_host, dest, user, inv_port in localhost_ports) password = dest_host_inventory_vars.get('ansible_ssh_pass', None) or dest_host_inventory_vars.get('ansible_password', None) + if self._templar is not None: + password = self._templar.template(password) else: # Still need to munge paths (to account for roles) even if we aren't # copying files between hosts src = self._get_absolute_path(path=src) dest = self._get_absolute_path(path=dest) - _tmp_args['_local_rsync_password'] = self._templar.template(password) + _tmp_args['_local_rsync_password'] = password _tmp_args['src'] = src _tmp_args['dest'] = dest diff --git a/synchronize.py b/synchronize.py deleted file mode 100644 index c70db5f..0000000 --- a/synchronize.py +++ 
/dev/null @@ -1,434 +0,0 @@ -# -*- coding: utf-8 -*- - -# (c) 2012-2013, Timothy Appnel -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os.path - -from ansible import constants as C -from ansible.module_utils.six import string_types -from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils._text import to_text -from ansible.module_utils.common._collections_compat import MutableSequence -from ansible.module_utils.parsing.convert_bool import boolean -from ansible.plugins.action import ActionBase -from ansible.plugins.loader import connection_loader - - -DOCKER = ['docker', 'community.general.docker', 'community.docker.docker'] -PODMAN = ['podman', 'ansible.builtin.podman', 'containers.podman.podman'] -BUILDAH = ['buildah', 'containers.podman.buildah'] - - -class ActionModule(ActionBase): - - def _get_absolute_path(self, path): - original_path = path - - # - # Check if we have a local relative path and do not process - # * remote paths (some.server.domain:/some/remote/path/...) - # * URLs (rsync://...) - # * local absolute paths (/some/local/path/...) 
- # - if ':' in path or path.startswith('/'): - return path - - if self._task._role is not None: - path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path) - else: - path = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', path) - - if original_path and original_path[-1] == '/' and path[-1] != '/': - # make sure the dwim'd path ends in a trailing "/" - # if the original path did - path += '/' - - return path - - def _host_is_ipv6_address(self, host): - return ':' in to_text(host, errors='surrogate_or_strict') - - def _format_rsync_rsh_target(self, host, path, user): - ''' formats rsync rsh target, escaping ipv6 addresses if needed ''' - - user_prefix = '' - - if path.startswith('rsync://'): - return path - - # If using docker or buildah, do not add user information - if self._remote_transport not in DOCKER + PODMAN + BUILDAH and user: - user_prefix = '%s@' % (user, ) - - if self._host_is_ipv6_address(host): - return '[%s%s]:%s' % (user_prefix, host, path) - return '%s%s:%s' % (user_prefix, host, path) - - def _process_origin(self, host, path, user): - - if host not in C.LOCALHOST: - return self._format_rsync_rsh_target(host, path, user) - - path = self._get_absolute_path(path=path) - return path - - def _process_remote(self, task_args, host, path, user, port_matches_localhost_port): - """ - :arg host: hostname for the path - :arg path: file path - :arg user: username for the transfer - :arg port_matches_localhost_port: boolean whether the remote port - matches the port used by localhost's sshd. This is used in - conjunction with seeing whether the host is localhost to know - if we need to have the module substitute the pathname or if it - is a different host (for instance, an ssh tunnelled port or an - alternative ssh port to a vagrant host.) 
- """ - transport = self._connection.transport - # If we're connecting to a remote host or we're delegating to another - # host or we're connecting to a different ssh instance on the - # localhost then we have to format the path as a remote rsync path - if host not in C.LOCALHOST or transport != "local" or \ - (host in C.LOCALHOST and not port_matches_localhost_port): - # If we're delegating to non-localhost and but the - # inventory_hostname host is localhost then we need the module to - # fix up the rsync path to use the controller's public DNS/IP - # instead of "localhost" - if port_matches_localhost_port and host in C.LOCALHOST: - task_args['_substitute_controller'] = True - return self._format_rsync_rsh_target(host, path, user) - - path = self._get_absolute_path(path=path) - return path - - def _override_module_replaced_vars(self, task_vars): - """ Some vars are substituted into the modules. Have to make sure - that those are correct for localhost when synchronize creates its own - connection to localhost.""" - - # Clear the current definition of these variables as they came from the - # connection to the remote host - if 'ansible_syslog_facility' in task_vars: - del task_vars['ansible_syslog_facility'] - for key in list(task_vars.keys()): - if key.startswith("ansible_") and key.endswith("_interpreter"): - del task_vars[key] - - # Add the definitions from localhost - for host in C.LOCALHOST: - if host in task_vars['hostvars']: - localhost = task_vars['hostvars'][host] - break - if 'ansible_syslog_facility' in localhost: - task_vars['ansible_syslog_facility'] = localhost['ansible_syslog_facility'] - for key in localhost: - if key.startswith("ansible_") and key.endswith("_interpreter"): - task_vars[key] = localhost[key] - - def run(self, tmp=None, task_vars=None): - ''' generates params and passes them on to the rsync module ''' - # When modifying this function be aware of the tricky convolutions - # your thoughts have to go through: - # - # In normal ansible, 
we connect from controller to inventory_hostname - # (playbook's hosts: field) or controller to delegate_to host and run - # a module on one of those hosts. - # - # So things that are directly related to the core of ansible are in - # terms of that sort of connection that always originate on the - # controller. - # - # In synchronize we use ansible to connect to either the controller or - # to the delegate_to host and then run rsync which makes its own - # connection from controller to inventory_hostname or delegate_to to - # inventory_hostname. - # - # That means synchronize needs to have some knowledge of the - # controller to inventory_host/delegate host that ansible typically - # establishes and use those to construct a command line for rsync to - # connect from the inventory_host to the controller/delegate. The - # challenge for coders is remembering which leg of the trip is - # associated with the conditions that you're checking at any one time. - if task_vars is None: - task_vars = dict() - - # We make a copy of the args here because we may fail and be asked to - # retry. If that happens we don't want to pass the munged args through - # to our next invocation. Munged args are single use only. 
- _tmp_args = self._task.args.copy() - - result = super(ActionModule, self).run(tmp, task_vars) - del tmp # tmp no longer has any effect - - # Store remote connection type - self._remote_transport = self._connection.transport - use_ssh_args = _tmp_args.pop('use_ssh_args', None) - - if use_ssh_args and self._connection.transport == 'ssh': - ssh_args = [ - self._connection.get_option('ssh_args'), - self._connection.get_option('ssh_common_args'), - self._connection.get_option('ssh_extra_args'), - ] - _tmp_args['ssh_args'] = ' '.join([a for a in ssh_args if a]) - - # Handle docker connection options - if self._remote_transport in DOCKER: - self._docker_cmd = self._connection.docker_cmd - if self._play_context.docker_extra_args: - self._docker_cmd = "%s %s" % (self._docker_cmd, self._play_context.docker_extra_args) - elif self._remote_transport in PODMAN: - self._docker_cmd = self._connection._options['podman_executable'] - if self._connection._options.get('podman_extra_args'): - self._docker_cmd = "%s %s" % (self._docker_cmd, self._connection._options['podman_extra_args']) - - # self._connection accounts for delegate_to so - # remote_transport is the transport ansible thought it would need - # between the controller and the delegate_to host or the controller - # and the remote_host if delegate_to isn't set. - - remote_transport = False - if self._connection.transport != 'local': - remote_transport = True - - try: - delegate_to = self._task.delegate_to - except (AttributeError, KeyError): - delegate_to = None - - # ssh paramiko docker buildah and local are fully supported transports. Anything - # else only works with delegate_to - if delegate_to is None and self._connection.transport not in [ - 'ssh', 'paramiko', 'local'] + DOCKER + PODMAN + BUILDAH: - result['failed'] = True - result['msg'] = ( - "synchronize uses rsync to function. rsync needs to connect to the remote " - "host via ssh, docker client or a direct filesystem " - "copy. 
This remote host is being accessed via %s instead " - "so it cannot work." % self._connection.transport) - return result - - # Parameter name needed by the ansible module - _tmp_args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync' - - # rsync thinks that one end of the connection is localhost and the - # other is the host we're running the task for (Note: We use - # ansible's delegate_to mechanism to determine which host rsync is - # running on so localhost could be a non-controller machine if - # delegate_to is used) - src_host = '127.0.0.1' - inventory_hostname = task_vars.get('inventory_hostname') - dest_host_inventory_vars = task_vars['hostvars'].get(inventory_hostname) - dest_host = dest_host_inventory_vars.get('ansible_host', inventory_hostname) - - dest_host_ids = [hostid for hostid in (dest_host_inventory_vars.get('inventory_hostname'), - dest_host_inventory_vars.get('ansible_host')) - if hostid is not None] - - localhost_ports = set() - for host in C.LOCALHOST: - localhost_vars = task_vars['hostvars'].get(host, {}) - for port_var in C.MAGIC_VARIABLE_MAPPING['port']: - port = localhost_vars.get(port_var, None) - if port: - break - else: - port = C.DEFAULT_REMOTE_PORT - localhost_ports.add(port) - - # dest_is_local tells us if the host rsync runs on is the same as the - # host rsync puts the files on. This is about *rsync's connection*, - # not about the ansible connection to run the module. 
- dest_is_local = False - if delegate_to is None and remote_transport is False: - dest_is_local = True - elif delegate_to is not None and delegate_to in dest_host_ids: - dest_is_local = True - - # CHECK FOR NON-DEFAULT SSH PORT - inv_port = task_vars.get('ansible_port', None) or C.DEFAULT_REMOTE_PORT - if _tmp_args.get('dest_port', None) is None: - if inv_port is not None: - _tmp_args['dest_port'] = inv_port - - # Set use_delegate if we are going to run rsync on a delegated host - # instead of localhost - use_delegate = False - if delegate_to is not None and delegate_to in dest_host_ids: - # edge case: explicit delegate and dest_host are the same - # so we run rsync on the remote machine targeting its localhost - # (itself) - dest_host = '127.0.0.1' - use_delegate = True - elif delegate_to is not None and remote_transport: - # If we're delegating to a remote host then we need to use the - # delegate_to settings - use_delegate = True - - # Delegate to localhost as the source of the rsync unless we've been - # told (via delegate_to) that a different host is the source of the - # rsync - if not use_delegate and remote_transport: - # Create a connection to localhost to run rsync on - new_stdin = self._connection._new_stdin - - # Unlike port, there can be only one shell - localhost_shell = None - for host in C.LOCALHOST: - localhost_vars = task_vars['hostvars'].get(host, {}) - for shell_var in C.MAGIC_VARIABLE_MAPPING['shell']: - localhost_shell = localhost_vars.get(shell_var, None) - if localhost_shell: - break - if localhost_shell: - break - else: - localhost_shell = os.path.basename(C.DEFAULT_EXECUTABLE) - self._play_context.shell = localhost_shell - - # Unlike port, there can be only one executable - localhost_executable = None - for host in C.LOCALHOST: - localhost_vars = task_vars['hostvars'].get(host, {}) - for executable_var in C.MAGIC_VARIABLE_MAPPING['executable']: - localhost_executable = localhost_vars.get(executable_var, None) - if localhost_executable: - 
break - if localhost_executable: - break - else: - localhost_executable = C.DEFAULT_EXECUTABLE - self._play_context.executable = localhost_executable - - new_connection = connection_loader.get('local', self._play_context, new_stdin) - self._connection = new_connection - # Override _remote_is_local as an instance attribute specifically for the synchronize use case - # ensuring we set local tmpdir correctly - self._connection._remote_is_local = True - self._override_module_replaced_vars(task_vars) - - # SWITCH SRC AND DEST HOST PER MODE - if _tmp_args.get('mode', 'push') == 'pull': - (dest_host, src_host) = (src_host, dest_host) - - # MUNGE SRC AND DEST PER REMOTE_HOST INFO - src = _tmp_args.get('src', None) - dest = _tmp_args.get('dest', None) - if src is None or dest is None: - return dict(failed=True, msg="synchronize requires both src and dest parameters are set") - - # Determine if we need a user@ and a password - user = None - password = task_vars.get('ansible_ssh_pass', None) or task_vars.get('ansible_password', None) - if not dest_is_local: - # Src and dest rsync "path" handling - if boolean(_tmp_args.get('set_remote_user', 'yes'), strict=False): - if use_delegate: - user = task_vars.get('ansible_delegated_vars', dict()).get('ansible_user', None) - if not user: - user = task_vars.get('ansible_user') or self._play_context.remote_user - if not user: - user = C.DEFAULT_REMOTE_USER - else: - user = task_vars.get('ansible_user') or self._play_context.remote_user - - if self._templar is not None: - user = self._templar.template(user) - - # Private key handling - # Use the private_key parameter if passed else use context private_key_file - _tmp_args['private_key'] = _tmp_args.get('private_key', self._play_context.private_key_file) - - # use the mode to define src and dest's url - if _tmp_args.get('mode', 'push') == 'pull': - # src is a remote path: @, dest is a local path - src = self._process_remote(_tmp_args, src_host, src, user, inv_port in localhost_ports) - 
dest = self._process_origin(dest_host, dest, user) - else: - # src is a local path, dest is a remote path: @ - src = self._process_origin(src_host, src, user) - dest = self._process_remote(_tmp_args, dest_host, dest, user, inv_port in localhost_ports) - - password = dest_host_inventory_vars.get('ansible_ssh_pass', None) or dest_host_inventory_vars.get('ansible_password', None) - if self._templar is not None: - password = self._templar.template(password) - else: - # Still need to munge paths (to account for roles) even if we aren't - # copying files between hosts - src = self._get_absolute_path(path=src) - dest = self._get_absolute_path(path=dest) - - _tmp_args['_local_rsync_password'] = password - _tmp_args['src'] = src - _tmp_args['dest'] = dest - - # Allow custom rsync path argument - rsync_path = _tmp_args.get('rsync_path', None) - - # backup original become as we are probably about to unset it - become = self._play_context.become - - if not dest_is_local: - # don't escalate for docker. doing --rsync-path with docker exec fails - # and we can switch directly to the user via docker arguments - if self._play_context.become and not rsync_path and self._remote_transport not in DOCKER + PODMAN: - # If no rsync_path is set, become was originally set, and dest is - # remote then add privilege escalation here. - if self._play_context.become_method == 'sudo': - if self._play_context.become_user: - rsync_path = 'sudo -u %s rsync' % self._play_context.become_user - else: - rsync_path = 'sudo rsync' - # TODO: have to add in the rest of the become methods here - - # We cannot use privilege escalation on the machine running the - # module. Instead we run it on the machine rsync is connecting - # to. 
- self._play_context.become = False - - _tmp_args['rsync_path'] = rsync_path - - # If launching synchronize against docker container - # use rsync_opts to support container to override rsh options - if self._remote_transport in DOCKER + BUILDAH + PODMAN and not use_delegate: - # Replicate what we do in the module argumentspec handling for lists - if not isinstance(_tmp_args.get('rsync_opts'), MutableSequence): - tmp_rsync_opts = _tmp_args.get('rsync_opts', []) - if isinstance(tmp_rsync_opts, string_types): - tmp_rsync_opts = tmp_rsync_opts.split(',') - elif isinstance(tmp_rsync_opts, (int, float)): - tmp_rsync_opts = [to_text(tmp_rsync_opts)] - _tmp_args['rsync_opts'] = tmp_rsync_opts - - if '--blocking-io' not in _tmp_args['rsync_opts']: - _tmp_args['rsync_opts'].append('--blocking-io') - - if self._remote_transport in DOCKER + PODMAN: - if become and self._play_context.become_user: - _tmp_args['rsync_opts'].append('--rsh=' + shlex_quote('%s exec -u %s -i' % (self._docker_cmd, self._play_context.become_user))) - elif user is not None: - _tmp_args['rsync_opts'].append('--rsh=' + shlex_quote('%s exec -u %s -i' % (self._docker_cmd, user))) - else: - _tmp_args['rsync_opts'].append('--rsh=' + shlex_quote('%s exec -i' % self._docker_cmd)) - elif self._remote_transport in BUILDAH: - _tmp_args['rsync_opts'].append('--rsh=' + shlex_quote('buildah run --')) - - # run the module and store the result - result.update(self._execute_module('ansible.posix.synchronize', module_args=_tmp_args, task_vars=task_vars)) - - return result From d1fbbb79058511bd2ea7e8fda4c512fab6a94450 Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Tue, 8 Nov 2022 10:17:53 -0600 Subject: [PATCH 16/25] rhel4edge modules Signed-off-by: Adam Miller --- plugins/modules/r4e_rpm_ostree.py | 123 +++++++++++++++++++++++++ plugins/modules/rpm_ostree_upgrade.py | 125 ++++++++++++++++++++++++++ 2 files changed, 248 insertions(+) create mode 100644 plugins/modules/r4e_rpm_ostree.py create mode 100644 
plugins/modules/rpm_ostree_upgrade.py diff --git a/plugins/modules/r4e_rpm_ostree.py b/plugins/modules/r4e_rpm_ostree.py new file mode 100644 index 0000000..04e25d8 --- /dev/null +++ b/plugins/modules/r4e_rpm_ostree.py @@ -0,0 +1,123 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: r4e_rpm_ostree +version_added: 2.14.0 +short_description: Ensure packages exist in a RHEL for Edge rpm-ostree based system +description: + - Compatibility layer for using the "package" module for RHEL for Edge systems utilizing the RHEL System Roles. +author: + - Adam Miller (@maxamillion) +requirements: + - rpm-ostree +options: + name: + description: + - A package name or package specifier with version, like C(name-1.0). + - Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name>=1.0) + - If a previous version is specified, the task also needs to turn C(allow_downgrade) on. + See the C(allow_downgrade) documentation for caveats with downgrading packages. + - When using state=latest, this can be C('*') which means run C(yum -y update). + - You can also pass a url or a local path to a rpm file (using state=present). + To operate on several packages this can accept a comma separated string of packages or (as of 2.0) a list of packages. + aliases: [ pkg ] + type: list + elements: str + state: + description: + - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package. + - C(present) and C(installed) will simply ensure that a desired package is installed. + - C(latest) will update the specified package if it's not of the latest available version. 
+ - C(absent) and C(removed) will remove the specified package. + - Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is + enabled for this module, then C(absent) is inferred. + type: str + choices: [ absent, installed, latest, present, removed ] +notes: + - This module does not support installing or removing packages to/from an overlay as this is not supported + by RHEL for Edge, packages needed should be defined in the osbuild Blueprint and provided to Image Builder + at build time. This module exists only for C(package) module compatibility. +''' + +EXAMPLES = ''' +- name: Install htop and ansible on rpm-ostree based overlay + ansible.builtin.rpm_ostree: + name: + - htop + - ansible + state: present +''' + +RETURN = """ +msg: + description: status of rpm transaction + returned: always + type: str + sample: "No changes made." +""" + +import os +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text + + +def locally_installed(module, pkgname): + (rc, out, err) = module.run_command('rpm -q {0}'.format(pkgname).split()) + return (rc == 0) + + +def rpm_ostree_transaction(module): + pkgs = [] + + if module.params['state'] in ['present', 'installed', 'latest']: + for pkg in module.params['name']: + if not locally_installed(module, pkg): + pkgs.append(pkg) + elif module.params['state'] in ['absent', 'removed']: + for pkg in module.params['name']: + if locally_installed(module, pkg): + pkgs.append(pkg) + + if not pkgs: + module.exit_json(msg="No changes made.") + else: + if module.params['state'] in ['present', 'installed', 'latest']: + module.fail_json(msg="The following packages are absent in the currently booted rpm-ostree commit: %s" % ' '.join(pkgs)) + else: + module.fail_json(msg="The following packages are present in the currently booted rpm-ostree commit: %s" % ' '.join(pkgs)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + 
name=dict(type='list', elements='str', aliases=['pkg'], default=[]), + state=dict(type='str', default=None, choices=['absent', 'installed', 'latest', 'present', 'removed']), + ), + ) + + # Verify that the platform is an rpm-ostree based system + if not os.path.exists("/run/ostree-booted"): + module.fail_json(msg="Module rpm_ostree is only applicable for rpm-ostree based systems.") + + try: + rpm_ostree_transaction(module) + except Exception as e: + module.fail_json(msg=to_text(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/rpm_ostree_upgrade.py b/plugins/modules/rpm_ostree_upgrade.py new file mode 100644 index 0000000..069e8b7 --- /dev/null +++ b/plugins/modules/rpm_ostree_upgrade.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: rpm_ostree_upgrade +short_description: Manage rpm-ostree upgrade transactions +description: + - Manage an rpm-ostree upgrade transactions +version_added: "2.14" +author: +- Adam Miller (@maxamillion) +requirements: + - rpm-ostree +options: + os: + description: + - The OSNAME upon which to operate + type: str + default: "" + required: false + cache_only: + description: + - Perform the transaction using only pre-cached data, don't download + type: bool + default: false + required: false + allow_downgrade: + description: + - Allow for the upgrade to be a chronologically older tree + type: bool + default: false + required: false + peer: + description: + - Force peer-to-peer connection instead of using system message bus + type: bool + default: false + required: false + +''' + +EXAMPLES = ''' +- name: Upgrade the 
rpm-ostree image without options, accept all defaults + ansible.builtin.rpm_ostree_upgrade: + +- name: Upgrade the rpm-ostree image allowing downgrades + ansible.builtin.rpm_ostree_upgrade: + allow_downgrade: true +''' + +RETURN = ''' +msg: + description: The command standard output + returned: always + type: str + sample: 'No upgrade available.' +''' + +import os +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native, to_text + + +def rpm_ostree_transaction(module): + cmd = [] + cmd.append(module.get_bin_path("rpm-ostree")) + cmd.append('upgrade') + + if module.params['os']: + cmd += ['--os', module.params['os']] + if module.params['cache_only']: + cmd += ['--cache-only'] + if module.params['allow_downgrade']: + cmd += ['--allow-downgrade'] + if module.params['peer']: + cmd += ['--peer'] + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + + rc, out, err = module.run_command(cmd) + + if rc != 0: + module.fail_json(rc=rc, msg=err) + else: + if to_text("No upgrade available.") in to_text(out): + module.exit_json(msg=out, changed=False) + else: + module.exit_json(msg=out, changed=True) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + os=dict(type='str', default=''), + cache_only=dict(type='bool', default=False), + allow_downgrade=dict(type='bool', default=False), + peer=dict(type='bool', default=False), + ), + ) + + # Verify that the platform is an rpm-ostree based system + if not os.path.exists("/run/ostree-booted"): + module.fail_json(msg="Module rpm_ostree_upgrade is only applicable for rpm-ostree based systems.") + + try: + rpm_ostree_transaction(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() From 69228e79d281a89c358c3a0fbbf8d6ea2253f30c Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Tue, 8 Nov 2022 11:25:25 -0600 Subject: [PATCH 17/25] fix up 
some sanity things Signed-off-by: Adam Miller --- plugins/modules/r4e_rpm_ostree.py | 4 ++-- plugins/modules/rpm_ostree_upgrade.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/modules/r4e_rpm_ostree.py b/plugins/modules/r4e_rpm_ostree.py index 04e25d8..47b0d7e 100644 --- a/plugins/modules/r4e_rpm_ostree.py +++ b/plugins/modules/r4e_rpm_ostree.py @@ -14,7 +14,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1', DOCUMENTATION = ''' --- module: r4e_rpm_ostree -version_added: 2.14.0 +version_added: 1.5.0 short_description: Ensure packages exist in a RHEL for Edge rpm-ostree based system description: - Compatibility layer for using the "package" module for RHEL for Edge systems utilizing the RHEL System Roles. @@ -53,7 +53,7 @@ notes: EXAMPLES = ''' - name: Install htop and ansible on rpm-ostree based overlay - ansible.builtin.rpm_ostree: + ansible.posix.r4e_rpm_ostree: name: - htop - ansible diff --git a/plugins/modules/rpm_ostree_upgrade.py b/plugins/modules/rpm_ostree_upgrade.py index 069e8b7..5b799ae 100644 --- a/plugins/modules/rpm_ostree_upgrade.py +++ b/plugins/modules/rpm_ostree_upgrade.py @@ -17,7 +17,7 @@ module: rpm_ostree_upgrade short_description: Manage rpm-ostree upgrade transactions description: - Manage an rpm-ostree upgrade transactions -version_added: "2.14" +version_added: 1.5.0 author: - Adam Miller (@maxamillion) requirements: @@ -52,10 +52,10 @@ options: EXAMPLES = ''' - name: Upgrade the rpm-ostree image without options, accept all defaults - ansible.builtin.rpm_ostree_upgrade: + ansible.posix.rpm_ostree_upgrade: - name: Upgrade the rpm-ostree image allowing downgrades - ansible.builtin.rpm_ostree_upgrade: + ansible.posix.rpm_ostree_upgrade: allow_downgrade: true ''' From a3b8fdbf25e2fc60a3879410ad7cdd5ba0d37584 Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Tue, 8 Nov 2022 16:23:08 -0600 Subject: [PATCH 18/25] add changelog fragment Signed-off-by: Adam Miller --- changelogs/fragments/393_rhel_for_edge.yml | 4 
++++ 1 file changed, 4 insertions(+) create mode 100644 changelogs/fragments/393_rhel_for_edge.yml diff --git a/changelogs/fragments/393_rhel_for_edge.yml b/changelogs/fragments/393_rhel_for_edge.yml new file mode 100644 index 0000000..118d377 --- /dev/null +++ b/changelogs/fragments/393_rhel_for_edge.yml @@ -0,0 +1,4 @@ +--- +minor_changes: +- r4e_rpm_ostree - new module for validating package state on RHEL for Edge +- rpm_ostree_upgrade - new module to manage upgrades for rpm-ostree based systems From fc5894171d5daa6008ce0c723440d27ddec2ca1f Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Tue, 15 Nov 2022 16:59:48 -0600 Subject: [PATCH 19/25] add rhel_facts, move r4e_rpm_ostree to rhel_rpm_ostree Signed-off-by: Adam Miller --- plugins/modules/rhel_facts.py | 73 +++++++++++++++++++ .../{r4e_rpm_ostree.py => rhel_rpm_ostree.py} | 6 +- 2 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 plugins/modules/rhel_facts.py rename plugins/modules/{r4e_rpm_ostree.py => rhel_rpm_ostree.py} (97%) diff --git a/plugins/modules/rhel_facts.py b/plugins/modules/rhel_facts.py new file mode 100644 index 0000000..e8084e5 --- /dev/null +++ b/plugins/modules/rhel_facts.py @@ -0,0 +1,73 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: rhel_facts +version_added: 1.5.0 +short_description: Facts module to set or override RHEL specific facts +description: + - Compatibility layer for using the "package" module for rpm-ostree based systems via setting the "pkg_mgr" fact correctly. 
+author: + - Adam Miller (@maxamillion) +requirements: + - rpm-ostree +seealso: + - module: ansible.builtin.package +options: {} +''' + +EXAMPLES = ''' +- name: Playbook to use the package module on all RHEL footprints + vars: + ansible_facts_modules: + - setup # REQUIRED to be run before all custom fact modules + - ansible.posix.rhel_facts + tasks: + - name: Ensure packages are installed + ansible.builtin.package: + name: + - htop + - ansible + state: present +''' + +RETURN = """ +ansible_facts: + description: Relevant Ansible Facts + returned: always + type: complex + sample: {'pkg_mgr': 'ansible.posix.rhel_facts'} +""" + +import os +import traceback + +from ansible.module_utils.basic import AnsibleModule + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + ansible_facts = {} + + # Verify that the platform is an rpm-ostree based system + if os.path.exists("/run/ostree-booted"): + ansible_facts['pkg_mgr'] = 'ansible.posix.rhel_rpm_ostree' + + module.exit_json(ansible_facts=ansible_facts, changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/r4e_rpm_ostree.py b/plugins/modules/rhel_rpm_ostree.py similarity index 97% rename from plugins/modules/r4e_rpm_ostree.py rename to plugins/modules/rhel_rpm_ostree.py index 47b0d7e..84ca19a 100644 --- a/plugins/modules/r4e_rpm_ostree.py +++ b/plugins/modules/rhel_rpm_ostree.py @@ -13,7 +13,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1', DOCUMENTATION = ''' --- -module: r4e_rpm_ostree +module: rhel_rpm_ostree version_added: 1.5.0 short_description: Ensure packages exist in a RHEL for Edge rpm-ostree based system description: @@ -52,8 +52,8 @@ notes: ''' EXAMPLES = ''' -- name: Install htop and ansible on rpm-ostree based overlay - ansible.posix.r4e_rpm_ostree: +- name: Ensure htop and ansible are installed on rpm-ostree based RHEL + ansible.posix.rhel_rpm_ostree: name: - htop - ansible From dcd9598e48bfb219948d780fed23843c82137ec8 Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Mon, 21
Nov 2022 12:14:34 -0600 Subject: [PATCH 20/25] make sanity checks happy Signed-off-by: Adam Miller --- plugins/modules/rhel_facts.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/plugins/modules/rhel_facts.py b/plugins/modules/rhel_facts.py index e8084e5..4ecfca5 100644 --- a/plugins/modules/rhel_facts.py +++ b/plugins/modules/rhel_facts.py @@ -45,19 +45,26 @@ EXAMPLES = ''' RETURN = """ ansible_facts: description: Relevant Ansible Facts - returned: always + returned: when needed type: complex - sample: {'pkg_mgr': 'ansible.posix.rhel_facts'} + contains: + pkg_mgr: + description: System-level package manager override + returned: when needed + type: str + sample: {'pkg_mgr': 'ansible.posix.rhel_facts'} """ import os -import traceback from ansible.module_utils.basic import AnsibleModule + def main(): + module = AnsibleModule( argument_spec=dict(), + supports_check_mode=True, ) ansible_facts = {} From 7df358d74fbc2a635379ccc413908ce91d0b5a04 Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Mon, 21 Nov 2022 12:26:48 -0600 Subject: [PATCH 21/25] add changelog fragment Signed-off-by: Adam Miller --- changelogs/fragments/393-rpm-ostree.yml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 changelogs/fragments/393-rpm-ostree.yml diff --git a/changelogs/fragments/393-rpm-ostree.yml b/changelogs/fragments/393-rpm-ostree.yml new file mode 100644 index 0000000..e473b39 --- /dev/null +++ b/changelogs/fragments/393-rpm-ostree.yml @@ -0,0 +1,5 @@ +--- +minor_changes: + - rhel_facts - new facts module to handle RHEL specific facts + - rhel_rpm_ostree - new module to handle RHEL rpm-ostree specific package management functionality + - rpm_ostree_upgrade - new module to automate rpm-ostree upgrades From e52ae8a9bcb1ff12de10b112279fb4a2fe691608 Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Tue, 22 Nov 2022 09:03:04 -0600 Subject: [PATCH 22/25] fixes based on feedback Signed-off-by: Adam Miller --- plugins/modules/rhel_facts.py | 6 
+----- plugins/modules/rhel_rpm_ostree.py | 2 +- plugins/modules/rpm_ostree_upgrade.py | 10 +++++----- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/plugins/modules/rhel_facts.py b/plugins/modules/rhel_facts.py index 4ecfca5..57c15f7 100644 --- a/plugins/modules/rhel_facts.py +++ b/plugins/modules/rhel_facts.py @@ -7,15 +7,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - DOCUMENTATION = ''' --- module: rhel_facts version_added: 1.5.0 short_description: Facts module to set or override RHEL specific facts description: - Compatibility layer for using the "package" module for rpm-ostree based systems via setting the "pkg_mgr" fact correctly. author: diff --git a/plugins/modules/rhel_rpm_ostree.py b/plugins/modules/rhel_rpm_ostree.py index 84ca19a..e5e8f2b 100644 --- a/plugins/modules/rhel_rpm_ostree.py +++ b/plugins/modules/rhel_rpm_ostree.py @@ -76,7 +76,7 @@ from ansible.module_utils._text import to_text def locally_installed(module, pkgname): - (rc, out, err) = module.run_command('rpm -q {0}'.format(pkgname).split()) + (rc, out, err) = module.run_command('{0} -q {1}'.format(module.get_bin_path("rpm"), pkgname).split()) return (rc == 0) diff --git a/plugins/modules/rpm_ostree_upgrade.py b/plugins/modules/rpm_ostree_upgrade.py index 5b799ae..16689ca 100644 --- a/plugins/modules/rpm_ostree_upgrade.py +++ b/plugins/modules/rpm_ostree_upgrade.py @@ -16,7 +16,7 @@ DOCUMENTATION = ''' module: rpm_ostree_upgrade short_description: Manage rpm-ostree upgrade transactions description: - - Manage an rpm-ostree upgrade transactions + - Manage rpm-ostree upgrade transactions.
version_added: 1.5.0 author: - Adam Miller (@maxamillion) @@ -25,25 +25,25 @@ requirements: options: os: description: - - The OSNAME upon which to operate + - The OSNAME upon which to operate. type: str default: "" required: false cache_only: description: - - Perform the transaction using only pre-cached data, don't download + - Perform the transaction using only pre-cached data, do not download. type: bool default: false required: false allow_downgrade: description: - - Allow for the upgrade to be a chronologically older tree + - Allow for the upgrade to be a chronologically older tree. type: bool default: false required: false peer: description: - - Force peer-to-peer connection instead of using system message bus + - Force peer-to-peer connection instead of using a system message bus. type: bool default: false required: false From adcb28f8069f33d0d93628ec106e3b19a7f9ee4e Mon Sep 17 00:00:00 2001 From: Gregory Furlong Date: Tue, 13 Dec 2022 10:40:51 -0500 Subject: [PATCH 23/25] Update documented default value for acl's entry parameter to match implementation. --- plugins/modules/acl.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/acl.py b/plugins/modules/acl.py index a2e3d6d..119520e 100644 --- a/plugins/modules/acl.py +++ b/plugins/modules/acl.py @@ -44,6 +44,7 @@ options: description: - The actual user or group that the ACL applies to when matching entity types user or group are selected. type: str + default: "" etype: description: - The entity type of the ACL to apply, see C(setfacl) documentation for more info. From 0fff8fde30b8daee3ab66699bce082839576dd28 Mon Sep 17 00:00:00 2001 From: Gregory Furlong Date: Tue, 13 Dec 2022 11:48:39 -0500 Subject: [PATCH 24/25] Update documented default value for rhel_rpm_ostree's name parameter to match implementation. 
--- plugins/modules/rhel_rpm_ostree.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/rhel_rpm_ostree.py b/plugins/modules/rhel_rpm_ostree.py index e5e8f2b..0976e02 100644 --- a/plugins/modules/rhel_rpm_ostree.py +++ b/plugins/modules/rhel_rpm_ostree.py @@ -35,6 +35,7 @@ options: aliases: [ pkg ] type: list elements: str + default: [] state: description: - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package. From bf0ad4aad236ba719a41caf812d8f8e03ae966bf Mon Sep 17 00:00:00 2001 From: Gregory Furlong Date: Tue, 13 Dec 2022 12:46:39 -0500 Subject: [PATCH 25/25] Document pr #401 as a changelog fragment. --- changelogs/fragments/401_document_module_default_values.yml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 changelogs/fragments/401_document_module_default_values.yml diff --git a/changelogs/fragments/401_document_module_default_values.yml b/changelogs/fragments/401_document_module_default_values.yml new file mode 100644 index 0000000..8a631dc --- /dev/null +++ b/changelogs/fragments/401_document_module_default_values.yml @@ -0,0 +1,4 @@ +--- +trivial: +- acl - document default value for the ``entry`` parameter +- rhel_rpm_ostree - document default value for the ``name`` parameter