commit 6f928621f05bc6a891067f40661054c382ed79b8 Author: Ansible Core Team Date: Mon Mar 9 13:15:28 2020 +0000 Initial commit diff --git a/.github/workflows/collection-continuous-integration.yml b/.github/workflows/collection-continuous-integration.yml new file mode 100644 index 0000000..ba73893 --- /dev/null +++ b/.github/workflows/collection-continuous-integration.yml @@ -0,0 +1,308 @@ +name: Collection test suite + +on: + push: + pull_request: + schedule: + - cron: 3 0 * * * # Run daily at 0:03 UTC + +jobs: + build-collection-artifact: + name: Build collection + runs-on: ${{ matrix.runner-os }} + strategy: + matrix: + runner-os: + - ubuntu-latest + ansible-version: + - git+https://github.com/ansible/ansible.git@devel + runner-python-version: + - 3.8 + steps: + - name: Check out ${{ github.repository }} on disk + uses: actions/checkout@master + - name: Set up Python ${{ matrix.runner-python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.runner-python-version }} + - name: Set up pip cache + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('tests/sanity/requirements.txt') }}-${{ hashFiles('tests/unit/requirements.txt') }} + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install Ansible ${{ matrix.ansible-version }} + run: >- + python -m + pip + install + --user + ${{ matrix.ansible-version }} + - name: Build a collection tarball + run: >- + ~/.local/bin/ansible-galaxy + collection + build + --output-path + "${GITHUB_WORKSPACE}/.cache/collection-tarballs" + - name: Store migrated collection artifacts + uses: actions/upload-artifact@v1 + with: + name: >- + collection + path: .cache/collection-tarballs + + sanity-test-collection-via-vms: + name: Sanity in VM ${{ matrix.os.vm || 'ubuntu-latest' }} + needs: + - build-collection-artifact + runs-on: ${{ matrix.os.vm || 'ubuntu-latest' }} + strategy: + fail-fast: false + matrix: + ansible-version: + - 
git+https://github.com/ansible/ansible.git@devel + os: + - vm: ubuntu-latest + - vm: ubuntu-16.04 + - vm: macos-latest + python-version: + - 3.8 + - 3.7 + - 3.6 + - 3.5 + - 2.7 + steps: + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + - name: Set up pip cache + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ github.ref }}-sanity-VMs + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install Ansible ${{ matrix.ansible-version }} + run: >- + python -m + pip + install + --user + ${{ matrix.ansible-version }} + - name: Download migrated collection artifacts + uses: actions/download-artifact@v1 + with: + name: >- + collection + path: .cache/collection-tarballs + - name: Install the collection tarball + run: >- + ~/.local/bin/ansible-galaxy + collection + install + .cache/collection-tarballs/*.tar.gz + - name: Run collection sanity tests + run: >- + ~/.local/bin/ansible-test + sanity + --color + --requirements + --venv + --python + "${{ matrix.python-version }}" + -vvv + working-directory: >- + /${{ runner.os == 'Linux' && 'home' || 'Users' }}/runner/.ansible/collections/ansible_collections/ansible/posix + + sanity-test-collection-via-containers: + name: Sanity in container via Python ${{ matrix.python-version }} + needs: + - build-collection-artifact + runs-on: ${{ matrix.runner-os }} + strategy: + fail-fast: false + matrix: + runner-os: + - ubuntu-latest + runner-python-version: + - 3.8 + ansible-version: + - git+https://github.com/ansible/ansible.git@devel + python-version: + - 3.8 + - 2.7 + - 3.7 + - 3.6 + - 3.5 + - 2.6 + steps: + - name: Set up Python ${{ matrix.runner-python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.runner-python-version }} + - name: Set up pip cache + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ github.ref 
}}-sanity-containers + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install Ansible ${{ matrix.ansible-version }} + run: >- + python -m + pip + install + --user + ${{ matrix.ansible-version }} + - name: Download migrated collection artifacts + uses: actions/download-artifact@v1 + with: + name: >- + collection + path: .cache/collection-tarballs + - name: Install the collection tarball + run: >- + ~/.local/bin/ansible-galaxy + collection + install + .cache/collection-tarballs/*.tar.gz + - name: Run collection sanity tests + run: >- + ~/.local/bin/ansible-test + sanity + --color + --requirements + --docker + --python + "${{ matrix.python-version }}" + -vvv + working-directory: >- + /home/runner/.ansible/collections/ansible_collections/ansible/posix + + unit-test-collection-via-vms: + name: Units in VM ${{ matrix.os.vm || 'ubuntu-latest' }} + needs: + - build-collection-artifact + runs-on: ${{ matrix.os.vm || 'ubuntu-latest' }} + strategy: + fail-fast: false + matrix: + ansible-version: + - git+https://github.com/ansible/ansible.git@devel + os: + - vm: ubuntu-latest + - vm: ubuntu-16.04 + - vm: macos-latest + python-version: + - 3.8 + - 3.7 + - 3.6 + - 3.5 + - 2.7 + steps: + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + - name: Set up pip cache + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ github.ref }}-units-VMs + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install Ansible ${{ matrix.ansible-version }} + run: >- + python -m + pip + install + --user + ${{ matrix.ansible-version }} + - name: Download migrated collection artifacts + uses: actions/download-artifact@v1 + with: + name: >- + collection + path: .cache/collection-tarballs + - name: Install the collection tarball + run: >- + ~/.local/bin/ansible-galaxy + collection + install + .cache/collection-tarballs/*.tar.gz + - name: Run 
collection unit tests + run: | + [[ ! -d 'tests/unit' ]] && echo This collection does not have unit tests. Skipping... || \ + ~/.local/bin/ansible-test units --color --coverage --requirements --venv --python "${{ matrix.python-version }}" -vvv + working-directory: >- + /${{ runner.os == 'Linux' && 'home' || 'Users' }}/runner/.ansible/collections/ansible_collections/ansible/posix + + unit-test-collection-via-containers: + name: Units in container ${{ matrix.container-image }} + needs: + - build-collection-artifact + runs-on: ${{ matrix.runner-os }} + strategy: + fail-fast: false + matrix: + runner-os: + - ubuntu-latest + runner-python-version: + - 3.8 + ansible-version: + - git+https://github.com/ansible/ansible.git@devel + container-image: + - fedora31 + - ubuntu1804 + - centos8 + - opensuse15 + - fedora30 + - centos7 + - opensuse15py2 + - ubuntu1604 + - centos6 + steps: + - name: Set up Python ${{ matrix.runner-python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.runner-python-version }} + - name: Set up pip cache + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ github.ref }}-units-containers + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install Ansible ${{ matrix.ansible-version }} + run: >- + python -m + pip + install + --user + ${{ matrix.ansible-version }} + - name: Download migrated collection artifacts + uses: actions/download-artifact@v1 + with: + name: >- + collection + path: .cache/collection-tarballs + - name: Install the collection tarball + run: >- + ~/.local/bin/ansible-galaxy + collection + install + .cache/collection-tarballs/*.tar.gz + - name: Run collection unit tests + run: | + [[ ! -d 'tests/unit' ]] && echo This collection does not have unit tests. Skipping... 
|| \ + ~/.local/bin/ansible-test units --color --coverage --requirements --docker "${{ matrix.container-image }}" -vvv + working-directory: >- + /home/runner/.ansible/collections/ansible_collections/ansible/posix \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c6fc14a --- /dev/null +++ b/.gitignore @@ -0,0 +1,387 @@ + +# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv + +### dotenv ### +.env + +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data + + +### Git ### +# Created by git for backups. To disable backups in Git: +# $ git config --global mergetool.keepBackup false +*.orig + +# Created by git when using merge tools for conflicts +*.BACKUP.* +*.BASE.* +*.LOCAL.* +*.REMOTE.* +*_BACKUP_*.txt +*_BASE_*.txt +*_LOCAL_*.txt +*_REMOTE_*.txt + +#!! ERROR: jupyternotebook is undefined. 
Use list command to see defined gitignore types !!# + +### Linux ### + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### PyCharm+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### pydev ### +.pydevproject + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# Mr Developer +.mr.developer.cfg +.project + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### WebStorm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff + +# Generated files + +# Sensitive or high-churn files + +# Gradle + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake + +# Mongo Explorer plugin + +# File-based project format + +# IntelliJ + +# mpeltonen/sbt-idea plugin + +# JIRA plugin + +# Cursive Clojure plugin + +# Crashlytics plugin (for Android Studio and IntelliJ) + +# Editor-based Rest Client + +# Android studio 3.1+ serialized cache file + +### WebStorm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +.idea/**/sonarlint/ + +# SonarQube Plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator/ + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..10926e8 --- /dev/null +++ b/COPYING @@ -0,0 +1,675 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
+ diff --git a/README.md b/README.md new file mode 100644 index 0000000..8feba0e --- /dev/null +++ b/README.md @@ -0,0 +1,4 @@ +[![GitHub Actions CI/CD build status — Collection test suite](https://github.com/ansible-collection-migration/ansible.posix/workflows/Collection%20test%20suite/badge.svg?branch=master)](https://github.com/ansible-collection-migration/ansible.posix/actions?query=workflow%3A%22Collection%20test%20suite%22) + +Ansible Collection: ansible.posix +================================================= \ No newline at end of file diff --git a/galaxy.yml b/galaxy.yml new file mode 100644 index 0000000..a073b38 --- /dev/null +++ b/galaxy.yml @@ -0,0 +1,14 @@ +namespace: ansible +name: posix +version: 0.1.0 +readme: README.md +authors: null +description: null +license: GPL-3.0-or-later +license_file: COPYING +tags: null +dependencies: {} +repository: git@github.com:ansible-collection-migration/ansible.posix.git +documentation: https://github.com/ansible-collection-migration/ansible.posix/tree/master/docs +homepage: https://github.com/ansible-collection-migration/ansible.posix +issues: https://github.com/ansible-collection-migration/ansible.posix/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc diff --git a/plugins/action/__init__.py b/plugins/action/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/plugins/action/patch.py b/plugins/action/patch.py new file mode 100644 index 0000000..f4ad7ce --- /dev/null +++ b/plugins/action/patch.py @@ -0,0 +1,72 @@ +# (c) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail +from ansible.module_utils._text import to_native +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + + def run(self, tmp=None, task_vars=None): + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + src = self._task.args.get('src', None) + remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False) + + try: + if src is None: + raise AnsibleActionFail("src is required") + elif remote_src: + # everything is remote, so we just execute the module + # without changing any of the module arguments + raise _AnsibleActionDone(result=self._execute_module(task_vars=task_vars)) + + try: + src = self._find_needle('files', src) + except AnsibleError as e: + raise AnsibleActionFail(to_native(e)) + + tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src)) + self._transfer_file(src, tmp_src) + self._fixup_perms2((self._connection._shell.tmpdir, tmp_src)) + + new_module_args = self._task.args.copy() + new_module_args.update( + dict( + src=tmp_src, + ) + ) + + result.update(self._execute_module('patch', module_args=new_module_args, task_vars=task_vars)) + except AnsibleAction as e: + result.update(e.result) + finally: + self._remove_tmp_path(self._connection._shell.tmpdir) 
+ return result diff --git a/plugins/action/synchronize.py b/plugins/action/synchronize.py new file mode 100644 index 0000000..527c72d --- /dev/null +++ b/plugins/action/synchronize.py @@ -0,0 +1,420 @@ +# -*- coding: utf-8 -*- + +# (c) 2012-2013, Timothy Appnel +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os.path + +from ansible import constants as C +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_text +from ansible.module_utils.common._collections_compat import MutableSequence +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.plugins.action import ActionBase +from ansible.plugins.loader import connection_loader + + +class ActionModule(ActionBase): + + def _get_absolute_path(self, path): + original_path = path + + if path.startswith('rsync://'): + return path + + if self._task._role is not None: + path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path) + else: + path = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', path) + + if original_path and original_path[-1] == '/' and path[-1] != '/': + # make sure the dwim'd path ends in a trailing "/" + # if the original path did + path += '/' + + return path + + def _host_is_ipv6_address(self, host): + return ':' in to_text(host, 
errors='surrogate_or_strict') + + def _format_rsync_rsh_target(self, host, path, user): + ''' formats rsync rsh target, escaping ipv6 addresses if needed ''' + + user_prefix = '' + + if path.startswith('rsync://'): + return path + + # If using docker or buildah, do not add user information + if self._remote_transport not in ['docker', 'buildah'] and user: + user_prefix = '%s@' % (user, ) + + if self._host_is_ipv6_address(host): + return '[%s%s]:%s' % (user_prefix, host, path) + else: + return '%s%s:%s' % (user_prefix, host, path) + + def _process_origin(self, host, path, user): + + if host not in C.LOCALHOST: + return self._format_rsync_rsh_target(host, path, user) + + if ':' not in path and not path.startswith('/'): + path = self._get_absolute_path(path=path) + return path + + def _process_remote(self, task_args, host, path, user, port_matches_localhost_port): + """ + :arg host: hostname for the path + :arg path: file path + :arg user: username for the transfer + :arg port_matches_localhost_port: boolean whether the remote port + matches the port used by localhost's sshd. This is used in + conjunction with seeing whether the host is localhost to know + if we need to have the module substitute the pathname or if it + is a different host (for instance, an ssh tunnelled port or an + alternative ssh port to a vagrant host.) 
+ """ + transport = self._connection.transport + # If we're connecting to a remote host or we're delegating to another + # host or we're connecting to a different ssh instance on the + # localhost then we have to format the path as a remote rsync path + if host not in C.LOCALHOST or transport != "local" or \ + (host in C.LOCALHOST and not port_matches_localhost_port): + # If we're delegating to non-localhost and but the + # inventory_hostname host is localhost then we need the module to + # fix up the rsync path to use the controller's public DNS/IP + # instead of "localhost" + if port_matches_localhost_port and host in C.LOCALHOST: + task_args['_substitute_controller'] = True + return self._format_rsync_rsh_target(host, path, user) + + if ':' not in path and not path.startswith('/'): + path = self._get_absolute_path(path=path) + return path + + def _override_module_replaced_vars(self, task_vars): + """ Some vars are substituted into the modules. Have to make sure + that those are correct for localhost when synchronize creates its own + connection to localhost.""" + + # Clear the current definition of these variables as they came from the + # connection to the remote host + if 'ansible_syslog_facility' in task_vars: + del task_vars['ansible_syslog_facility'] + for key in list(task_vars.keys()): + if key.startswith("ansible_") and key.endswith("_interpreter"): + del task_vars[key] + + # Add the definitions from localhost + for host in C.LOCALHOST: + if host in task_vars['hostvars']: + localhost = task_vars['hostvars'][host] + break + if 'ansible_syslog_facility' in localhost: + task_vars['ansible_syslog_facility'] = localhost['ansible_syslog_facility'] + for key in localhost: + if key.startswith("ansible_") and key.endswith("_interpreter"): + task_vars[key] = localhost[key] + + def run(self, tmp=None, task_vars=None): + ''' generates params and passes them on to the rsync module ''' + # When modifying this function be aware of the tricky convolutions + # your 
thoughts have to go through: + # + # In normal ansible, we connect from controller to inventory_hostname + # (playbook's hosts: field) or controller to delegate_to host and run + # a module on one of those hosts. + # + # So things that are directly related to the core of ansible are in + # terms of that sort of connection that always originate on the + # controller. + # + # In synchronize we use ansible to connect to either the controller or + # to the delegate_to host and then run rsync which makes its own + # connection from controller to inventory_hostname or delegate_to to + # inventory_hostname. + # + # That means synchronize needs to have some knowledge of the + # controller to inventory_host/delegate host that ansible typically + # establishes and use those to construct a command line for rsync to + # connect from the inventory_host to the controller/delegate. The + # challenge for coders is remembering which leg of the trip is + # associated with the conditions that you're checking at any one time. + if task_vars is None: + task_vars = dict() + + # We make a copy of the args here because we may fail and be asked to + # retry. If that happens we don't want to pass the munged args through + # to our next invocation. Munged args are single use only. + _tmp_args = self._task.args.copy() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + # Store remote connection type + self._remote_transport = self._connection.transport + + # Handle docker connection options + if self._remote_transport == 'docker': + self._docker_cmd = self._connection.docker_cmd + if self._play_context.docker_extra_args: + self._docker_cmd = "%s %s" % (self._docker_cmd, self._play_context.docker_extra_args) + + # self._connection accounts for delegate_to so + # remote_transport is the transport ansible thought it would need + # between the controller and the delegate_to host or the controller + # and the remote_host if delegate_to isn't set. 
+ + remote_transport = False + if self._connection.transport != 'local': + remote_transport = True + + try: + delegate_to = self._task.delegate_to + except (AttributeError, KeyError): + delegate_to = None + + # ssh paramiko docker buildah and local are fully supported transports. Anything + # else only works with delegate_to + if delegate_to is None and self._connection.transport not in \ + ('ssh', 'paramiko', 'local', 'docker', 'buildah'): + result['failed'] = True + result['msg'] = ( + "synchronize uses rsync to function. rsync needs to connect to the remote " + "host via ssh, docker client or a direct filesystem " + "copy. This remote host is being accessed via %s instead " + "so it cannot work." % self._connection.transport) + return result + + use_ssh_args = _tmp_args.pop('use_ssh_args', None) + + # Parameter name needed by the ansible module + _tmp_args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync' + _tmp_args['_local_rsync_password'] = task_vars.get('ansible_ssh_pass') or task_vars.get('ansible_password') + + # rsync thinks that one end of the connection is localhost and the + # other is the host we're running the task for (Note: We use + # ansible's delegate_to mechanism to determine which host rsync is + # running on so localhost could be a non-controller machine if + # delegate_to is used) + src_host = '127.0.0.1' + inventory_hostname = task_vars.get('inventory_hostname') + dest_host_inventory_vars = task_vars['hostvars'].get(inventory_hostname) + try: + dest_host = dest_host_inventory_vars['ansible_host'] + except KeyError: + dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname) + + dest_host_ids = [hostid for hostid in (dest_host_inventory_vars.get('inventory_hostname'), + dest_host_inventory_vars.get('ansible_host'), + dest_host_inventory_vars.get('ansible_ssh_host')) + if hostid is not None] + + localhost_ports = set() + for host in C.LOCALHOST: + localhost_vars = task_vars['hostvars'].get(host, {}) + 
for port_var in C.MAGIC_VARIABLE_MAPPING['port']: + port = localhost_vars.get(port_var, None) + if port: + break + else: + port = C.DEFAULT_REMOTE_PORT + localhost_ports.add(port) + + # dest_is_local tells us if the host rsync runs on is the same as the + # host rsync puts the files on. This is about *rsync's connection*, + # not about the ansible connection to run the module. + dest_is_local = False + if delegate_to is None and remote_transport is False: + dest_is_local = True + elif delegate_to is not None and delegate_to in dest_host_ids: + dest_is_local = True + + # CHECK FOR NON-DEFAULT SSH PORT + inv_port = task_vars.get('ansible_ssh_port', None) or C.DEFAULT_REMOTE_PORT + if _tmp_args.get('dest_port', None) is None: + if inv_port is not None: + _tmp_args['dest_port'] = inv_port + + # Set use_delegate if we are going to run rsync on a delegated host + # instead of localhost + use_delegate = False + if delegate_to is not None and delegate_to in dest_host_ids: + # edge case: explicit delegate and dest_host are the same + # so we run rsync on the remote machine targeting its localhost + # (itself) + dest_host = '127.0.0.1' + use_delegate = True + elif delegate_to is not None and remote_transport: + # If we're delegating to a remote host then we need to use the + # delegate_to settings + use_delegate = True + + # Delegate to localhost as the source of the rsync unless we've been + # told (via delegate_to) that a different host is the source of the + # rsync + if not use_delegate and remote_transport: + # Create a connection to localhost to run rsync on + new_stdin = self._connection._new_stdin + + # Unlike port, there can be only one shell + localhost_shell = None + for host in C.LOCALHOST: + localhost_vars = task_vars['hostvars'].get(host, {}) + for shell_var in C.MAGIC_VARIABLE_MAPPING['shell']: + localhost_shell = localhost_vars.get(shell_var, None) + if localhost_shell: + break + if localhost_shell: + break + else: + localhost_shell = 
os.path.basename(C.DEFAULT_EXECUTABLE) + self._play_context.shell = localhost_shell + + # Unlike port, there can be only one executable + localhost_executable = None + for host in C.LOCALHOST: + localhost_vars = task_vars['hostvars'].get(host, {}) + for executable_var in C.MAGIC_VARIABLE_MAPPING['executable']: + localhost_executable = localhost_vars.get(executable_var, None) + if localhost_executable: + break + if localhost_executable: + break + else: + localhost_executable = C.DEFAULT_EXECUTABLE + self._play_context.executable = localhost_executable + + new_connection = connection_loader.get('local', self._play_context, new_stdin) + self._connection = new_connection + # Override _remote_is_local as an instance attribute specifically for the synchronize use case + # ensuring we set local tmpdir correctly + self._connection._remote_is_local = True + self._override_module_replaced_vars(task_vars) + + # SWITCH SRC AND DEST HOST PER MODE + if _tmp_args.get('mode', 'push') == 'pull': + (dest_host, src_host) = (src_host, dest_host) + + # MUNGE SRC AND DEST PER REMOTE_HOST INFO + src = _tmp_args.get('src', None) + dest = _tmp_args.get('dest', None) + if src is None or dest is None: + return dict(failed=True, msg="synchronize requires both src and dest parameters are set") + + # Determine if we need a user@ + user = None + if not dest_is_local: + # Src and dest rsync "path" handling + if boolean(_tmp_args.get('set_remote_user', 'yes'), strict=False): + if use_delegate: + user = task_vars.get('ansible_delegated_vars', dict()).get('ansible_ssh_user', None) + if not user: + user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user + if not user: + user = C.DEFAULT_REMOTE_USER + + else: + user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user + + # Private key handling + private_key = self._play_context.private_key_file + + if private_key is not None: + _tmp_args['private_key'] = private_key + + # use the mode to define src and dest's url + 
if _tmp_args.get('mode', 'push') == 'pull': + # src is a remote path: @, dest is a local path + src = self._process_remote(_tmp_args, src_host, src, user, inv_port in localhost_ports) + dest = self._process_origin(dest_host, dest, user) + else: + # src is a local path, dest is a remote path: @ + src = self._process_origin(src_host, src, user) + dest = self._process_remote(_tmp_args, dest_host, dest, user, inv_port in localhost_ports) + else: + # Still need to munge paths (to account for roles) even if we aren't + # copying files between hosts + if not src.startswith('/'): + src = self._get_absolute_path(path=src) + if not dest.startswith('/'): + dest = self._get_absolute_path(path=dest) + + _tmp_args['src'] = src + _tmp_args['dest'] = dest + + # Allow custom rsync path argument + rsync_path = _tmp_args.get('rsync_path', None) + + # backup original become as we are probably about to unset it + become = self._play_context.become + + if not dest_is_local: + # don't escalate for docker. doing --rsync-path with docker exec fails + # and we can switch directly to the user via docker arguments + if self._play_context.become and not rsync_path and self._remote_transport != 'docker': + # If no rsync_path is set, become was originally set, and dest is + # remote then add privilege escalation here. + if self._play_context.become_method == 'sudo': + rsync_path = 'sudo rsync' + # TODO: have to add in the rest of the become methods here + + # We cannot use privilege escalation on the machine running the + # module. Instead we run it on the machine rsync is connecting + # to. 
+ self._play_context.become = False + + _tmp_args['rsync_path'] = rsync_path + + if use_ssh_args: + ssh_args = [ + getattr(self._play_context, 'ssh_args', ''), + getattr(self._play_context, 'ssh_common_args', ''), + getattr(self._play_context, 'ssh_extra_args', ''), + ] + _tmp_args['ssh_args'] = ' '.join([a for a in ssh_args if a]) + + # If launching synchronize against docker container + # use rsync_opts to support container to override rsh options + if self._remote_transport in ['docker', 'buildah'] and not use_delegate: + # Replicate what we do in the module argumentspec handling for lists + if not isinstance(_tmp_args.get('rsync_opts'), MutableSequence): + tmp_rsync_opts = _tmp_args.get('rsync_opts', []) + if isinstance(tmp_rsync_opts, string_types): + tmp_rsync_opts = tmp_rsync_opts.split(',') + elif isinstance(tmp_rsync_opts, (int, float)): + tmp_rsync_opts = [to_text(tmp_rsync_opts)] + _tmp_args['rsync_opts'] = tmp_rsync_opts + + if '--blocking-io' not in _tmp_args['rsync_opts']: + _tmp_args['rsync_opts'].append('--blocking-io') + + if self._remote_transport in ['docker']: + if become and self._play_context.become_user: + _tmp_args['rsync_opts'].append("--rsh=%s exec -u %s -i" % (self._docker_cmd, self._play_context.become_user)) + elif user is not None: + _tmp_args['rsync_opts'].append("--rsh=%s exec -u %s -i" % (self._docker_cmd, user)) + else: + _tmp_args['rsync_opts'].append("--rsh=%s exec -i" % self._docker_cmd) + elif self._remote_transport in ['buildah']: + _tmp_args['rsync_opts'].append("--rsh=buildah run --") + + # run the module and store the result + result.update(self._execute_module('synchronize', module_args=_tmp_args, task_vars=task_vars)) + + return result diff --git a/plugins/callback/__init__.py b/plugins/callback/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/plugins/callback/cgroup_perf_recap.py b/plugins/callback/cgroup_perf_recap.py new file mode 100644 index 0000000..5313353 --- /dev/null +++ 
b/plugins/callback/cgroup_perf_recap.py @@ -0,0 +1,465 @@ +# -*- coding: utf-8 -*- +# (c) 2018 Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' + callback: cgroup_perf_recap + callback_type: aggregate + requirements: + - whitelist in configuration + - cgroups + short_description: Profiles system activity of tasks and full execution using cgroups + description: + - This is an ansible callback plugin utilizes cgroups to profile system activity of ansible and + individual tasks, and display a recap at the end of the playbook execution + notes: + - Requires ansible to be run from within a cgroup, such as with + C(cgexec -g cpuacct,memory,pids:ansible_profile ansible-playbook ...) + - This cgroup should only be used by ansible to get accurate results + - To create the cgroup, first use a command such as + C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g cpuacct,memory,pids:ansible_profile) + options: + control_group: + required: True + description: Name of cgroups control group + env: + - name: CGROUP_CONTROL_GROUP + ini: + - section: callback_cgroup_perf_recap + key: control_group + cpu_poll_interval: + description: Interval between CPU polling for determining CPU usage. A lower value may produce inaccurate + results, a higher value may not be short enough to collect results for short tasks. + default: 0.25 + type: float + env: + - name: CGROUP_CPU_POLL_INTERVAL + ini: + - section: callback_cgroup_perf_recap + key: cpu_poll_interval + memory_poll_interval: + description: Interval between memory polling for determining memory usage. A lower value may produce inaccurate + results, a higher value may not be short enough to collect results for short tasks. 
+ default: 0.25 + type: float + env: + - name: CGROUP_MEMORY_POLL_INTERVAL + ini: + - section: callback_cgroup_perf_recap + key: memory_poll_interval + pid_poll_interval: + description: Interval between PID polling for determining PID count. A lower value may produce inaccurate + results, a higher value may not be short enough to collect results for short tasks. + default: 0.25 + type: float + env: + - name: CGROUP_PID_POLL_INTERVAL + ini: + - section: callback_cgroup_perf_recap + key: pid_poll_interval + display_recap: + description: Controls whether the recap is printed at the end, useful if you will automatically + process the output files + env: + - name: CGROUP_DISPLAY_RECAP + ini: + - section: callback_cgroup_perf_recap + key: display_recap + type: bool + default: true + file_name_format: + description: Format of filename. Accepts C(%(counter)s), C(%(task_uuid)s), + C(%(feature)s), C(%(ext)s). Defaults to C(%(feature)s.%(ext)s) when C(file_per_task) is C(False) + and C(%(counter)s-%(task_uuid)s-%(feature)s.%(ext)s) when C(True) + env: + - name: CGROUP_FILE_NAME_FORMAT + ini: + - section: callback_cgroup_perf_recap + key: file_name_format + type: str + default: '%(feature)s.%(ext)s' + output_dir: + description: Output directory for files containing recorded performance readings. If the value contains a + single %s, the start time of the playbook run will be inserted in that space. Only the deepest + level directory will be created if it does not exist, parent directories will not be created. 
+ type: path + default: /tmp/ansible-perf-%s + env: + - name: CGROUP_OUTPUT_DIR + ini: + - section: callback_cgroup_perf_recap + key: output_dir + output_format: + description: Output format, either CSV or JSON-seq + env: + - name: CGROUP_OUTPUT_FORMAT + ini: + - section: callback_cgroup_perf_recap + key: output_format + type: str + default: csv + choices: + - csv + - json + file_per_task: + description: When set as C(True) along with C(write_files), this callback will write 1 file per task + instead of 1 file for the entire playbook run + env: + - name: CGROUP_FILE_PER_TASK + ini: + - section: callback_cgroup_perf_recap + key: file_per_task + type: bool + default: False + write_files: + description: Dictates whether files will be written containing performance readings + env: + - name: CGROUP_WRITE_FILES + ini: + - section: callback_cgroup_perf_recap + key: write_files + type: bool + default: false +''' + +import csv +import datetime +import os +import time +import threading + +from abc import ABCMeta, abstractmethod + +from functools import partial + +from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.six import with_metaclass +from ansible.parsing.ajson import AnsibleJSONEncoder, json +from ansible.plugins.callback import CallbackBase + + +RS = '\x1e' # RECORD SEPARATOR +LF = '\x0a' # LINE FEED + + +def dict_fromkeys(keys, default=None): + d = {} + for key in keys: + d[key] = default() if callable(default) else default + return d + + +class BaseProf(with_metaclass(ABCMeta, threading.Thread)): + def __init__(self, path, obj=None, writer=None): + threading.Thread.__init__(self) # pylint: disable=non-parent-init-called + self.obj = obj + self.path = path + self.max = 0 + self.running = True + self.writer = writer + + def run(self): + while self.running: + self.poll() + + @abstractmethod + def poll(self): + pass + + +class MemoryProf(BaseProf): + """Python thread for recording memory usage""" + def __init__(self, path, 
poll_interval=0.25, obj=None, writer=None): + super(MemoryProf, self).__init__(path, obj=obj, writer=writer) + self._poll_interval = poll_interval + + def poll(self): + with open(self.path) as f: + val = int(f.read().strip()) / 1024**2 + if val > self.max: + self.max = val + if self.writer: + try: + self.writer(time.time(), self.obj.get_name(), self.obj._uuid, val) + except ValueError: + # We may be profiling after the playbook has ended + self.running = False + time.sleep(self._poll_interval) + + +class CpuProf(BaseProf): + def __init__(self, path, poll_interval=0.25, obj=None, writer=None): + super(CpuProf, self).__init__(path, obj=obj, writer=writer) + self._poll_interval = poll_interval + + def poll(self): + with open(self.path) as f: + start_time = time.time() * 1000**2 + start_usage = int(f.read().strip()) / 1000 + time.sleep(self._poll_interval) + with open(self.path) as f: + end_time = time.time() * 1000**2 + end_usage = int(f.read().strip()) / 1000 + val = (end_usage - start_usage) / (end_time - start_time) * 100 + if val > self.max: + self.max = val + if self.writer: + try: + self.writer(time.time(), self.obj.get_name(), self.obj._uuid, val) + except ValueError: + # We may be profiling after the playbook has ended + self.running = False + + +class PidsProf(BaseProf): + def __init__(self, path, poll_interval=0.25, obj=None, writer=None): + super(PidsProf, self).__init__(path, obj=obj, writer=writer) + self._poll_interval = poll_interval + + def poll(self): + with open(self.path) as f: + val = int(f.read().strip()) + if val > self.max: + self.max = val + if self.writer: + try: + self.writer(time.time(), self.obj.get_name(), self.obj._uuid, val) + except ValueError: + # We may be profiling after the playbook has ended + self.running = False + time.sleep(self._poll_interval) + + +def csv_writer(writer, timestamp, task_name, task_uuid, value): + writer.writerow([timestamp, task_name, task_uuid, value]) + + +def json_writer(writer, timestamp, task_name, 
task_uuid, value): + data = { + 'timestamp': timestamp, + 'task_name': task_name, + 'task_uuid': task_uuid, + 'value': value, + } + writer.write('%s%s%s' % (RS, json.dumps(data, cls=AnsibleJSONEncoder), LF)) + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'ansible.posix.cgroup_perf_recap' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display) + + self._features = ('memory', 'cpu', 'pids') + + self._units = { + 'memory': 'MB', + 'cpu': '%', + 'pids': '', + } + + self.task_results = dict_fromkeys(self._features, default=list) + self._profilers = dict.fromkeys(self._features) + self._files = dict.fromkeys(self._features) + self._writers = dict.fromkeys(self._features) + + self._file_per_task = False + self._counter = 0 + self.write_files = False + + def _open_files(self, task_uuid=None): + output_format = self._output_format + output_dir = self._output_dir + + for feature in self._features: + data = { + b'counter': to_bytes(self._counter), + b'task_uuid': to_bytes(task_uuid), + b'feature': to_bytes(feature), + b'ext': to_bytes(output_format) + } + + if self._files.get(feature): + try: + self._files[feature].close() + except Exception: + pass + + if self.write_files: + filename = self._file_name_format % data + + self._files[feature] = open(os.path.join(output_dir, filename), 'w+') + if output_format == b'csv': + self._writers[feature] = partial(csv_writer, csv.writer(self._files[feature])) + elif output_format == b'json': + self._writers[feature] = partial(json_writer, self._files[feature]) + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + cpu_poll_interval = self.get_option('cpu_poll_interval') + memory_poll_interval = self.get_option('memory_poll_interval') + pid_poll_interval = 
self.get_option('pid_poll_interval') + self._display_recap = self.get_option('display_recap') + + control_group = to_bytes(self.get_option('control_group'), errors='surrogate_or_strict') + self.mem_max_file = b'/sys/fs/cgroup/memory/%s/memory.max_usage_in_bytes' % control_group + mem_current_file = b'/sys/fs/cgroup/memory/%s/memory.usage_in_bytes' % control_group + cpu_usage_file = b'/sys/fs/cgroup/cpuacct/%s/cpuacct.usage' % control_group + pid_current_file = b'/sys/fs/cgroup/pids/%s/pids.current' % control_group + + for path in (self.mem_max_file, mem_current_file, cpu_usage_file, pid_current_file): + try: + with open(path) as f: + pass + except Exception as e: + self._display.warning( + u'Cannot open %s for reading (%s). Disabling %s' % (to_text(path), to_text(e), self.CALLBACK_NAME) + ) + self.disabled = True + return + + try: + with open(self.mem_max_file, 'w+') as f: + f.write('0') + except Exception as e: + self._display.warning( + u'Unable to reset max memory value in %s: %s' % (to_text(self.mem_max_file), to_text(e)) + ) + self.disabled = True + return + + try: + with open(cpu_usage_file, 'w+') as f: + f.write('0') + except Exception as e: + self._display.warning( + u'Unable to reset CPU usage value in %s: %s' % (to_text(cpu_usage_file), to_text(e)) + ) + self.disabled = True + return + + self._profiler_map = { + 'memory': partial(MemoryProf, mem_current_file, poll_interval=memory_poll_interval), + 'cpu': partial(CpuProf, cpu_usage_file, poll_interval=cpu_poll_interval), + 'pids': partial(PidsProf, pid_current_file, poll_interval=pid_poll_interval), + } + + self.write_files = self.get_option('write_files') + file_per_task = self.get_option('file_per_task') + self._output_format = to_bytes(self.get_option('output_format')) + output_dir = to_bytes(self.get_option('output_dir'), errors='surrogate_or_strict') + try: + output_dir %= to_bytes(datetime.datetime.now().isoformat()) + except TypeError: + pass + + self._output_dir = output_dir + + file_name_format = 
to_bytes(self.get_option('file_name_format')) + + if self.write_files: + if file_per_task: + self._file_per_task = True + if file_name_format == b'%(feature)s.%(ext)s': + file_name_format = b'%(counter)s-%(task_uuid)s-%(feature)s.%(ext)s' + else: + file_name_format = to_bytes(self.get_option('file_name_format')) + + self._file_name_format = file_name_format + + if not os.path.exists(output_dir): + try: + os.mkdir(output_dir) + except Exception as e: + self._display.warning( + u'Could not create the output directory at %s: %s' % (to_text(output_dir), to_text(e)) + ) + self.disabled = True + return + + if not self._file_per_task: + self._open_files() + + def _profile(self, obj=None): + prev_task = None + results = dict.fromkeys(self._features) + if not obj or self._file_per_task: + for dummy, f in self._files.items(): + if f is None: + continue + try: + f.close() + except Exception: + pass + + try: + for name, prof in self._profilers.items(): + prof.running = False + + for name, prof in self._profilers.items(): + results[name] = prof.max + prev_task = prof.obj + except AttributeError: + pass + + for name, result in results.items(): + if result is not None: + try: + self.task_results[name].append((prev_task, result)) + except ValueError: + pass + + if obj is not None: + if self._file_per_task or self._counter == 0: + self._open_files(task_uuid=obj._uuid) + + for feature in self._features: + self._profilers[feature] = self._profiler_map[feature](obj=obj, writer=self._writers[feature]) + self._profilers[feature].start() + + self._counter += 1 + + def v2_playbook_on_task_start(self, task, is_conditional): + self._profile(task) + + def v2_playbook_on_stats(self, stats): + self._profile() + + if not self._display_recap: + return + + with open(self.mem_max_file) as f: + max_results = int(f.read().strip()) / 1024 / 1024 + + self._display.banner('CGROUP PERF RECAP') + self._display.display('Memory Execution Maximum: %0.2fMB\n' % max_results) + for name, data in 
self.task_results.items(): + if name == 'memory': + continue + try: + self._display.display( + '%s Execution Maximum: %0.2f%s\n' % (name, max((t[1] for t in data)), self._units[name]) + ) + except Exception as e: + self._display.display('%s profiling error: no results collected: %s\n' % (name, e)) + + self._display.display('\n') + + for name, data in self.task_results.items(): + if data: + self._display.display('%s:\n' % name) + for task, value in data: + self._display.display('%s (%s): %0.2f%s' % (task.get_name(), task._uuid, value, self._units[name])) + self._display.display('\n') diff --git a/plugins/callback/debug.py b/plugins/callback/debug.py new file mode 100644 index 0000000..a7e1d15 --- /dev/null +++ b/plugins/callback/debug.py @@ -0,0 +1,53 @@ +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: debug + type: stdout + short_description: formatted stdout/stderr display + description: + - Use this callback to sort through extensive debug output + extends_documentation_fragment: + - default_callback + requirements: + - set as stdout in configuration +''' + +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default + + +class CallbackModule(CallbackModule_default): # pylint: disable=too-few-public-methods,no-init + ''' + Override for the default callback module. + + Render std err/out outside of the rest of the result which it prints with + indentation. 
+ ''' + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'ansible.posix.debug' + + def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): + '''Return the text to output for a result.''' + + # Enable JSON identation + result['_ansible_verbose_always'] = True + + save = {} + for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg', 'module_stdout', 'module_stderr']: + if key in result: + save[key] = result.pop(key) + + output = CallbackModule_default._dump_results(self, result) + + for key in ['stdout', 'stderr', 'msg', 'module_stdout', 'module_stderr']: + if key in save and save[key]: + output += '\n\n%s:\n\n%s\n' % (key.upper(), save[key]) + + for key, value in save.items(): + result[key] = value + + return output diff --git a/plugins/callback/json.py b/plugins/callback/json.py new file mode 100644 index 0000000..0009ac0 --- /dev/null +++ b/plugins/callback/json.py @@ -0,0 +1,140 @@ +# (c) 2016, Matt Martz +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: json + short_description: Ansible screen output as JSON + description: + - This callback converts all events into JSON output to stdout + type: stdout + requirements: + - Set as stdout in config + options: + show_custom_stats: + name: Show custom stats + description: 'This adds the custom stats set via the set_stats plugin to the play recap' + default: False + env: + - name: ANSIBLE_SHOW_CUSTOM_STATS + ini: + - key: show_custom_stats + section: defaults + type: bool +''' + +import datetime +import json + +from functools import partial + +from ansible.inventory.host import Host +from ansible.parsing.ajson import AnsibleJSONEncoder +from ansible.plugins.callback import CallbackBase + + +def current_time(): + return '%sZ' % 
datetime.datetime.utcnow().isoformat() + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'ansible.posix.json' + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display) + self.results = [] + + def _new_play(self, play): + return { + 'play': { + 'name': play.get_name(), + 'id': str(play._uuid), + 'duration': { + 'start': current_time() + } + }, + 'tasks': [] + } + + def _new_task(self, task): + return { + 'task': { + 'name': task.get_name(), + 'id': str(task._uuid), + 'duration': { + 'start': current_time() + } + }, + 'hosts': {} + } + + def v2_playbook_on_play_start(self, play): + self.results.append(self._new_play(play)) + + def v2_playbook_on_task_start(self, task, is_conditional): + self.results[-1]['tasks'].append(self._new_task(task)) + + def v2_playbook_on_handler_task_start(self, task): + self.results[-1]['tasks'].append(self._new_task(task)) + + def _convert_host_to_name(self, key): + if isinstance(key, (Host,)): + return key.get_name() + return key + + def v2_playbook_on_stats(self, stats): + """Display info about playbook statistics""" + + hosts = sorted(stats.processed.keys()) + + summary = {} + for h in hosts: + s = stats.summarize(h) + summary[h] = s + + custom_stats = {} + global_custom_stats = {} + + if self.get_option('show_custom_stats') and stats.custom: + custom_stats.update(dict((self._convert_host_to_name(k), v) for k, v in stats.custom.items())) + global_custom_stats.update(custom_stats.pop('_run', {})) + + output = { + 'plays': self.results, + 'stats': summary, + 'custom_stats': custom_stats, + 'global_custom_stats': global_custom_stats, + } + + self._display.display(json.dumps(output, cls=AnsibleJSONEncoder, indent=4, sort_keys=True)) + + def _record_task_result(self, on_info, result, **kwargs): + """This function is used as a partial to add failed/skipped info in a single method""" + host = result._host + task = result._task + task_result = 
result._result.copy() + task_result.update(on_info) + task_result['action'] = task.action + self.results[-1]['tasks'][-1]['hosts'][host.name] = task_result + end_time = current_time() + self.results[-1]['tasks'][-1]['task']['duration']['end'] = end_time + self.results[-1]['play']['duration']['end'] = end_time + + def __getattribute__(self, name): + """Return ``_record_task_result`` partial with a dict containing skipped/failed if necessary""" + if name not in ('v2_runner_on_ok', 'v2_runner_on_failed', 'v2_runner_on_unreachable', 'v2_runner_on_skipped'): + return object.__getattribute__(self, name) + + on = name.rsplit('_', 1)[1] + + on_info = {} + if on in ('failed', 'skipped'): + on_info[on] = True + + return partial(self._record_task_result, on_info) diff --git a/plugins/callback/profile_roles.py b/plugins/callback/profile_roles.py new file mode 100644 index 0000000..fc0346e --- /dev/null +++ b/plugins/callback/profile_roles.py @@ -0,0 +1,118 @@ +# (c) 2017, Tennis Smith, https://github.com/gamename +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: profile_roles + type: aggregate + short_description: adds timing information to roles + description: + - This callback module provides profiling for ansible roles. 
+ requirements: + - whitelisting in configuration +''' + +import collections +import time + +from ansible.plugins.callback import CallbackBase +from ansible.module_utils.six.moves import reduce + +# define start time +t0 = tn = time.time() + + +def secondsToStr(t): + # http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds + def rediv(ll, b): + return list(divmod(ll[0], b)) + ll[1:] + + return "%d:%02d:%02d.%03d" % tuple( + reduce(rediv, [[t * 1000, ], 1000, 60, 60])) + + +def filled(msg, fchar="*"): + if len(msg) == 0: + width = 79 + else: + msg = "%s " % msg + width = 79 - len(msg) + if width < 3: + width = 3 + filler = fchar * width + return "%s%s " % (msg, filler) + + +def timestamp(self): + if self.current is not None: + self.stats[self.current] = time.time() - self.stats[self.current] + self.totals[self.current] += self.stats[self.current] + + +def tasktime(): + global tn + time_current = time.strftime('%A %d %B %Y %H:%M:%S %z') + time_elapsed = secondsToStr(time.time() - tn) + time_total_elapsed = secondsToStr(time.time() - t0) + tn = time.time() + return filled('%s (%s)%s%s' % + (time_current, time_elapsed, ' ' * 7, time_total_elapsed)) + + +class CallbackModule(CallbackBase): + """ + This callback module provides profiling for ansible roles. 
+ """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'ansible.posix.profile_roles' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + self.stats = collections.Counter() + self.totals = collections.Counter() + self.current = None + super(CallbackModule, self).__init__() + + def _record_task(self, task): + """ + Logs the start of each task + """ + self._display.display(tasktime()) + timestamp(self) + + if task._role: + self.current = task._role._role_name + else: + self.current = task.action + + self.stats[self.current] = time.time() + + def v2_playbook_on_task_start(self, task, is_conditional): + self._record_task(task) + + def v2_playbook_on_handler_task_start(self, task): + self._record_task(task) + + def playbook_on_setup(self): + self._display.display(tasktime()) + + def playbook_on_stats(self, stats): + self._display.display(tasktime()) + self._display.display(filled("", fchar="=")) + + timestamp(self) + total_time = sum(self.totals.values()) + + # Print the timings starting with the largest one + for result in self.totals.most_common(): + msg = u"{0:-<70}{1:->9}".format(result[0] + u' ', u' {0:.02f}s'.format(result[1])) + self._display.display(msg) + + msg_total = u"{0:-<70}{1:->9}".format(u'total ', u' {0:.02f}s'.format(total_time)) + self._display.display(filled("", fchar="~")) + self._display.display(msg_total) diff --git a/plugins/callback/profile_tasks.py b/plugins/callback/profile_tasks.py new file mode 100644 index 0000000..9e25aaf --- /dev/null +++ b/plugins/callback/profile_tasks.py @@ -0,0 +1,193 @@ +# (C) 2016, Joel, https://github.com/jjshoe +# (C) 2015, Tom Paine, +# (C) 2014, Jharrod LaFon, @JharrodLaFon +# (C) 2012-2013, Michael DeHaan, +# (C) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + 
callback: profile_tasks + type: aggregate + short_description: adds time information to tasks + description: + - Ansible callback plugin for timing individual tasks and overall execution time. + - "Mashup of 2 excellent original works: https://github.com/jlafon/ansible-profile, + https://github.com/junaid18183/ansible_home/blob/master/ansible_plugins/callback_plugins/timestamp.py.old" + - "Format: C( () )" + - It also lists the top/bottom time consuming tasks in the summary (configurable) + - Before 2.4 only the environment variables were available for configuration. + requirements: + - whitelisting in configuration - see examples section below for details. + options: + output_limit: + description: Number of tasks to display in the summary + default: 20 + env: + - name: PROFILE_TASKS_TASK_OUTPUT_LIMIT + ini: + - section: callback_profile_tasks + key: task_output_limit + sort_order: + description: Adjust the sorting output of summary tasks + choices: ['descending', 'ascending', 'none'] + default: 'descending' + env: + - name: PROFILE_TASKS_SORT_ORDER + ini: + - section: callback_profile_tasks + key: sort_order +''' + +EXAMPLES = ''' +example: > + To enable, add this to your ansible.cfg file in the defaults block + [defaults] + callback_whitelist = profile_tasks +sample output: > +# +# TASK: [ensure messaging security group exists] ******************************** +# Thursday 11 June 2017 22:50:53 +0100 (0:00:00.721) 0:00:05.322 ********* +# ok: [localhost] +# +# TASK: [ensure db security group exists] *************************************** +# Thursday 11 June 2017 22:50:54 +0100 (0:00:00.558) 0:00:05.880 ********* +# changed: [localhost] +# +''' + +import collections +import time + +from ansible.module_utils.six.moves import reduce +from ansible.plugins.callback import CallbackBase + + +# define start time +t0 = tn = time.time() + + +def secondsToStr(t): + # http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds 
+ def rediv(ll, b): + return list(divmod(ll[0], b)) + ll[1:] + + return "%d:%02d:%02d.%03d" % tuple(reduce(rediv, [[t * 1000, ], 1000, 60, 60])) + + +def filled(msg, fchar="*"): + if len(msg) == 0: + width = 79 + else: + msg = "%s " % msg + width = 79 - len(msg) + if width < 3: + width = 3 + filler = fchar * width + return "%s%s " % (msg, filler) + + +def timestamp(self): + if self.current is not None: + self.stats[self.current]['time'] = time.time() - self.stats[self.current]['time'] + + +def tasktime(): + global tn + time_current = time.strftime('%A %d %B %Y %H:%M:%S %z') + time_elapsed = secondsToStr(time.time() - tn) + time_total_elapsed = secondsToStr(time.time() - t0) + tn = time.time() + return filled('%s (%s)%s%s' % (time_current, time_elapsed, ' ' * 7, time_total_elapsed)) + + +class CallbackModule(CallbackBase): + """ + This callback module provides per-task timing, ongoing playbook elapsed time + and ordered list of top 20 longest running tasks at end. + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'ansible.posix.profile_tasks' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + self.stats = collections.OrderedDict() + self.current = None + + self.sort_order = None + self.task_output_limit = None + + super(CallbackModule, self).__init__() + + def set_options(self, task_keys=None, var_options=None, direct=None): + + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + self.sort_order = self.get_option('sort_order') + if self.sort_order is not None: + if self.sort_order == 'ascending': + self.sort_order = False + elif self.sort_order == 'descending': + self.sort_order = True + elif self.sort_order == 'none': + self.sort_order = None + + self.task_output_limit = self.get_option('output_limit') + if self.task_output_limit is not None: + if self.task_output_limit == 'all': + self.task_output_limit = None + else: + self.task_output_limit = int(self.task_output_limit) + 
+ def _record_task(self, task): + """ + Logs the start of each task + """ + self._display.display(tasktime()) + timestamp(self) + + # Record the start time of the current task + self.current = task._uuid + self.stats[self.current] = {'time': time.time(), 'name': task.get_name()} + if self._display.verbosity >= 2: + self.stats[self.current]['path'] = task.get_path() + + def v2_playbook_on_task_start(self, task, is_conditional): + self._record_task(task) + + def v2_playbook_on_handler_task_start(self, task): + self._record_task(task) + + def playbook_on_setup(self): + self._display.display(tasktime()) + + def playbook_on_stats(self, stats): + self._display.display(tasktime()) + self._display.display(filled("", fchar="=")) + + timestamp(self) + self.current = None + + results = self.stats.items() + + # Sort the tasks by the specified sort + if self.sort_order is not None: + results = sorted( + self.stats.items(), + key=lambda x: x[1]['time'], + reverse=self.sort_order, + ) + + # Display the number of tasks specified or the default of 20 + results = results[:self.task_output_limit] + + # Print the timings + for uuid, result in results: + msg = u"{0:-<{2}}{1:->9}".format(result['name'] + u' ', u' {0:.02f}s'.format(result['time']), self._display.columns - 9) + if 'path' in result: + msg += u"\n{0:-<{1}}".format(result['path'] + u' ', self._display.columns) + self._display.display(msg) diff --git a/plugins/callback/skippy.py b/plugins/callback/skippy.py new file mode 100644 index 0000000..cdbf8dc --- /dev/null +++ b/plugins/callback/skippy.py @@ -0,0 +1,43 @@ +# (c) 2012-2014, Michael DeHaan +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: skippy + callback_type: stdout + requirements: + - set as main display callback + short_description: 
Ansible screen output that ignores skipped status + deprecated: + why: The 'default' callback plugin now supports this functionality + removed_in: '2.11' + alternative: "'default' callback plugin with 'display_skipped_hosts = no' option" + extends_documentation_fragment: + - default_callback + description: + - This callback does the same as the default except it does not output skipped host/task/item status +''' + +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default + + +class CallbackModule(CallbackModule_default): + + ''' + This is the default callback interface, which simply prints messages + to stdout when new callback events are received. + ''' + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'ansible.posix.skippy' + + def v2_runner_on_skipped(self, result): + pass + + def v2_runner_item_on_skipped(self, result): + pass diff --git a/plugins/callback/timer.py b/plugins/callback/timer.py new file mode 100644 index 0000000..7b38d10 --- /dev/null +++ b/plugins/callback/timer.py @@ -0,0 +1,49 @@ +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: timer + callback_type: aggregate + requirements: + - whitelist in configuration + short_description: Adds time to play stats + description: + - This callback just adds total play duration to the play stats. +''' + +from datetime import datetime + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + """ + This callback module tells you how long your plays ran for. 
+ """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'ansible.posix.timer' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + + super(CallbackModule, self).__init__() + + self.start_time = datetime.utcnow() + + def days_hours_minutes_seconds(self, runtime): + minutes = (runtime.seconds // 60) % 60 + r_seconds = runtime.seconds % 60 + return runtime.days, runtime.seconds // 3600, minutes, r_seconds + + def playbook_on_stats(self, stats): + self.v2_playbook_on_stats(stats) + + def v2_playbook_on_stats(self, stats): + end_time = datetime.utcnow() + runtime = end_time - self.start_time + self._display.display("Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(runtime))) diff --git a/plugins/module_utils/__init__.py b/plugins/module_utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/plugins/module_utils/ismount.py b/plugins/module_utils/ismount.py new file mode 100644 index 0000000..62feb35 --- /dev/null +++ b/plugins/module_utils/ismount.py @@ -0,0 +1,90 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is based on +# Lib/posixpath.py of cpython +# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +# +# 1. This LICENSE AGREEMENT is between the Python Software Foundation +# ("PSF"), and the Individual or Organization ("Licensee") accessing and +# otherwise using this software ("Python") in source or binary form and +# its associated documentation. +# +# 2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +# analyze, test, perform and/or display publicly, prepare derivative works, +# distribute, and otherwise use Python alone or in any derivative version, +# provided, however, that PSF's License Agreement and PSF's notice of copyright, +# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" +# are retained in Python alone or in any derivative version prepared by Licensee. +# +# 3. In the event Licensee prepares a derivative work that is based on +# or incorporates Python or any part thereof, and wants to make +# the derivative work available to others as provided herein, then +# Licensee hereby agrees to include in any such work a brief summary of +# the changes made to Python. +# +# 4. PSF is making Python available to Licensee on an "AS IS" +# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +# INFRINGE ANY THIRD PARTY RIGHTS. +# +# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. +# +# 6. This License Agreement will automatically terminate upon a material +# breach of its terms and conditions. +# +# 7. Nothing in this License Agreement shall be deemed to create any +# relationship of agency, partnership, or joint venture between PSF and +# Licensee. 
This License Agreement does not grant permission to use PSF +# trademarks or trade name in a trademark sense to endorse or promote +# products or services of Licensee, or any third party. +# +# 8. By copying, installing or otherwise using Python, Licensee +# agrees to be bound by the terms and conditions of this License +# Agreement. + +import os + + +def ismount(path): + """Test whether a path is a mount point + This is a copy of the upstream version of ismount(). Originally this was copied here as a workaround + until Python issue 2466 was fixed. Now it is here so this will work on older versions of Python + that may not have the upstream fix. + https://github.com/ansible/ansible-modules-core/issues/2186 + http://bugs.python.org/issue2466 + """ + try: + s1 = os.lstat(path) + except (OSError, ValueError): + # It doesn't exist -- so not a mount point. :-) + return False + else: + # A symlink can never be a mount point + if os.path.stat.S_ISLNK(s1.st_mode): + return False + + if isinstance(path, bytes): + parent = os.path.join(path, b'..') + else: + parent = os.path.join(path, '..') + parent = os.path.realpath(parent) + try: + s2 = os.lstat(parent) + except (OSError, ValueError): + return False + + dev1 = s1.st_dev + dev2 = s2.st_dev + if dev1 != dev2: + return True # path/.. on a different device as path + ino1 = s1.st_ino + ino2 = s2.st_ino + if ino1 == ino2: + return True # path/.. 
is the same i-node as path + return False diff --git a/plugins/modules/__init__.py b/plugins/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/plugins/modules/acl.py b/plugins/modules/acl.py new file mode 100644 index 0000000..0aaf85c --- /dev/null +++ b/plugins/modules/acl.py @@ -0,0 +1,377 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: acl +short_description: Set and retrieve file ACL information. +description: +- Set and retrieve file ACL information. +options: + path: + description: + - The full path of the file or object. + type: path + required: yes + aliases: [ name ] + state: + description: + - Define whether the ACL should be present or not. + - The C(query) state gets the current ACL without changing it, for use in C(register) operations. + choices: [ absent, present, query ] + default: query + follow: + description: + - Whether to follow symlinks on the path if a symlink is encountered. + type: bool + default: yes + default: + description: + - If the target is a directory, setting this to C(yes) will make it the default ACL for entities created inside the directory. + - Setting C(default) to C(yes) causes an error if the path is a file. + type: bool + default: no + entity: + description: + - The actual user or group that the ACL applies to when matching entity types user or group are selected. + etype: + description: + - The entity type of the ACL to apply, see C(setfacl) documentation for more info. 
+ choices: [ group, mask, other, user ] + permissions: + description: + - The permissions to apply/remove can be any combination of C(r), C(w) and C(x) (read, write and execute respectively) + entry: + description: + - DEPRECATED. + - The ACL to set or remove. + - This must always be quoted in the form of C(::). + - The qualifier may be empty for some types, but the type and perms are always required. + - C(-) can be used as placeholder when you do not care about permissions. + - This is now superseded by entity, type and permissions fields. + recursive: + description: + - Recursively sets the specified ACL. + - Incompatible with C(state=query). + type: bool + default: no + use_nfsv4_acls: + description: + - Use NFSv4 ACLs instead of POSIX ACLs. + type: bool + default: no + recalculate_mask: + description: + - Select if and when to recalculate the effective right masks of the files. + - See C(setfacl) documentation for more info. + - Incompatible with C(state=query). + choices: [ default, mask, no_mask ] + default: default +author: +- Brian Coca (@bcoca) +- Jérémie Astori (@astorije) +notes: +- The C(acl) module requires that ACLs are enabled on the target filesystem and that the C(setfacl) and C(getfacl) binaries are installed. +- As of Ansible 2.0, this module only supports Linux distributions. +- As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well. 
+''' + +EXAMPLES = r''' +- name: Grant user Joe read access to a file + acl: + path: /etc/foo.conf + entity: joe + etype: user + permissions: r + state: present + +- name: Removes the ACL for Joe on a specific file + acl: + path: /etc/foo.conf + entity: joe + etype: user + state: absent + +- name: Sets default ACL for joe on /etc/foo.d/ + acl: + path: /etc/foo.d/ + entity: joe + etype: user + permissions: rw + default: yes + state: present + +- name: Same as previous but using entry shorthand + acl: + path: /etc/foo.d/ + entry: default:user:joe:rw- + state: present + +- name: Obtain the ACL for a specific file + acl: + path: /etc/foo.conf + register: acl_info +''' + +RETURN = r''' +acl: + description: Current ACL on provided path (after changes, if any) + returned: success + type: list + sample: [ "user::rwx", "group::rwx", "other::rwx" ] +''' + +import os +import platform + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +def split_entry(entry): + ''' splits entry and ensures normalized return''' + + a = entry.split(':') + + d = None + if entry.lower().startswith("d"): + d = True + a.pop(0) + + if len(a) == 2: + a.append(None) + + t, e, p = a + t = t.lower() + + if t.startswith("u"): + t = "user" + elif t.startswith("g"): + t = "group" + elif t.startswith("m"): + t = "mask" + elif t.startswith("o"): + t = "other" + else: + t = None + + return [d, t, e, p] + + +def build_entry(etype, entity, permissions=None, use_nfsv4_acls=False): + '''Builds and returns an entry string. 
Does not include the permissions bit if they are not provided.''' + if use_nfsv4_acls: + return ':'.join([etype, entity, permissions, 'allow']) + + if permissions: + return etype + ':' + entity + ':' + permissions + + return etype + ':' + entity + + +def build_command(module, mode, path, follow, default, recursive, recalculate_mask, entry=''): + '''Builds and returns a getfacl/setfacl command.''' + if mode == 'set': + cmd = [module.get_bin_path('setfacl', True)] + cmd.extend(['-m', entry]) + elif mode == 'rm': + cmd = [module.get_bin_path('setfacl', True)] + cmd.extend(['-x', entry]) + else: # mode == 'get' + cmd = [module.get_bin_path('getfacl', True)] + # prevents absolute path warnings and removes headers + if platform.system().lower() == 'linux': + cmd.append('--omit-header') + cmd.append('--absolute-names') + + if recursive: + cmd.append('--recursive') + + if recalculate_mask == 'mask' and mode in ['set', 'rm']: + cmd.append('--mask') + elif recalculate_mask == 'no_mask' and mode in ['set', 'rm']: + cmd.append('--no-mask') + + if not follow: + if platform.system().lower() == 'linux': + cmd.append('--physical') + elif platform.system().lower() == 'freebsd': + cmd.append('-h') + + if default: + cmd.insert(1, '-d') + + cmd.append(path) + return cmd + + +def acl_changed(module, cmd): + '''Returns true if the provided command affects the existing ACLs, false otherwise.''' + # FreeBSD do not have a --test flag, so by default, it is safer to always say "true" + if platform.system().lower() == 'freebsd': + return True + + cmd = cmd[:] # lists are mutables so cmd would be overwritten without this + cmd.insert(1, '--test') + lines = run_acl(module, cmd) + + for line in lines: + if not line.endswith('*,*'): + return True + return False + + +def run_acl(module, cmd, check_rc=True): + + try: + (rc, out, err) = module.run_command(cmd, check_rc=check_rc) + except Exception as e: + module.fail_json(msg=to_native(e)) + + lines = [] + for l in out.splitlines(): + if not 
l.startswith('#'): + lines.append(l.strip()) + + if lines and not lines[-1].split(): + # trim last line only when it is empty + return lines[:-1] + + return lines + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True, aliases=['name']), + entry=dict(type='str'), + entity=dict(type='str', default=''), + etype=dict( + type='str', + choices=['group', 'mask', 'other', 'user'], + ), + permissions=dict(type='str'), + state=dict( + type='str', + default='query', + choices=['absent', 'present', 'query'], + ), + follow=dict(type='bool', default=True), + default=dict(type='bool', default=False), + recursive=dict(type='bool', default=False), + recalculate_mask=dict( + type='str', + default='default', + choices=['default', 'mask', 'no_mask'], + ), + use_nfsv4_acls=dict(type='bool', default=False) + ), + supports_check_mode=True, + ) + + if platform.system().lower() not in ['linux', 'freebsd']: + module.fail_json(msg="The acl module is not available on this system.") + + path = module.params.get('path') + entry = module.params.get('entry') + entity = module.params.get('entity') + etype = module.params.get('etype') + permissions = module.params.get('permissions') + state = module.params.get('state') + follow = module.params.get('follow') + default = module.params.get('default') + recursive = module.params.get('recursive') + recalculate_mask = module.params.get('recalculate_mask') + use_nfsv4_acls = module.params.get('use_nfsv4_acls') + + if not os.path.exists(path): + module.fail_json(msg="Path not found or not accessible.") + + if state == 'query': + if recursive: + module.fail_json(msg="'recursive' MUST NOT be set when 'state=query'.") + + if recalculate_mask in ['mask', 'no_mask']: + module.fail_json(msg="'recalculate_mask' MUST NOT be set to 'mask' or 'no_mask' when 'state=query'.") + + if not entry: + if state == 'absent' and permissions: + module.fail_json(msg="'permissions' MUST NOT be set when 'state=absent'.") + + if 
state == 'absent' and not entity: + module.fail_json(msg="'entity' MUST be set when 'state=absent'.") + + if state in ['present', 'absent'] and not etype: + module.fail_json(msg="'etype' MUST be set when 'state=%s'." % state) + + if entry: + if etype or entity or permissions: + module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.") + + if state == 'present' and not entry.count(":") in [2, 3]: + module.fail_json(msg="'entry' MUST have 3 or 4 sections divided by ':' when 'state=present'.") + + if state == 'absent' and not entry.count(":") in [1, 2]: + module.fail_json(msg="'entry' MUST have 2 or 3 sections divided by ':' when 'state=absent'.") + + if state == 'query': + module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.") + + default_flag, etype, entity, permissions = split_entry(entry) + if default_flag is not None: + default = default_flag + + if platform.system().lower() == 'freebsd': + if recursive: + module.fail_json(msg="recursive is not supported on that platform.") + + changed = False + msg = "" + + if state == 'present': + entry = build_entry(etype, entity, permissions, use_nfsv4_acls) + command = build_command( + module, 'set', path, follow, + default, recursive, recalculate_mask, entry + ) + changed = acl_changed(module, command) + + if changed and not module.check_mode: + run_acl(module, command) + msg = "%s is present" % entry + + elif state == 'absent': + entry = build_entry(etype, entity, use_nfsv4_acls) + command = build_command( + module, 'rm', path, follow, + default, recursive, recalculate_mask, entry + ) + changed = acl_changed(module, command) + + if changed and not module.check_mode: + run_acl(module, command, False) + msg = "%s is absent" % entry + + elif state == 'query': + msg = "current acl" + + acl = run_acl( + module, + build_command(module, 'get', path, follow, default, recursive, recalculate_mask) + ) + + module.exit_json(changed=changed, msg=msg, acl=acl) + + +if __name__ == 
'__main__': + main() diff --git a/plugins/modules/at.py b/plugins/modules/at.py new file mode 100644 index 0000000..cd3d08f --- /dev/null +++ b/plugins/modules/at.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Richard Isaacson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: at +short_description: Schedule the execution of a command or script file via the at command +description: + - Use this module to schedule a command or script file to run once in the future. + - All jobs are executed in the 'a' queue. +options: + command: + description: + - A command to be executed in the future. + type: str + script_file: + description: + - An existing script file to be executed in the future. + type: str + count: + description: + - The count of units in the future to execute the command or script file. + type: int + required: true + units: + description: + - The type of units in the future to execute the command or script file. + type: str + required: true + choices: [ minutes, hours, days, weeks ] + state: + description: + - The state dictates if the command or script file should be evaluated as present(added) or absent(deleted). + type: str + choices: [ absent, present ] + default: present + unique: + description: + - If a matching job is present a new job will not be added. 
+ type: bool + default: no +requirements: + - at +author: +- Richard Isaacson (@risaacson) +''' + +EXAMPLES = ''' +- name: Schedule a command to execute in 20 minutes as root + at: + command: ls -d / >/dev/null + count: 20 + units: minutes + +- name: Match a command to an existing job and delete the job + at: + command: ls -d / >/dev/null + state: absent + +- name: Schedule a command to execute in 20 minutes making sure it is unique in the queue + at: + command: ls -d / >/dev/null + count: 20 + units: minutes + unique: yes +''' + +import os +import tempfile + +from ansible.module_utils.basic import AnsibleModule + + +def add_job(module, result, at_cmd, count, units, command, script_file): + at_command = "%s -f %s now + %s %s" % (at_cmd, script_file, count, units) + rc, out, err = module.run_command(at_command, check_rc=True) + if command: + os.unlink(script_file) + result['changed'] = True + + +def delete_job(module, result, at_cmd, command, script_file): + for matching_job in get_matching_jobs(module, at_cmd, script_file): + at_command = "%s -d %s" % (at_cmd, matching_job) + rc, out, err = module.run_command(at_command, check_rc=True) + result['changed'] = True + if command: + os.unlink(script_file) + module.exit_json(**result) + + +def get_matching_jobs(module, at_cmd, script_file): + matching_jobs = [] + + atq_cmd = module.get_bin_path('atq', True) + + # Get list of job numbers for the user. + atq_command = "%s" % atq_cmd + rc, out, err = module.run_command(atq_command, check_rc=True) + current_jobs = out.splitlines() + if len(current_jobs) == 0: + return matching_jobs + + # Read script_file into a string. + with open(script_file) as script_fh: + script_file_string = script_fh.read().strip() + + # Loop through the jobs. + # If the script text is contained in a job add job number to list. 
+ for current_job in current_jobs: + split_current_job = current_job.split() + at_command = "%s -c %s" % (at_cmd, split_current_job[0]) + rc, out, err = module.run_command(at_command, check_rc=True) + if script_file_string in out: + matching_jobs.append(split_current_job[0]) + + # Return the list. + return matching_jobs + + +def create_tempfile(command): + filed, script_file = tempfile.mkstemp(prefix='at') + fileh = os.fdopen(filed, 'w') + fileh.write(command) + fileh.close() + return script_file + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + command=dict(type='str'), + script_file=dict(type='str'), + count=dict(type='int'), + units=dict(type='str', choices=['minutes', 'hours', 'days', 'weeks']), + state=dict(type='str', default='present', choices=['absent', 'present']), + unique=dict(type='bool', default=False), + ), + mutually_exclusive=[['command', 'script_file']], + required_one_of=[['command', 'script_file']], + supports_check_mode=False, + ) + + at_cmd = module.get_bin_path('at', True) + + command = module.params['command'] + script_file = module.params['script_file'] + count = module.params['count'] + units = module.params['units'] + state = module.params['state'] + unique = module.params['unique'] + + if (state == 'present') and (not count or not units): + module.fail_json(msg="present state requires count and units") + + result = dict( + changed=False, + state=state, + ) + + # If command transform it into a script_file + if command: + script_file = create_tempfile(command) + + # if absent remove existing and return + if state == 'absent': + delete_job(module, result, at_cmd, command, script_file) + + # if unique if existing return unchanged + if unique: + if len(get_matching_jobs(module, at_cmd, script_file)) != 0: + if command: + os.unlink(script_file) + module.exit_json(**result) + + result['script_file'] = script_file + result['count'] = count + result['units'] = units + + add_job(module, result, at_cmd, count, units, command, 
script_file) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/authorized_key.py b/plugins/modules/authorized_key.py new file mode 100644 index 0000000..30f52ca --- /dev/null +++ b/plugins/modules/authorized_key.py @@ -0,0 +1,672 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012, Brad Olson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'core'} + + +DOCUMENTATION = r''' +--- +module: authorized_key +short_description: Adds or removes an SSH authorized key +description: + - Adds or removes SSH authorized keys for particular user accounts. +options: + user: + description: + - The username on the remote host whose authorized_keys file will be modified. + type: str + required: true + key: + description: + - The SSH public key(s), as a string or (since Ansible 1.9) url (https://github.com/username.keys). + type: str + required: true + path: + description: + - Alternate path to the authorized_keys file. + - When unset, this value defaults to I(~/.ssh/authorized_keys). + type: path + manage_dir: + description: + - Whether this module should manage the directory of the authorized key file. + - If set to C(yes), the module will create the directory, as well as set the owner and permissions + of an existing directory. + - Be sure to set C(manage_dir=no) if you are using an alternate directory for authorized_keys, + as set with C(path), since you could lock yourself out of SSH access. + - See the example below. + type: bool + default: yes + state: + description: + - Whether the given key (with the given key_options) should or should not be in the file. 
+ type: str + choices: [ absent, present ] + default: present + key_options: + description: + - A string of ssh key options to be prepended to the key in the authorized_keys file. + exclusive: + description: + - Whether to remove all other non-specified keys from the authorized_keys file. + - Multiple keys can be specified in a single C(key) string value by separating them by newlines. + - This option is not loop aware, so if you use C(with_) , it will be exclusive per iteration of the loop. + - If you want multiple keys in the file you need to pass them all to C(key) in a single batch as mentioned above. + type: bool + default: no + validate_certs: + description: + - This only applies if using a https url as the source of the keys. + - If set to C(no), the SSL certificates will not be validated. + - This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site. + - Prior to 2.1 the code worked as if this was set to C(yes). + type: bool + default: yes + comment: + description: + - Change the comment on the public key. + - Rewriting the comment is useful in cases such as fetching it from GitHub or GitLab. + - If no comment is specified, the existing comment will be kept. + type: str + follow: + description: + - Follow path symlink instead of replacing it. 
+ type: bool + default: no +author: Ansible Core Team +''' + +EXAMPLES = r''' +- name: Set authorized key taken from file + authorized_key: + user: charlie + state: present + key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" + +- name: Set authorized keys taken from url + authorized_key: + user: charlie + state: present + key: https://github.com/charlie.keys + +- name: Set authorized key in alternate location + authorized_key: + user: charlie + state: present + key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" + path: /etc/ssh/authorized_keys/charlie + manage_dir: False + +- name: Set up multiple authorized keys + authorized_key: + user: deploy + state: present + key: '{{ item }}' + with_file: + - public_keys/doe-jane + - public_keys/doe-john + +- name: Set authorized key defining key options + authorized_key: + user: charlie + state: present + key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" + key_options: 'no-port-forwarding,from="10.0.1.1"' + +- name: Set authorized key without validating the TLS/SSL certificates + authorized_key: + user: charlie + state: present + key: https://github.com/user.keys + validate_certs: False + +- name: Set authorized key, removing all the authorized keys already set + authorized_key: + user: root + key: "{{ lookup('file', 'public_keys/doe-jane') }}" + state: present + exclusive: True + +- name: Set authorized key for user ubuntu copying it from current user + authorized_key: + user: ubuntu + state: present + key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}" +''' + +RETURN = r''' +exclusive: + description: If the key has been forced to be exclusive or not. + returned: success + type: bool + sample: False +key: + description: The key that the module was running against. + returned: success + type: str + sample: https://github.com/user.keys +key_option: + description: Key options related to the key. 
+ returned: success + type: str + sample: null +keyfile: + description: Path for authorized key file. + returned: success + type: str + sample: /home/user/.ssh/authorized_keys +manage_dir: + description: Whether this module managed the directory of the authorized key file. + returned: success + type: bool + sample: True +path: + description: Alternate path to the authorized_keys file + returned: success + type: str + sample: null +state: + description: Whether the given key (with the given key_options) should or should not be in the file + returned: success + type: str + sample: present +unique: + description: Whether the key is unique + returned: success + type: bool + sample: false +user: + description: The username on the remote host whose authorized_keys file will be modified + returned: success + type: str + sample: user +validate_certs: + description: This only applies if using a https url as the source of the keys. If set to C(no), the SSL certificates will not be validated. + returned: success + type: bool + sample: true +''' + +# Makes sure the public key line is present or absent in the user's .ssh/authorized_keys. +# +# Arguments +# ========= +# user = username +# key = line to add to authorized_keys for user +# path = path to the user's authorized_keys file (default: ~/.ssh/authorized_keys) +# manage_dir = whether to create, and control ownership of the directory (default: true) +# state = absent|present (default: present) +# +# see example in examples/playbooks + +import os +import pwd +import os.path +import tempfile +import re +import shlex +from operator import itemgetter + +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +class keydict(dict): + + """ a dictionary that maintains the order of keys as they are added + + This has become an abuse of the dict interface. 
Probably should be + rewritten to be an entirely custom object with methods instead of + bracket-notation. + + Our requirements are for a data structure that: + * Preserves insertion order + * Can store multiple values for a single key. + + The present implementation has the following functions used by the rest of + the code: + + * __setitem__(): to add a key=value. The value can never be disassociated + with the key, only new values can be added in addition. + * items(): to retrieve the key, value pairs. + + Other dict methods should work but may be surprising. For instance, there + will be multiple keys that are the same in keys() and __getitem__() will + return a list of the values that have been set via __setitem__. + """ + + # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class + + def __init__(self, *args, **kw): + super(keydict, self).__init__(*args, **kw) + self.itemlist = list(super(keydict, self).keys()) + + def __setitem__(self, key, value): + self.itemlist.append(key) + if key in self: + self[key].append(value) + else: + super(keydict, self).__setitem__(key, [value]) + + def __iter__(self): + return iter(self.itemlist) + + def keys(self): + return self.itemlist + + def _item_generator(self): + indexes = {} + for key in self.itemlist: + if key in indexes: + indexes[key] += 1 + else: + indexes[key] = 0 + yield key, self[key][indexes[key]] + + def iteritems(self): + raise NotImplementedError("Do not use this as it's not available on py3") + + def items(self): + return list(self._item_generator()) + + def itervalues(self): + raise NotImplementedError("Do not use this as it's not available on py3") + + def values(self): + return [item[1] for item in self.items()] + + +def keyfile(module, user, write=False, path=None, manage_dir=True, follow=False): + """ + Calculate name of authorized keys file, optionally creating the + directories and file, properly setting permissions. 
+ + :param str user: name of user in passwd file + :param bool write: if True, write changes to authorized_keys file (creating directories if needed) + :param str path: if not None, use provided path rather than default of '~user/.ssh/authorized_keys' + :param bool manage_dir: if True, create and set ownership of the parent dir of the authorized_keys file + :param bool follow: if True symlinks will be followed and not replaced + :return: full path string to authorized_keys for user + """ + + if module.check_mode and path is not None: + keysfile = path + + if follow: + return os.path.realpath(keysfile) + + return keysfile + + try: + user_entry = pwd.getpwnam(user) + except KeyError as e: + if module.check_mode and path is None: + module.fail_json(msg="Either user must exist or you must provide full path to key file in check mode") + module.fail_json(msg="Failed to lookup user %s: %s" % (user, to_native(e))) + if path is None: + homedir = user_entry.pw_dir + sshdir = os.path.join(homedir, ".ssh") + keysfile = os.path.join(sshdir, "authorized_keys") + else: + sshdir = os.path.dirname(path) + keysfile = path + + if follow: + keysfile = os.path.realpath(keysfile) + + if not write or module.check_mode: + return keysfile + + uid = user_entry.pw_uid + gid = user_entry.pw_gid + + if manage_dir: + if not os.path.exists(sshdir): + os.mkdir(sshdir, int('0700', 8)) + if module.selinux_enabled(): + module.set_default_selinux_context(sshdir, False) + os.chown(sshdir, uid, gid) + os.chmod(sshdir, int('0700', 8)) + + if not os.path.exists(keysfile): + basedir = os.path.dirname(keysfile) + if not os.path.exists(basedir): + os.makedirs(basedir) + try: + f = open(keysfile, "w") # touches file so we can set ownership and perms + finally: + f.close() + if module.selinux_enabled(): + module.set_default_selinux_context(keysfile, False) + + try: + os.chown(keysfile, uid, gid) + os.chmod(keysfile, int('0600', 8)) + except OSError: + pass + + return keysfile + + +def parseoptions(module, 
options): + ''' + reads a string containing ssh-key options + and returns a dictionary of those options + ''' + options_dict = keydict() # ordered dict + if options: + # the following regex will split on commas while + # ignoring those commas that fall within quotes + regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''') + parts = regex.split(options)[1:-1] + for part in parts: + if "=" in part: + (key, value) = part.split("=", 1) + options_dict[key] = value + elif part != ",": + options_dict[part] = None + + return options_dict + + +def parsekey(module, raw_key, rank=None): + ''' + parses a key, which may or may not contain a list + of ssh-key options at the beginning + + rank indicates the keys original ordering, so that + it can be written out in the same order. + ''' + + VALID_SSH2_KEY_TYPES = [ + 'ssh-ed25519', + 'ecdsa-sha2-nistp256', + 'ecdsa-sha2-nistp384', + 'ecdsa-sha2-nistp521', + 'ssh-dss', + 'ssh-rsa', + ] + + options = None # connection options + key = None # encrypted key string + key_type = None # type of ssh key + type_index = None # index of keytype in key string|list + + # remove comment yaml escapes + raw_key = raw_key.replace(r'\#', '#') + + # split key safely + lex = shlex.shlex(raw_key) + lex.quotes = [] + lex.commenters = '' # keep comment hashes + lex.whitespace_split = True + key_parts = list(lex) + + if key_parts and key_parts[0] == '#': + # comment line, invalid line, etc. 
+ return (raw_key, 'skipped', None, None, rank) + + for i in range(0, len(key_parts)): + if key_parts[i] in VALID_SSH2_KEY_TYPES: + type_index = i + key_type = key_parts[i] + break + + # check for options + if type_index is None: + return None + elif type_index > 0: + options = " ".join(key_parts[:type_index]) + + # parse the options (if any) + options = parseoptions(module, options) + + # get key after the type index + key = key_parts[(type_index + 1)] + + # set comment to everything after the key + if len(key_parts) > (type_index + 1): + comment = " ".join(key_parts[(type_index + 2):]) + + return (key, key_type, options, comment, rank) + + +def readfile(filename): + + if not os.path.isfile(filename): + return '' + + f = open(filename) + try: + return f.read() + finally: + f.close() + + +def parsekeys(module, lines): + keys = {} + for rank_index, line in enumerate(lines.splitlines(True)): + key_data = parsekey(module, line, rank=rank_index) + if key_data: + # use key as identifier + keys[key_data[0]] = key_data + else: + # for an invalid line, just set the line + # dict key to the line so it will be re-output later + keys[line] = (line, 'skipped', None, None, rank_index) + return keys + + +def writefile(module, filename, content): + + fd, tmp_path = tempfile.mkstemp('', 'tmp', os.path.dirname(filename)) + f = open(tmp_path, "w") + + try: + f.write(content) + except IOError as e: + module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(e))) + f.close() + module.atomic_move(tmp_path, filename) + + +def serialize(keys): + lines = [] + new_keys = keys.values() + # order the new_keys by their original ordering, via the rank item in the tuple + ordered_new_keys = sorted(new_keys, key=itemgetter(4)) + + for key in ordered_new_keys: + try: + (keyhash, key_type, options, comment, rank) = key + + option_str = "" + if options: + option_strings = [] + for option_key, value in options.items(): + if value is None: + option_strings.append("%s" % option_key) 
+ else: + option_strings.append("%s=%s" % (option_key, value)) + option_str = ",".join(option_strings) + option_str += " " + + # comment line or invalid line, just leave it + if not key_type: + key_line = key + + if key_type == 'skipped': + key_line = key[0] + else: + key_line = "%s%s %s %s\n" % (option_str, key_type, keyhash, comment) + except Exception: + key_line = key + lines.append(key_line) + return ''.join(lines) + + +def enforce_state(module, params): + """ + Add or remove key. + """ + + user = params["user"] + key = params["key"] + path = params.get("path", None) + manage_dir = params.get("manage_dir", True) + state = params.get("state", "present") + key_options = params.get("key_options", None) + exclusive = params.get("exclusive", False) + comment = params.get("comment", None) + follow = params.get('follow', False) + error_msg = "Error getting key from: %s" + + # if the key is a url, request it and use it as key source + if key.startswith("http"): + try: + resp, info = fetch_url(module, key) + if info['status'] != 200: + module.fail_json(msg=error_msg % key) + else: + key = resp.read() + except Exception: + module.fail_json(msg=error_msg % key) + + # resp.read gives bytes on python3, convert to native string type + key = to_native(key, errors='surrogate_or_strict') + + # extract individual keys into an array, skipping blank lines and comments + new_keys = [s for s in key.splitlines() if s and not s.startswith('#')] + + # check current state -- just get the filename, don't create file + do_write = False + params["keyfile"] = keyfile(module, user, do_write, path, manage_dir) + existing_content = readfile(params["keyfile"]) + existing_keys = parsekeys(module, existing_content) + + # Add a place holder for keys that should exist in the state=present and + # exclusive=true case + keys_to_exist = [] + + # we will order any non exclusive new keys higher than all the existing keys, + # resulting in the new keys being written to the key file after existing keys, 
but + # in the order of new_keys + max_rank_of_existing_keys = len(existing_keys) + + # Check our new keys, if any of them exist we'll continue. + for rank_index, new_key in enumerate(new_keys): + parsed_new_key = parsekey(module, new_key, rank=rank_index) + + if not parsed_new_key: + module.fail_json(msg="invalid key specified: %s" % new_key) + + if key_options is not None: + parsed_options = parseoptions(module, key_options) + # rank here is the rank in the provided new keys, which may be unrelated to rank in existing_keys + parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3], parsed_new_key[4]) + + if comment is not None: + parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_new_key[2], comment, parsed_new_key[4]) + + matched = False + non_matching_keys = [] + + if parsed_new_key[0] in existing_keys: + # Then we check if everything (except the rank at index 4) matches, including + # the key type and options. If not, we append this + # existing key to the non-matching list + # We only want it to match everything when the state + # is present + if parsed_new_key[:4] != existing_keys[parsed_new_key[0]][:4] and state == "present": + non_matching_keys.append(existing_keys[parsed_new_key[0]]) + else: + matched = True + + # handle idempotent state=present + if state == "present": + keys_to_exist.append(parsed_new_key[0]) + if len(non_matching_keys) > 0: + for non_matching_key in non_matching_keys: + if non_matching_key[0] in existing_keys: + del existing_keys[non_matching_key[0]] + do_write = True + + # new key that didn't exist before. Where should it go in the ordering? 
+ if not matched: + # We want the new key to be after existing keys if not exclusive (rank > max_rank_of_existing_keys) + total_rank = max_rank_of_existing_keys + parsed_new_key[4] + # replace existing key tuple with new parsed key with its total rank + existing_keys[parsed_new_key[0]] = (parsed_new_key[0], parsed_new_key[1], parsed_new_key[2], parsed_new_key[3], total_rank) + do_write = True + + elif state == "absent": + if not matched: + continue + del existing_keys[parsed_new_key[0]] + do_write = True + + # remove all other keys to honor exclusive + # for 'exclusive', make sure keys are written in the order the new keys were + if state == "present" and exclusive: + to_remove = frozenset(existing_keys).difference(keys_to_exist) + for key in to_remove: + del existing_keys[key] + do_write = True + + if do_write: + filename = keyfile(module, user, do_write, path, manage_dir, follow) + new_content = serialize(existing_keys) + + diff = None + if module._diff: + diff = { + 'before_header': params['keyfile'], + 'after_header': filename, + 'before': existing_content, + 'after': new_content, + } + params['diff'] = diff + + if module.check_mode: + module.exit_json(changed=True, diff=diff) + writefile(module, filename, new_content) + params['changed'] = True + else: + if module.check_mode: + module.exit_json(changed=False) + + return params + + +def main(): + module = AnsibleModule( + argument_spec=dict( + user=dict(type='str', required=True), + key=dict(type='str', required=True), + path=dict(type='path'), + manage_dir=dict(type='bool', default=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + key_options=dict(type='str'), + exclusive=dict(type='bool', default=False), + comment=dict(type='str'), + validate_certs=dict(type='bool', default=True), + follow=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + results = enforce_state(module, module.params) + module.exit_json(**results) + + +if __name__ == '__main__': + 
main() diff --git a/plugins/modules/mount.py b/plugins/modules/mount.py new file mode 100644 index 0000000..b9be7f9 --- /dev/null +++ b/plugins/modules/mount.py @@ -0,0 +1,764 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012, Red Hat, inc +# Written by Seth Vidal +# based on the mount modules from salt and puppet +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: mount +short_description: Control active and configured mount points +description: + - This module controls active and configured mount points in C(/etc/fstab). +author: + - Ansible Core Team + - Seth Vidal (@skvidal) +options: + path: + description: + - Path to the mount point (e.g. C(/mnt/files)). + - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name). + type: path + required: true + aliases: [ name ] + src: + description: + - Device to be mounted on I(path). + - Required when I(state) set to C(present) or C(mounted). + type: path + fstype: + description: + - Filesystem type. + - Required when I(state) is C(present) or C(mounted). + type: str + opts: + description: + - Mount options (see fstab(5), or vfstab(4) on Solaris). + type: str + dump: + description: + - Dump (see fstab(5)). + - Note that if set to C(null) and I(state) set to C(present), + it will cease to work and duplicate entries will be made + with subsequent runs. + - Has no effect on Solaris systems. + type: str + default: 0 + passno: + description: + - Passno (see fstab(5)). + - Note that if set to C(null) and I(state) set to C(present), + it will cease to work and duplicate entries will be made + with subsequent runs. + - Deprecated on Solaris systems. 
+ type: str + default: 0 + state: + description: + - If C(mounted), the device will be actively mounted and appropriately + configured in I(fstab). If the mount point is not present, the mount + point will be created. + - If C(unmounted), the device will be unmounted without changing I(fstab). + - C(present) only specifies that the device is to be configured in + I(fstab) and does not trigger or require a mount. + - C(absent) specifies that the device mount's entry will be removed from + I(fstab) and will also unmount the device and remove the mount + point. + - C(remounted) specifies that the device will be remounted for when you + want to force a refresh on the mount itself (added in 2.9). This will + always return changed=true. + type: str + required: true + choices: [ absent, mounted, present, unmounted, remounted ] + fstab: + description: + - File to use instead of C(/etc/fstab). + - You should not use this option unless you really know what you are doing. + - This might be useful if you need to configure mountpoints in a chroot environment. + - OpenBSD does not allow specifying alternate fstab files with mount so do not + use this on OpenBSD with any state that operates on the live filesystem. + - This parameter defaults to /etc/fstab or /etc/vfstab on Solaris. + type: str + boot: + description: + - Determines if the filesystem should be mounted on boot. + - Only applies to Solaris systems. + type: bool + default: yes + backup: + description: + - Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + type: bool + default: no +notes: + - As of Ansible 2.3, the I(name) option has been changed to I(path) as + default, but I(name) still works as well. 
+''' + +EXAMPLES = r''' +# Before 2.3, option 'name' was used instead of 'path' +- name: Mount DVD read-only + mount: + path: /mnt/dvd + src: /dev/sr0 + fstype: iso9660 + opts: ro,noauto + state: present + +- name: Mount up device by label + mount: + path: /srv/disk + src: LABEL=SOME_LABEL + fstype: ext4 + state: present + +- name: Mount up device by UUID + mount: + path: /home + src: UUID=b3e48f45-f933-4c8e-a700-22a159ec9077 + fstype: xfs + opts: noatime + state: present + +- name: Unmount a mounted volume + mount: + path: /tmp/mnt-pnt + state: unmounted + +- name: Mount and bind a volume + mount: + path: /system/new_volume/boot + src: /boot + opts: bind + state: mounted + fstype: none + +- name: Mount an NFS volume + mount: + src: 192.168.1.100:/nfs/ssd/shared_data + path: /mnt/shared_data + opts: rw,sync,hard,intr + state: mounted + fstype: nfs +''' + + +import os +import platform + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.posix.plugins.module_utils.ismount import ismount +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + + +def write_fstab(module, lines, path): + if module.params['backup']: + module.backup_local(path) + + fs_w = open(path, 'w') + + for l in lines: + fs_w.write(l) + + fs_w.flush() + fs_w.close() + + +def _escape_fstab(v): + """Escape invalid characters in fstab fields. + + space (040) + ampersand (046) + backslash (134) + """ + + if isinstance(v, int): + return v + else: + return( + v. + replace('\\', '\\134'). + replace(' ', '\\040'). 
+ replace('&', '\\046')) + + +def set_mount(module, args): + """Set/change a mount point location in fstab.""" + + to_write = [] + exists = False + changed = False + escaped_args = dict([(k, _escape_fstab(v)) for k, v in iteritems(args)]) + new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' + + if platform.system() == 'SunOS': + new_line = ( + '%(src)s - %(name)s %(fstype)s %(passno)s %(boot)s %(opts)s\n') + + for line in open(args['fstab'], 'r').readlines(): + if not line.strip(): + to_write.append(line) + + continue + + if line.strip().startswith('#'): + to_write.append(line) + + continue + + fields = line.split() + + # Check if we got a valid line for splitting + # (on Linux the 5th and the 6th field is optional) + if ( + platform.system() == 'SunOS' and len(fields) != 7 or + platform.system() == 'Linux' and len(fields) not in [4, 5, 6] or + platform.system() not in ['SunOS', 'Linux'] and len(fields) != 6): + to_write.append(line) + + continue + + ld = {} + + if platform.system() == 'SunOS': + ( + ld['src'], + dash, + ld['name'], + ld['fstype'], + ld['passno'], + ld['boot'], + ld['opts'] + ) = fields + else: + fields_labels = ['src', 'name', 'fstype', 'opts', 'dump', 'passno'] + + # The last two fields are optional on Linux so we fill in default values + ld['dump'] = 0 + ld['passno'] = 0 + + # Fill in the rest of the available fields + for i, field in enumerate(fields): + ld[fields_labels[i]] = field + + # Check if we found the correct line + if ( + ld['name'] != escaped_args['name'] or ( + # In the case of swap, check the src instead + 'src' in args and + ld['name'] == 'none' and + ld['fstype'] == 'swap' and + ld['src'] != args['src'])): + to_write.append(line) + + continue + + # If we got here we found a match - let's check if there is any + # difference + exists = True + args_to_check = ('src', 'fstype', 'opts', 'dump', 'passno') + + if platform.system() == 'SunOS': + args_to_check = ('src', 'fstype', 'passno', 'boot', 'opts') + + for t 
in args_to_check: + if ld[t] != escaped_args[t]: + ld[t] = escaped_args[t] + changed = True + + if changed: + to_write.append(new_line % ld) + else: + to_write.append(line) + + if not exists: + to_write.append(new_line % escaped_args) + changed = True + + if changed and not module.check_mode: + write_fstab(module, to_write, args['fstab']) + + return (args['name'], changed) + + +def unset_mount(module, args): + """Remove a mount point from fstab.""" + + to_write = [] + changed = False + escaped_name = _escape_fstab(args['name']) + + for line in open(args['fstab'], 'r').readlines(): + if not line.strip(): + to_write.append(line) + + continue + + if line.strip().startswith('#'): + to_write.append(line) + + continue + + # Check if we got a valid line for splitting + if ( + platform.system() == 'SunOS' and len(line.split()) != 7 or + platform.system() != 'SunOS' and len(line.split()) != 6): + to_write.append(line) + + continue + + ld = {} + + if platform.system() == 'SunOS': + ( + ld['src'], + dash, + ld['name'], + ld['fstype'], + ld['passno'], + ld['boot'], + ld['opts'] + ) = line.split() + else: + ( + ld['src'], + ld['name'], + ld['fstype'], + ld['opts'], + ld['dump'], + ld['passno'] + ) = line.split() + + if ( + ld['name'] != escaped_name or ( + # In the case of swap, check the src instead + 'src' in args and + ld['name'] == 'none' and + ld['fstype'] == 'swap' and + ld['src'] != args['src'])): + to_write.append(line) + + continue + + # If we got here we found a match - continue and mark changed + changed = True + + if changed and not module.check_mode: + write_fstab(module, to_write, args['fstab']) + + return (args['name'], changed) + + +def _set_fstab_args(fstab_file): + result = [] + + if ( + fstab_file and + fstab_file != '/etc/fstab' and + platform.system().lower() != 'sunos'): + if platform.system().lower().endswith('bsd'): + result.append('-F') + else: + result.append('-T') + + result.append(fstab_file) + + return result + + +def mount(module, args): + """Mount 
up a path or remount if needed.""" + + mount_bin = module.get_bin_path('mount', required=True) + name = args['name'] + cmd = [mount_bin] + + if platform.system().lower() == 'openbsd': + # Use module.params['fstab'] here as args['fstab'] has been set to the + # default value. + if module.params['fstab'] is not None: + module.fail_json( + msg=( + 'OpenBSD does not support alternate fstab files. Do not ' + 'specify the fstab parameter for OpenBSD hosts')) + else: + cmd += _set_fstab_args(args['fstab']) + + cmd += [name] + + rc, out, err = module.run_command(cmd) + + if rc == 0: + return 0, '' + else: + return rc, out + err + + +def umount(module, path): + """Unmount a path.""" + + umount_bin = module.get_bin_path('umount', required=True) + cmd = [umount_bin, path] + + rc, out, err = module.run_command(cmd) + + if rc == 0: + return 0, '' + else: + return rc, out + err + + +def remount(module, args): + """Try to use 'remount' first and fallback to (u)mount if unsupported.""" + mount_bin = module.get_bin_path('mount', required=True) + cmd = [mount_bin] + + # Multiplatform remount opts + if platform.system().lower().endswith('bsd'): + cmd += ['-u'] + else: + cmd += ['-o', 'remount'] + + if platform.system().lower() == 'openbsd': + # Use module.params['fstab'] here as args['fstab'] has been set to the + # default value. + if module.params['fstab'] is not None: + module.fail_json( + msg=( + 'OpenBSD does not support alternate fstab files. 
Do not ' + 'specify the fstab parameter for OpenBSD hosts')) + else: + cmd += _set_fstab_args(args['fstab']) + + cmd += [args['name']] + out = err = '' + + try: + if platform.system().lower().endswith('bsd'): + # Note: Forcing BSDs to do umount/mount due to BSD remount not + # working as expected (suspect bug in the BSD mount command) + # Interested contributor could rework this to use mount options on + # the CLI instead of relying on fstab + # https://github.com/ansible/ansible-modules-core/issues/5591 + rc = 1 + else: + rc, out, err = module.run_command(cmd) + except Exception: + rc = 1 + + msg = '' + + if rc != 0: + msg = out + err + rc, msg = umount(module, args['name']) + + if rc == 0: + rc, msg = mount(module, args) + + return rc, msg + + +# Note if we wanted to put this into module_utils we'd have to get permission +# from @jupeter -- https://github.com/ansible/ansible-modules-core/pull/2923 +# @jtyr -- https://github.com/ansible/ansible-modules-core/issues/4439 +# and @abadger to relicense from GPLv3+ +def is_bind_mounted(module, linux_mounts, dest, src=None, fstype=None): + """Return whether the dest is bind mounted + + :arg module: The AnsibleModule (used for helper functions) + :arg dest: The directory to be mounted under. This is the primary means + of identifying whether the destination is mounted. + :kwarg src: The source directory. If specified, this is used to help + ensure that we are detecting that the correct source is mounted there. + :kwarg fstype: The filesystem type. If specified this is also used to + help ensure that we are detecting the right mount. + :kwarg linux_mounts: Cached list of mounts for Linux. + :returns: True if the dest is mounted with src otherwise False. 
+ """ + + is_mounted = False + + if platform.system() == 'Linux' and linux_mounts is not None: + if src is None: + # That's for unmounted/absent + if dest in linux_mounts: + is_mounted = True + else: + if dest in linux_mounts: + is_mounted = linux_mounts[dest]['src'] == src + + else: + bin_path = module.get_bin_path('mount', required=True) + cmd = '%s -l' % bin_path + rc, out, err = module.run_command(cmd) + mounts = [] + + if len(out): + mounts = to_native(out).strip().split('\n') + + for mnt in mounts: + arguments = mnt.split() + + if ( + (arguments[0] == src or src is None) and + arguments[2] == dest and + (arguments[4] == fstype or fstype is None)): + is_mounted = True + + if is_mounted: + break + + return is_mounted + + +def get_linux_mounts(module, mntinfo_file="/proc/self/mountinfo"): + """Gather mount information""" + + try: + f = open(mntinfo_file) + except IOError: + return + + lines = map(str.strip, f.readlines()) + + try: + f.close() + except IOError: + module.fail_json(msg="Cannot close file %s" % mntinfo_file) + + mntinfo = {} + + for line in lines: + fields = line.split() + + record = { + 'id': int(fields[0]), + 'parent_id': int(fields[1]), + 'root': fields[3], + 'dst': fields[4], + 'opts': fields[5], + 'fs': fields[-3], + 'src': fields[-2] + } + + mntinfo[record['id']] = record + + mounts = {} + + for mnt in mntinfo.values(): + if mnt['parent_id'] != 1 and mnt['parent_id'] in mntinfo: + m = mntinfo[mnt['parent_id']] + if ( + len(m['root']) > 1 and + mnt['root'].startswith("%s/" % m['root'])): + # Omit the parent's root in the child's root + # == Example: + # 140 136 253:2 /rootfs / rw - ext4 /dev/sdb2 rw + # 141 140 253:2 /rootfs/tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw + # == Expected result: + # src=/tmp/aaa + mnt['root'] = mnt['root'][len(m['root']):] + + # Prepend the parent's dst to the child's root + # == Example: + # 42 60 0:35 / /tmp rw - tmpfs tmpfs rw + # 78 42 0:35 /aaa /tmp/bbb rw - tmpfs tmpfs rw + # == Expected result: + # src=/tmp/aaa 
+ if m['dst'] != '/': + mnt['root'] = "%s%s" % (m['dst'], mnt['root']) + src = mnt['root'] + else: + src = mnt['src'] + + record = { + 'dst': mnt['dst'], + 'src': src, + 'opts': mnt['opts'], + 'fs': mnt['fs'] + } + + mounts[mnt['dst']] = record + + return mounts + + +def main(): + module = AnsibleModule( + argument_spec=dict( + boot=dict(type='bool', default=True), + dump=dict(type='str'), + fstab=dict(type='str'), + fstype=dict(type='str'), + path=dict(type='path', required=True, aliases=['name']), + opts=dict(type='str'), + passno=dict(type='str'), + src=dict(type='path'), + backup=dict(type='bool', default=False), + state=dict(type='str', required=True, choices=['absent', 'mounted', 'present', 'unmounted', 'remounted']), + ), + supports_check_mode=True, + required_if=( + ['state', 'mounted', ['src', 'fstype']], + ['state', 'present', ['src', 'fstype']], + ), + ) + + # solaris args: + # name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab + # linux args: + # name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab + # Note: Do not modify module.params['fstab'] as we need to know if the user + # explicitly specified it in mount() and remount() + if platform.system().lower() == 'sunos': + args = dict( + name=module.params['path'], + opts='-', + passno='-', + fstab=module.params['fstab'], + boot='yes' + ) + if args['fstab'] is None: + args['fstab'] = '/etc/vfstab' + else: + args = dict( + name=module.params['path'], + opts='defaults', + dump='0', + passno='0', + fstab=module.params['fstab'] + ) + if args['fstab'] is None: + args['fstab'] = '/etc/fstab' + + # FreeBSD doesn't have any 'default' so set 'rw' instead + if platform.system() == 'FreeBSD': + args['opts'] = 'rw' + + linux_mounts = [] + + # Cache all mounts here in order we have consistent results if we need to + # call is_bind_mounted() multiple times + if platform.system() == 'Linux': + linux_mounts = get_linux_mounts(module) + + if linux_mounts is None: + args['warnings'] = ( + 'Cannot 
open file /proc/self/mountinfo. ' + 'Bind mounts might be misinterpreted.') + + # Override defaults with user specified params + for key in ('src', 'fstype', 'passno', 'opts', 'dump', 'fstab'): + if module.params[key] is not None: + args[key] = module.params[key] + + # If fstab file does not exist, we first need to create it. This mainly + # happens when fstab option is passed to the module. + if not os.path.exists(args['fstab']): + if not os.path.exists(os.path.dirname(args['fstab'])): + os.makedirs(os.path.dirname(args['fstab'])) + try: + open(args['fstab'], 'a').close() + except PermissionError as e: + module.fail_json(msg="Failed to open %s due to permission issue" % args['fstab']) + except Exception as e: + module.fail_json(msg="Failed to open %s due to %s" % (args['fstab'], to_native(e))) + + # absent: + # Remove from fstab and unmounted. + # unmounted: + # Do not change fstab state, but unmount. + # present: + # Add to fstab, do not change mount state. + # mounted: + # Add to fstab if not there and make sure it is mounted. If it has + # changed in fstab then remount it. 
+ + state = module.params['state'] + name = module.params['path'] + changed = False + + if state == 'absent': + name, changed = unset_mount(module, args) + + if changed and not module.check_mode: + if ismount(name) or is_bind_mounted(module, linux_mounts, name): + res, msg = umount(module, name) + + if res: + module.fail_json( + msg="Error unmounting %s: %s" % (name, msg)) + + if os.path.exists(name): + try: + os.rmdir(name) + except (OSError, IOError) as e: + module.fail_json(msg="Error rmdir %s: %s" % (name, to_native(e))) + elif state == 'unmounted': + if ismount(name) or is_bind_mounted(module, linux_mounts, name): + if not module.check_mode: + res, msg = umount(module, name) + + if res: + module.fail_json( + msg="Error unmounting %s: %s" % (name, msg)) + + changed = True + elif state == 'mounted': + if not os.path.exists(args['src']): + module.fail_json(msg="Unable to mount %s as it does not exist" % args['src']) + + if not os.path.exists(name) and not module.check_mode: + try: + os.makedirs(name) + except (OSError, IOError) as e: + module.fail_json( + msg="Error making dir %s: %s" % (name, to_native(e))) + + name, changed = set_mount(module, args) + res = 0 + + if ( + ismount(name) or + is_bind_mounted( + module, linux_mounts, name, args['src'], args['fstype'])): + if changed and not module.check_mode: + res, msg = remount(module, args) + changed = True + else: + changed = True + + if not module.check_mode: + res, msg = mount(module, args) + + if res: + module.fail_json(msg="Error mounting %s: %s" % (name, msg)) + elif state == 'present': + name, changed = set_mount(module, args) + elif state == 'remounted': + if not module.check_mode: + res, msg = remount(module, args) + + if res: + module.fail_json(msg="Error remounting %s: %s" % (name, msg)) + + changed = True + else: + module.fail_json(msg='Unexpected position reached') + + module.exit_json(changed=changed, **args) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/seboolean.py 
b/plugins/modules/seboolean.py new file mode 100644 index 0000000..38f4e02 --- /dev/null +++ b/plugins/modules/seboolean.py @@ -0,0 +1,335 @@ +#!/usr/bin/python + +# Copyright: (c) 2012, Stephen Fromm +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: seboolean +short_description: Toggles SELinux booleans +description: + - Toggles SELinux booleans. +options: + name: + description: + - Name of the boolean to configure. + required: true + persistent: + description: + - Set to C(yes) if the boolean setting should survive a reboot. + type: bool + default: 'no' + state: + description: + - Desired boolean value + type: bool + required: true + ignore_selinux_state: + description: + - Useful for scenarios (chrooted environment) that you can't get the real SELinux state. + type: bool + default: false +notes: + - Not tested on any Debian based system. 
+requirements: +- libselinux-python +- libsemanage-python +author: +- Stephen Fromm (@sfromm) +''' + +EXAMPLES = ''' +- name: Set httpd_can_network_connect flag on and keep it persistent across reboots + seboolean: + name: httpd_can_network_connect + state: yes + persistent: yes +''' + +import os +import traceback + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEMANAGE_IMP_ERR = None +try: + import semanage + HAVE_SEMANAGE = True +except ImportError: + SEMANAGE_IMP_ERR = traceback.format_exc() + HAVE_SEMANAGE = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six import binary_type +from ansible.module_utils._text import to_bytes, to_text + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def has_boolean_value(module, name): + bools = [] + try: + rc, bools = selinux.security_get_boolean_names() + except OSError: + module.fail_json(msg="Failed to get list of boolean names") + # work around for selinux who changed its API, see + # https://github.com/ansible/ansible/issues/25651 + if len(bools) > 0: + if isinstance(bools[0], binary_type): + name = to_bytes(name) + if name in bools: + return True + else: + return False + + +def get_boolean_value(module, name): + state = 0 + try: + state = selinux.security_get_boolean_active(name) + except OSError: + module.fail_json(msg="Failed to determine current state for boolean %s" % name) + if state == 1: + return True + else: + return False + + +def semanage_get_handle(module): + handle = semanage.semanage_handle_create() + if not handle: + module.fail_json(msg="Failed to create semanage library handle") + + managed = semanage.semanage_is_managed(handle) + if managed <= 0: + semanage.semanage_handle_destroy(handle) + if managed < 0: + module.fail_json(msg="Failed to 
determine whether policy is manage") + if managed == 0: + if os.getuid() == 0: + module.fail_json(msg="Cannot set persistent booleans without managed policy") + else: + module.fail_json(msg="Cannot set persistent booleans; please try as root") + + if semanage.semanage_connect(handle) < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to connect to semanage") + + return handle + + +def semanage_begin_transaction(module, handle): + if semanage.semanage_begin_transaction(handle) < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to begin semanage transaction") + + +def semanage_set_boolean_value(module, handle, name, value): + rc, t_b = semanage.semanage_bool_create(handle) + if rc < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to create seboolean with semanage") + + if semanage.semanage_bool_set_name(handle, t_b, name) < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to set seboolean name with semanage") + + rc, boolkey = semanage.semanage_bool_key_extract(handle, t_b) + if rc < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to extract boolean key with semanage") + + rc, exists = semanage.semanage_bool_exists(handle, boolkey) + if rc < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to check if boolean is defined") + if not exists: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="SELinux boolean %s is not defined in persistent policy" % name) + + rc, sebool = semanage.semanage_bool_query(handle, boolkey) + if rc < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to query boolean in persistent policy") + + semanage.semanage_bool_set_value(sebool, value) + + if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to modify boolean key with semanage") + + if 
semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to set boolean key active with semanage") + + semanage.semanage_bool_key_free(boolkey) + semanage.semanage_bool_free(t_b) + semanage.semanage_bool_free(sebool) + + +def semanage_get_boolean_value(module, handle, name): + rc, t_b = semanage.semanage_bool_create(handle) + if rc < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to create seboolean with semanage") + + if semanage.semanage_bool_set_name(handle, t_b, name) < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to set seboolean name with semanage") + + rc, boolkey = semanage.semanage_bool_key_extract(handle, t_b) + if rc < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to extract boolean key with semanage") + + rc, exists = semanage.semanage_bool_exists(handle, boolkey) + if rc < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to check if boolean is defined") + if not exists: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="SELinux boolean %s is not defined in persistent policy" % name) + + rc, sebool = semanage.semanage_bool_query(handle, boolkey) + if rc < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to query boolean in persistent policy") + + value = semanage.semanage_bool_get_value(sebool) + + semanage.semanage_bool_key_free(boolkey) + semanage.semanage_bool_free(t_b) + semanage.semanage_bool_free(sebool) + + return value + + +def semanage_commit(module, handle, load=0): + semanage.semanage_set_reload(handle, load) + if semanage.semanage_commit(handle) < 0: + semanage.semanage_handle_destroy(handle) + module.fail_json(msg="Failed to commit changes to semanage") + + +def semanage_destroy_handle(module, handle): + rc = semanage.semanage_disconnect(handle) + semanage.semanage_handle_destroy(handle) + if rc < 
0: + module.fail_json(msg="Failed to disconnect from semanage") + + +# The following method implements what setsebool.c does to change +# a boolean and make it persist after reboot.. +def semanage_boolean_value(module, name, state): + value = 0 + changed = False + if state: + value = 1 + try: + handle = semanage_get_handle(module) + semanage_begin_transaction(module, handle) + cur_value = semanage_get_boolean_value(module, handle, name) + if cur_value != value: + changed = True + if not module.check_mode: + semanage_set_boolean_value(module, handle, name, value) + semanage_commit(module, handle) + semanage_destroy_handle(module, handle) + except Exception as e: + module.fail_json(msg=u"Failed to manage policy for boolean %s: %s" % (name, to_text(e))) + return changed + + +def set_boolean_value(module, name, state): + rc = 0 + value = 0 + if state: + value = 1 + try: + rc = selinux.security_set_boolean(name, value) + except OSError: + module.fail_json(msg="Failed to set boolean %s to %s" % (name, value)) + if rc == 0: + return True + else: + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + name=dict(type='str', required=True), + persistent=dict(type='bool', default=False), + state=dict(type='bool', required=True), + ), + supports_check_mode=True, + ) + + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib('libselinux-python'), exception=SELINUX_IMP_ERR) + + if not HAVE_SEMANAGE: + module.fail_json(msg=missing_required_lib('libsemanage-python'), exception=SEMANAGE_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + name = module.params['name'] + persistent = module.params['persistent'] + state = module.params['state'] + + result = dict( + name=name, + persistent=persistent, + state=state + ) + changed = False + + if 
hasattr(selinux, 'selinux_boolean_sub'): + # selinux_boolean_sub allows sites to rename a boolean and alias the old name + # Feature only available in selinux library since 2012. + name = selinux.selinux_boolean_sub(name) + + if not has_boolean_value(module, name): + module.fail_json(msg="SELinux boolean %s does not exist." % name) + + if persistent: + changed = semanage_boolean_value(module, name, state) + else: + cur_value = get_boolean_value(module, name) + if cur_value != state: + changed = True + if not module.check_mode: + changed = set_boolean_value(module, name, state) + if not changed: + module.fail_json(msg="Failed to set boolean %s to %s" % (name, state)) + try: + selinux.security_commit_booleans() + except Exception: + module.fail_json(msg="Failed to commit pending boolean %s value" % name) + + result['changed'] = changed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/selinux.py b/plugins/modules/selinux.py new file mode 100644 index 0000000..b806cfb --- /dev/null +++ b/plugins/modules/selinux.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012, Derek Carter +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core' +} + +DOCUMENTATION = r''' +--- +module: selinux +short_description: Change policy and state of SELinux +description: + - Configures the SELinux mode and policy. + - A reboot may be required after usage. + - Ansible will not issue this reboot but will let you know when it is required. +options: + policy: + description: + - The name of the SELinux policy to use (e.g. C(targeted)) will be required if state is not C(disabled). + state: + description: + - The SELinux mode. 
+ required: true + choices: [ disabled, enforcing, permissive ] + configfile: + description: + - The path to the SELinux configuration file, if non-standard. + default: /etc/selinux/config + aliases: [ conf, file ] +requirements: [ libselinux-python ] +author: +- Derek Carter (@goozbach) +''' + +EXAMPLES = r''' +- name: Enable SELinux + selinux: + policy: targeted + state: enforcing + +- name: Put SELinux in permissive mode, logging actions that would be blocked. + selinux: + policy: targeted + state: permissive + +- name: Disable SELinux + selinux: + state: disabled +''' + +RETURN = r''' +msg: + description: Messages that describe changes that were made. + returned: always + type: str + sample: Config SELinux state changed from 'disabled' to 'permissive' +configfile: + description: Path to SELinux configuration file. + returned: always + type: str + sample: /etc/selinux/config +policy: + description: Name of the SELinux policy. + returned: always + type: str + sample: targeted +state: + description: SELinux mode. + returned: always + type: str + sample: enforcing +reboot_required: + description: Whether or not an reboot is required for the changes to take effect. 
+ returned: always + type: bool + sample: true +''' + +import os +import re +import tempfile +import traceback + +SELINUX_IMP_ERR = None +try: + import selinux + HAS_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAS_SELINUX = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.facts.utils import get_file_lines + + +# getter subroutines +def get_config_state(configfile): + lines = get_file_lines(configfile, strip=False) + + for line in lines: + stateline = re.match(r'^SELINUX=.*$', line) + if stateline: + return line.split('=')[1].strip() + + +def get_config_policy(configfile): + lines = get_file_lines(configfile, strip=False) + + for line in lines: + stateline = re.match(r'^SELINUXTYPE=.*$', line) + if stateline: + return line.split('=')[1].strip() + + +# setter subroutines +def set_config_state(module, state, configfile): + # SELINUX=permissive + # edit config file with state value + stateline = 'SELINUX=%s' % state + lines = get_file_lines(configfile, strip=False) + + tmpfd, tmpfile = tempfile.mkstemp() + + with open(tmpfile, "w") as write_file: + for line in lines: + write_file.write(re.sub(r'^SELINUX=.*', stateline, line) + '\n') + + module.atomic_move(tmpfile, configfile) + + +def set_state(module, state): + if state == 'enforcing': + selinux.security_setenforce(1) + elif state == 'permissive': + selinux.security_setenforce(0) + elif state == 'disabled': + pass + else: + msg = 'trying to set invalid runtime state %s' % state + module.fail_json(msg=msg) + + +def set_config_policy(module, policy, configfile): + if not os.path.exists('/etc/selinux/%s/policy' % policy): + module.fail_json(msg='Policy %s does not exist in /etc/selinux/' % policy) + + # edit config file with state value + # SELINUXTYPE=targeted + policyline = 'SELINUXTYPE=%s' % policy + lines = get_file_lines(configfile, strip=False) + + tmpfd, tmpfile = tempfile.mkstemp() + + with open(tmpfile, "w") as 
write_file: + for line in lines: + write_file.write(re.sub(r'^SELINUXTYPE=.*', policyline, line) + '\n') + + module.atomic_move(tmpfile, configfile) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + policy=dict(type='str'), + state=dict(type='str', required='True', choices=['enforcing', 'permissive', 'disabled']), + configfile=dict(type='str', default='/etc/selinux/config', aliases=['conf', 'file']), + ), + supports_check_mode=True, + ) + + if not HAS_SELINUX: + module.fail_json(msg=missing_required_lib('libselinux-python'), exception=SELINUX_IMP_ERR) + + # global vars + changed = False + msgs = [] + configfile = module.params['configfile'] + policy = module.params['policy'] + state = module.params['state'] + runtime_enabled = selinux.is_selinux_enabled() + runtime_policy = selinux.selinux_getpolicytype()[1] + runtime_state = 'disabled' + reboot_required = False + + if runtime_enabled: + # enabled means 'enforcing' or 'permissive' + if selinux.security_getenforce(): + runtime_state = 'enforcing' + else: + runtime_state = 'permissive' + + if not os.path.isfile(configfile): + module.fail_json(msg="Unable to find file {0}".format(configfile), + details="Please install SELinux-policy package, " + "if this package is not installed previously.") + + config_policy = get_config_policy(configfile) + config_state = get_config_state(configfile) + + # check to see if policy is set if state is not 'disabled' + if state != 'disabled': + if not policy: + module.fail_json(msg="Policy is required if state is not 'disabled'") + else: + if not policy: + policy = config_policy + + # check changed values and run changes + if policy != runtime_policy: + if module.check_mode: + module.exit_json(changed=True) + # cannot change runtime policy + msgs.append("Running SELinux policy changed from '%s' to '%s'" % (runtime_policy, policy)) + changed = True + + if policy != config_policy: + if module.check_mode: + module.exit_json(changed=True) + set_config_policy(module, policy, 
configfile) + msgs.append("SELinux policy configuration in '%s' changed from '%s' to '%s'" % (configfile, config_policy, policy)) + changed = True + + if state != runtime_state: + if runtime_enabled: + if state == 'disabled': + if runtime_state != 'permissive': + # Temporarily set state to permissive + if not module.check_mode: + set_state(module, 'permissive') + module.warn("SELinux state temporarily changed from '%s' to 'permissive'. State change will take effect next reboot." % (runtime_state)) + changed = True + else: + module.warn('SELinux state change will take effect next reboot') + reboot_required = True + else: + if not module.check_mode: + set_state(module, state) + msgs.append("SELinux state changed from '%s' to '%s'" % (runtime_state, state)) + + # Only report changes if the file is changed. + # This prevents the task from reporting changes every time the task is run. + changed = True + else: + module.warn("Reboot is required to set SELinux state to '%s'" % state) + reboot_required = True + + if state != config_state: + if not module.check_mode: + set_config_state(module, state, configfile) + msgs.append("Config SELinux state changed from '%s' to '%s'" % (config_state, state)) + changed = True + + module.exit_json(changed=changed, msg=', '.join(msgs), configfile=configfile, policy=policy, state=state, reboot_required=reboot_required) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/synchronize.py b/plugins/modules/synchronize.py new file mode 100644 index 0000000..555c779 --- /dev/null +++ b/plugins/modules/synchronize.py @@ -0,0 +1,607 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012-2013, Timothy Appnel +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 
'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: synchronize +short_description: A wrapper around rsync to make common tasks in your playbooks quick and easy +description: + - C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy. + - It is run and originates on the local host where Ansible is being run. + - Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of + boilerplate options and host facts. + - This module is not intended to provide access to the full power of rsync, but does make the most common + invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case. +options: + src: + description: + - Path on the source host that will be synchronized to the destination. + - The path can be absolute or relative. + type: str + required: true + dest: + description: + - Path on the destination host that will be synchronized from the source. + - The path can be absolute or relative. + type: str + required: true + dest_port: + description: + - Port number for ssh on the destination host. + - Prior to Ansible 2.0, the ansible_ssh_port inventory var took precedence over this value. + - This parameter defaults to the value of C(ansible_ssh_port) or C(ansible_port), + the C(remote_port) config setting or the value from ssh client configuration + if none of the former have been set. + type: int + mode: + description: + - Specify the direction of the synchronization. + - In push mode the localhost or delegate is the source. + - In pull mode the remote host in context is the source. + type: str + choices: [ pull, push ] + default: push + archive: + description: + - Mirrors the rsync archive flag, enables recursive, links, perms, times, owner, group flags and -D. 
+ type: bool + default: yes + checksum: + description: + - Skip based on checksum, rather than mod-time & size; Note that that "archive" option is still enabled by default - the "checksum" option will + not disable it. + type: bool + default: no + compress: + description: + - Compress file data during the transfer. + - In most cases, leave this enabled unless it causes problems. + type: bool + default: yes + existing_only: + description: + - Skip creating new files on receiver. + type: bool + default: no + delete: + description: + - Delete files in C(dest) that don't exist (after transfer, not before) in the C(src) path. + - This option requires C(recursive=yes). + - This option ignores excluded files and behaves like the rsync opt --delete-excluded. + type: bool + default: no + dirs: + description: + - Transfer directories without recursing. + type: bool + default: no + recursive: + description: + - Recurse into directories. + - This parameter defaults to the value of the archive option. + type: bool + links: + description: + - Copy symlinks as symlinks. + - This parameter defaults to the value of the archive option. + type: bool + copy_links: + description: + - Copy symlinks as the item that they point to (the referent) is copied, rather than the symlink. + type: bool + default: no + perms: + description: + - Preserve permissions. + - This parameter defaults to the value of the archive option. + type: bool + times: + description: + - Preserve modification times. + - This parameter defaults to the value of the archive option. + type: bool + owner: + description: + - Preserve owner (super user only). + - This parameter defaults to the value of the archive option. + type: bool + group: + description: + - Preserve group. + - This parameter defaults to the value of the archive option. + type: bool + rsync_path: + description: + - Specify the rsync command to run on the remote host. See C(--rsync-path) on the rsync man page. 
+ - To specify the rsync command to run on the local host, you need to set this your task var C(ansible_rsync_path). + type: str + rsync_timeout: + description: + - Specify a C(--timeout) for the rsync command in seconds. + type: int + default: 0 + set_remote_user: + description: + - Put user@ for the remote paths. + - If you have a custom ssh config to define the remote user for a host + that does not match the inventory user, you should set this parameter to C(no). + type: bool + default: yes + use_ssh_args: + description: + - Use the ssh_args specified in ansible.cfg. + type: bool + default: no + rsync_opts: + description: + - Specify additional rsync options by passing in an array. + - Note that an empty string in C(rsync_opts) will end up transfer the current working directory. + type: list + default: + partial: + description: + - Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster. + type: bool + default: no + verify_host: + description: + - Verify destination host key. + type: bool + default: no + private_key: + description: + - Specify the private key to use for SSH-based rsync connections (e.g. C(~/.ssh/id_rsa)). + type: path + link_dest: + description: + - Add a destination to hard link against during the rsync. + type: list + default: +notes: + - rsync must be installed on both the local and remote host. + - For the C(synchronize) module, the "local host" is the host `the synchronize task originates on`, and the "destination host" is the host + `synchronize is connecting to`. + - The "local host" can be changed to a different host by using `delegate_to`. This enables copying between two remote hosts or entirely on one + remote machine. + - > + The user and permissions for the synchronize `src` are those of the user running the Ansible task on the local host (or the remote_user for a + delegate_to host when delegate_to is used). 
+ - The user and permissions for the synchronize `dest` are those of the `remote_user` on the destination host or the `become_user` if `become=yes` is active. + - In Ansible 2.0 a bug in the synchronize module made become occur on the "local host". This was fixed in Ansible 2.0.1. + - Currently, synchronize is limited to elevating permissions via passwordless sudo. This is because rsync itself is connecting to the remote machine + and rsync doesn't give us a way to pass sudo credentials in. + - Currently there are only a few connection types which support synchronize (ssh, paramiko, local, and docker) because a sync strategy has been + determined for those connection types. Note that the connection for these must not need a password as rsync itself is making the connection and + rsync does not provide us a way to pass a password to the connection. + - Expect that dest=~/x will be ~/x even if using sudo. + - Inspect the verbose output to validate the destination user/host/path are what was expected. + - To exclude files and directories from being synchronized, you may add C(.rsync-filter) files to the source directory. + - rsync daemon must be up and running with correct permission when using rsync protocol in source or destination path. + - The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process + encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly. + - link_destination is subject to the same limitations as the underlying rsync daemon. Hard links are only preserved if the relative subtrees + of the source and destination are the same. Attempts to hardlink into a directory that is a subdirectory of the source will be prevented. 
+seealso: +- module: copy +- module: community.windows.win_robocopy +author: +- Timothy Appnel (@tima) +''' + +EXAMPLES = ''' +- name: Synchronization of src on the control machine to dest on the remote hosts + synchronize: + src: some/relative/path + dest: /some/absolute/path + +- name: Synchronization using rsync protocol (push) + synchronize: + src: some/relative/path/ + dest: rsync://somehost.com/path/ + +- name: Synchronization using rsync protocol (pull) + synchronize: + mode: pull + src: rsync://somehost.com/path/ + dest: /some/absolute/path/ + +- name: Synchronization using rsync protocol on delegate host (push) + synchronize: + src: /some/absolute/path/ + dest: rsync://somehost.com/path/ + delegate_to: delegate.host + +- name: Synchronization using rsync protocol on delegate host (pull) + synchronize: + mode: pull + src: rsync://somehost.com/path/ + dest: /some/absolute/path/ + delegate_to: delegate.host + +- name: Synchronization without any --archive options enabled + synchronize: + src: some/relative/path + dest: /some/absolute/path + archive: no + +- name: Synchronization with --archive options enabled except for --recursive + synchronize: + src: some/relative/path + dest: /some/absolute/path + recursive: no + +- name: Synchronization with --archive options enabled except for --times, with --checksum option enabled + synchronize: + src: some/relative/path + dest: /some/absolute/path + checksum: yes + times: no + +- name: Synchronization without --archive options enabled except use --links + synchronize: + src: some/relative/path + dest: /some/absolute/path + archive: no + links: yes + +- name: Synchronization of two paths both on the control machine + synchronize: + src: some/relative/path + dest: /some/absolute/path + delegate_to: localhost + +- name: Synchronization of src on the inventory host to the dest on the localhost in pull mode + synchronize: + mode: pull + src: some/relative/path + dest: /some/absolute/path + +- name: Synchronization of src 
on delegate host to dest on the current inventory host. + synchronize: + src: /first/absolute/path + dest: /second/absolute/path + delegate_to: delegate.host + +- name: Synchronize two directories on one remote host. + synchronize: + src: /first/absolute/path + dest: /second/absolute/path + delegate_to: "{{ inventory_hostname }}" + +- name: Synchronize and delete files in dest on the remote host that are not found in src of localhost. + synchronize: + src: some/relative/path + dest: /some/absolute/path + delete: yes + recursive: yes + +# This specific command is granted su privileges on the destination +- name: Synchronize using an alternate rsync command + synchronize: + src: some/relative/path + dest: /some/absolute/path + rsync_path: su -c rsync + +# Example .rsync-filter file in the source directory +# - var # exclude any path whose last part is 'var' +# - /var # exclude any path starting with 'var' starting at the source directory +# + /var/conf # include /var/conf even though it was previously excluded + +- name: Synchronize passing in extra rsync options + synchronize: + src: /tmp/helloworld + dest: /var/www/helloworld + rsync_opts: + - "--no-motd" + - "--exclude=.git" + +# Hardlink files if they didn't change +- name: Use hardlinks when synchronizing filesystems + synchronize: + src: /tmp/path_a/foo.txt + dest: /tmp/path_b/foo.txt + link_dest: /tmp/path_a/ + +# Specify the rsync binary to use on remote host and on local host +- hosts: groupofhosts + vars: + ansible_rsync_path: /usr/gnu/bin/rsync + + tasks: + - name: copy /tmp/localpath/ to remote location /tmp/remotepath + synchronize: + src: /tmp/localpath/ + dest: /tmp/remotepath + rsync_path: /usr/gnu/bin/rsync +''' + + +import os +import errno + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.six.moves import shlex_quote + + +client_addr = None + + +def substitute_controller(path): + global client_addr + if not 
client_addr: + ssh_env_string = os.environ.get('SSH_CLIENT', None) + try: + client_addr, _ = ssh_env_string.split(None, 1) + except AttributeError: + ssh_env_string = os.environ.get('SSH_CONNECTION', None) + try: + client_addr, _ = ssh_env_string.split(None, 1) + except AttributeError: + pass + if not client_addr: + raise ValueError + + if path.startswith('localhost:'): + path = path.replace('localhost', client_addr, 1) + return path + + +def is_rsh_needed(source, dest): + if source.startswith('rsync://') or dest.startswith('rsync://'): + return False + if ':' in source or ':' in dest: + return True + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + src=dict(type='str', required=True), + dest=dict(type='str', required=True), + dest_port=dict(type='int'), + delete=dict(type='bool', default=False), + private_key=dict(type='path'), + rsync_path=dict(type='str'), + _local_rsync_path=dict(type='path', default='rsync'), + _local_rsync_password=dict(type='str', no_log=True), + _substitute_controller=dict(type='bool', default=False), + archive=dict(type='bool', default=True), + checksum=dict(type='bool', default=False), + compress=dict(type='bool', default=True), + existing_only=dict(type='bool', default=False), + dirs=dict(type='bool', default=False), + recursive=dict(type='bool'), + links=dict(type='bool'), + copy_links=dict(type='bool', default=False), + perms=dict(type='bool'), + times=dict(type='bool'), + owner=dict(type='bool'), + group=dict(type='bool'), + set_remote_user=dict(type='bool', default=True), + rsync_timeout=dict(type='int', default=0), + rsync_opts=dict(type='list', default=[]), + ssh_args=dict(type='str'), + partial=dict(type='bool', default=False), + verify_host=dict(type='bool', default=False), + mode=dict(type='str', default='push', choices=['pull', 'push']), + link_dest=dict(type='list') + ), + supports_check_mode=True, + ) + + if module.params['_substitute_controller']: + try: + source = 
substitute_controller(module.params['src']) + dest = substitute_controller(module.params['dest']) + except ValueError: + module.fail_json(msg='Could not determine controller hostname for rsync to send to') + else: + source = module.params['src'] + dest = module.params['dest'] + dest_port = module.params['dest_port'] + delete = module.params['delete'] + private_key = module.params['private_key'] + rsync_path = module.params['rsync_path'] + rsync = module.params.get('_local_rsync_path', 'rsync') + rsync_password = module.params.get('_local_rsync_password') + rsync_timeout = module.params.get('rsync_timeout', 'rsync_timeout') + archive = module.params['archive'] + checksum = module.params['checksum'] + compress = module.params['compress'] + existing_only = module.params['existing_only'] + dirs = module.params['dirs'] + partial = module.params['partial'] + # the default of these params depends on the value of archive + recursive = module.params['recursive'] + links = module.params['links'] + copy_links = module.params['copy_links'] + perms = module.params['perms'] + times = module.params['times'] + owner = module.params['owner'] + group = module.params['group'] + rsync_opts = module.params['rsync_opts'] + ssh_args = module.params['ssh_args'] + verify_host = module.params['verify_host'] + link_dest = module.params['link_dest'] + + if '/' not in rsync: + rsync = module.get_bin_path(rsync, required=True) + + cmd = [rsync, '--delay-updates', '-F'] + _sshpass_pipe = None + if rsync_password: + try: + module.run_command(["sshpass"]) + except OSError: + module.fail_json( + msg="to use rsync connection with passwords, you must install the sshpass program" + ) + _sshpass_pipe = os.pipe() + cmd = ['sshpass', '-d' + to_native(_sshpass_pipe[0], errors='surrogate_or_strict')] + cmd + if compress: + cmd.append('--compress') + if rsync_timeout: + cmd.append('--timeout=%s' % rsync_timeout) + if module.check_mode: + cmd.append('--dry-run') + if delete: + cmd.append('--delete-after') + 
if existing_only: + cmd.append('--existing') + if checksum: + cmd.append('--checksum') + if copy_links: + cmd.append('--copy-links') + if archive: + cmd.append('--archive') + if recursive is False: + cmd.append('--no-recursive') + if links is False: + cmd.append('--no-links') + if perms is False: + cmd.append('--no-perms') + if times is False: + cmd.append('--no-times') + if owner is False: + cmd.append('--no-owner') + if group is False: + cmd.append('--no-group') + else: + if recursive is True: + cmd.append('--recursive') + if links is True: + cmd.append('--links') + if perms is True: + cmd.append('--perms') + if times is True: + cmd.append('--times') + if owner is True: + cmd.append('--owner') + if group is True: + cmd.append('--group') + if dirs: + cmd.append('--dirs') + + if source.startswith('rsync://') and dest.startswith('rsync://'): + module.fail_json(msg='either src or dest must be a localhost', rc=1) + + if is_rsh_needed(source, dest): + + # https://github.com/ansible/ansible/issues/15907 + has_rsh = False + for rsync_opt in rsync_opts: + if '--rsh' in rsync_opt: + has_rsh = True + break + + # if the user has not supplied an --rsh option go ahead and add ours + if not has_rsh: + ssh_cmd = [module.get_bin_path('ssh', required=True), '-S', 'none'] + if private_key is not None: + ssh_cmd.extend(['-i', private_key]) + # If the user specified a port value + # Note: The action plugin takes care of setting this to a port from + # inventory if the user didn't specify an explicit dest_port + if dest_port is not None: + ssh_cmd.extend(['-o', 'Port=%s' % dest_port]) + if not verify_host: + ssh_cmd.extend(['-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null']) + ssh_cmd_str = ' '.join(shlex_quote(arg) for arg in ssh_cmd) + if ssh_args: + ssh_cmd_str += ' %s' % ssh_args + cmd.append('--rsh=%s' % ssh_cmd_str) + + if rsync_path: + cmd.append('--rsync-path=%s' % rsync_path) + + if rsync_opts: + if '' in rsync_opts: + module.warn('The empty string is 
present in rsync_opts which will cause rsync to' + ' transfer the current working directory. If this is intended, use "."' + ' instead to get rid of this warning. If this is unintended, check for' + ' problems in your playbook leading to empty string in rsync_opts.') + cmd.extend(rsync_opts) + + if partial: + cmd.append('--partial') + + if link_dest: + cmd.append('-H') + # verbose required because rsync does not believe that adding a + # hardlink is actually a change + cmd.append('-vv') + for x in link_dest: + link_path = os.path.abspath(os.path.expanduser(x)) + destination_path = os.path.abspath(os.path.dirname(dest)) + if destination_path.find(link_path) == 0: + module.fail_json(msg='Hardlinking into a subdirectory of the source would cause recursion. %s and %s' % (destination_path, dest)) + cmd.append('--link-dest=%s' % link_path) + + changed_marker = '<>' + cmd.append('--out-format=' + changed_marker + '%i %n%L') + + # expand the paths + if '@' not in source: + source = os.path.expanduser(source) + if '@' not in dest: + dest = os.path.expanduser(dest) + + cmd.append(source) + cmd.append(dest) + cmdstr = ' '.join(cmd) + + # If we are using password authentication, write the password into the pipe + if rsync_password: + def _write_password_to_pipe(proc): + os.close(_sshpass_pipe[0]) + try: + os.write(_sshpass_pipe[1], to_bytes(rsync_password) + b'\n') + except OSError as exc: + # Ignore broken pipe errors if the sshpass process has exited. 
+ if exc.errno != errno.EPIPE or proc.poll() is None: + raise + + (rc, out, err) = module.run_command( + cmd, pass_fds=_sshpass_pipe, + before_communicate_callback=_write_password_to_pipe) + else: + (rc, out, err) = module.run_command(cmd) + + if rc: + return module.fail_json(msg=err, rc=rc, cmd=cmdstr) + + if link_dest: + # a leading period indicates no change + changed = (changed_marker + '.') not in out + else: + changed = changed_marker in out + + out_clean = out.replace(changed_marker, '') + out_lines = out_clean.split('\n') + while '' in out_lines: + out_lines.remove('') + if module._diff: + diff = {'prepared': out_clean} + return module.exit_json(changed=changed, msg=out_clean, + rc=rc, cmd=cmdstr, stdout_lines=out_lines, + diff=diff) + + return module.exit_json(changed=changed, msg=out_clean, + rc=rc, cmd=cmdstr, stdout_lines=out_lines) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sysctl.py b/plugins/modules/sysctl.py new file mode 100644 index 0000000..fb6aae7 --- /dev/null +++ b/plugins/modules/sysctl.py @@ -0,0 +1,419 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, David "DaviXX" CHANIAL +# (c) 2014, James Tanner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: sysctl +short_description: Manage entries in sysctl.conf. +description: + - This module manipulates sysctl entries and optionally performs a C(/sbin/sysctl -p) after changing them. +options: + name: + description: + - The dot-separated path (aka I(key)) specifying the sysctl variable. + required: true + aliases: [ 'key' ] + value: + description: + - Desired value of the sysctl key. 
    aliases: [ 'val' ]
  state:
    description:
      - Whether the entry should be present or absent in the sysctl file.
    choices: [ "present", "absent" ]
    default: present
  ignoreerrors:
    description:
      - Use this option to ignore errors about unknown keys.
    type: bool
    default: 'no'
  reload:
    description:
      - If C(yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is
        updated. If C(no), does not reload I(sysctl) even if the
        C(sysctl_file) is updated.
    type: bool
    default: 'yes'
  sysctl_file:
    description:
      - Specifies the absolute path to C(sysctl.conf), if not C(/etc/sysctl.conf).
    default: /etc/sysctl.conf
  sysctl_set:
    description:
      - Verify token value with the sysctl command and set with -w if necessary
    type: bool
    default: 'no'
author: "David CHANIAL (@davixx) "
'''

EXAMPLES = '''
# Set vm.swappiness to 5 in /etc/sysctl.conf
- sysctl:
    name: vm.swappiness
    value: '5'
    state: present

# Remove kernel.panic entry from /etc/sysctl.conf
- sysctl:
    name: kernel.panic
    state: absent
    sysctl_file: /etc/sysctl.conf

# Set kernel.panic to 3 in /tmp/test_sysctl.conf
- sysctl:
    name: kernel.panic
    value: '3'
    sysctl_file: /tmp/test_sysctl.conf
    reload: no

# Set ip forwarding on in /proc and verify token value with the sysctl command
- sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    sysctl_set: yes

# Set ip forwarding on in /proc and in the sysctl file and reload if necessary
- sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    sysctl_set: yes
    state: present
    reload: yes
'''

# ==============================================================

import os
import platform
import re
import tempfile

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
from ansible.module_utils._text import to_native


class SysctlModule(object):
    """Manage a single sysctl token: its value in the sysctl file, in the
    running kernel (via the sysctl command), or both.

    All the work happens in __init__ (which calls process()); after
    construction, self.changed reports whether anything was (or, in check
    mode, would be) modified.
    """

    # We have to use LANG=C because we are capturing STDERR of sysctl to detect
    # success or failure.
    LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}

    def __init__(self, module):
        self.module = module
        self.args = self.module.params

        self.sysctl_cmd = self.module.get_bin_path('sysctl', required=True)
        self.sysctl_file = self.args['sysctl_file']

        self.proc_value = None  # current token value in proc fs
        self.file_value = None  # current token value in file
        self.file_lines = []  # all lines in the file
        self.file_values = {}  # dict of token values

        self.changed = False  # will change occur
        self.set_proc = False  # does sysctl need to set value
        self.write_file = False  # does the sysctl file need to be reloaded

        self.process()

    # ==============================================================
    # LOGIC
    # ==============================================================

    def process(self):
        """Drive the full workflow: read current state from /proc and the
        sysctl file, decide what needs changing, then (outside check mode)
        rewrite the file, reload sysctl, and/or set the live value.
        """

        self.platform = platform.system().lower()

        # Whitespace is bad
        self.args['name'] = self.args['name'].strip()
        self.args['value'] = self._parse_value(self.args['value'])

        thisname = self.args['name']

        # get the current proc fs value
        self.proc_value = self.get_token_curr_value(thisname)

        # get the current sysctl file value
        self.read_sysctl_file()
        if thisname not in self.file_values:
            self.file_values[thisname] = None

        # update file contents with desired token/value
        self.fix_lines()

        # what do we need to do now?
        if self.file_values[thisname] is None and self.args['state'] == "present":
            self.changed = True
            self.write_file = True
        elif self.file_values[thisname] is None and self.args['state'] == "absent":
            self.changed = False
        elif self.file_values[thisname] and self.args['state'] == "absent":
            self.changed = True
            self.write_file = True
        elif self.file_values[thisname] != self.args['value']:
            self.changed = True
            self.write_file = True
        # with reload=yes we should check if the current system values are
        # correct, so that we know if we should reload
        elif self.args['reload']:
            if self.proc_value is None:
                self.changed = True
            elif not self._values_is_equal(self.proc_value, self.args['value']):
                self.changed = True

        # use the sysctl command or not?
        if self.args['sysctl_set'] and self.args['state'] == "present":
            if self.proc_value is None:
                self.changed = True
            elif not self._values_is_equal(self.proc_value, self.args['value']):
                self.changed = True
                self.set_proc = True

        # Do the work
        if not self.module.check_mode:
            if self.write_file:
                self.write_sysctl()
            if self.changed and self.args['reload']:
                self.reload_sysctl()
            if self.set_proc:
                self.set_token_value(self.args['name'], self.args['value'])

    def _values_is_equal(self, a, b):
        """Expects two string values. It will split the string by whitespace
        and compare each value. It will return True if both lists are the same,
        contain the same elements and the same order."""
        if a is None or b is None:
            return False

        a = a.split()
        b = b.split()

        if len(a) != len(b):
            return False

        return len([i for i, j in zip(a, b) if i == j]) == len(a)

    def _parse_value(self, value):
        """Normalize the desired value to the string form sysctl expects:
        booleans and boolean-looking strings become '1'/'0', other strings
        are stripped, None becomes '', anything else passes through as-is.
        """
        if value is None:
            return ''
        elif isinstance(value, bool):
            if value:
                return '1'
            else:
                return '0'
        elif isinstance(value, string_types):
            if value.lower() in BOOLEANS_TRUE:
                return '1'
            elif value.lower() in BOOLEANS_FALSE:
                return '0'
            else:
                return value.strip()
        else:
            return value

    def _stderr_failed(self, err):
        # sysctl can fail to set a value even if it returns an exit status 0
        # (https://bugzilla.redhat.com/show_bug.cgi?id=1264080). That's why we
        # also have to check stderr for errors. For now we will only fail on
        # specific errors defined by the regex below.
        errors_regex = r'^sysctl: setting key "[^"]+": (Invalid argument|Read-only file system)$'
        return re.search(errors_regex, err, re.MULTILINE) is not None

    # ==============================================================
    # SYSCTL COMMAND MANAGEMENT
    # ==============================================================

    # Use the sysctl command to find the current value
    def get_token_curr_value(self, token):
        """Return the live value of `token` (sysctl -n output), or None if the
        sysctl command fails (e.g. unknown key)."""
        if self.platform == 'openbsd':
            # openbsd doesn't support -e, just drop it
            thiscmd = "%s -n %s" % (self.sysctl_cmd, token)
        else:
            thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token)
        rc, out, err = self.module.run_command(thiscmd, environ_update=self.LANG_ENV)
        if rc != 0:
            return None
        else:
            return out

    # Use the sysctl command to set the current value
    def set_token_value(self, token, value):
        """Set `token` to `value` in the running kernel, using the
        platform-appropriate sysctl invocation; fail_json on error."""
        # quote multi-word values so the shell passes them as one argument
        if len(value.split()) > 0:
            value = '"' + value + '"'
        if self.platform == 'openbsd':
            # openbsd doesn't accept -w, but since it's not needed, just drop it
            thiscmd = "%s %s=%s" % (self.sysctl_cmd, token, value)
        elif self.platform == 'freebsd':
            ignore_missing = ''
            if self.args['ignoreerrors']:
                ignore_missing = '-i'
            # freebsd doesn't accept -w, but since it's not needed, just drop it
            thiscmd = "%s %s %s=%s" % (self.sysctl_cmd, ignore_missing, token, value)
        else:
            ignore_missing = ''
            if self.args['ignoreerrors']:
                ignore_missing = '-e'
            thiscmd = "%s %s -w %s=%s" % (self.sysctl_cmd, ignore_missing, token, value)
        rc, out, err = self.module.run_command(thiscmd, environ_update=self.LANG_ENV)
        if rc != 0 or self._stderr_failed(err):
            self.module.fail_json(msg='setting %s failed: %s' % (token, out + err))
        else:
            return rc

    # Run sysctl -p
    def reload_sysctl(self):
        """Make the kernel re-read the sysctl file, with per-platform
        fallbacks where `sysctl -p` is unavailable."""
        if self.platform == 'freebsd':
            # freebsd doesn't support -p, so reload the sysctl service
            rc, out, err = self.module.run_command('/etc/rc.d/sysctl reload', environ_update=self.LANG_ENV)
        elif self.platform == 'openbsd':
            # openbsd doesn't support -p and doesn't have a sysctl service,
            # so we have to set every value with its own sysctl call
            for k, v in self.file_values.items():
                rc = 0
                if k != self.args['name']:
                    rc = self.set_token_value(k, v)
                # FIXME this check is probably not needed as set_token_value would fail_json if rc != 0
                if rc != 0:
                    break
            if rc == 0 and self.args['state'] == "present":
                rc = self.set_token_value(self.args['name'], self.args['value'])

            # set_token_value would have called fail_json in case of failure
            # so return here and do not continue to the error processing below
            # https://github.com/ansible/ansible/issues/58158
            return
        else:
            # system supports reloading via the -p flag to sysctl, so we'll use that
            sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file]
            if self.args['ignoreerrors']:
                sysctl_args.insert(1, '-e')

            rc, out, err = self.module.run_command(sysctl_args, environ_update=self.LANG_ENV)

        if rc != 0 or self._stderr_failed(err):
            self.module.fail_json(msg="Failed to reload sysctl: %s" % to_native(out) + to_native(err))

    # ==============================================================
    # SYSCTL FILE MANAGEMENT
    # ==============================================================

    # Get the token value from the sysctl file
    def read_sysctl_file(self):
        """Populate self.file_lines (raw stripped lines) and self.file_values
        (token -> value) from the sysctl file; missing file yields empties."""

        lines = []
        if os.path.isfile(self.sysctl_file):
            try:
                with open(self.sysctl_file, "r") as read_file:
                    lines = read_file.readlines()
            except IOError as e:
                self.module.fail_json(msg="Failed to open %s: %s" % (to_native(self.sysctl_file), to_native(e)))

        for line in lines:
            line = line.strip()
            self.file_lines.append(line)

            # don't split empty lines or comments or line without equal sign
            if not line or line.startswith(("#", ";")) or "=" not in line:
                continue

            k, v = line.split('=', 1)
            k = k.strip()
            v = v.strip()
            self.file_values[k] = v.strip()

    # Fix the value in the sysctl file content
    def fix_lines(self):
        """Build self.fixed_lines: the file content with our token set/updated
        (state=present) or dropped (state=absent); duplicate keys keep only
        their first occurrence, comments and blanks pass through untouched."""
        checked = []
        self.fixed_lines = []
        for line in self.file_lines:
            if not line.strip() or line.strip().startswith(("#", ";")) or "=" not in line:
                self.fixed_lines.append(line)
                continue
            tmpline = line.strip()
            k, v = tmpline.split('=', 1)
            k = k.strip()
            v = v.strip()
            if k not in checked:
                checked.append(k)
                if k == self.args['name']:
                    if self.args['state'] == "present":
                        new_line = "%s=%s\n" % (k, self.args['value'])
                        self.fixed_lines.append(new_line)
                else:
                    new_line = "%s=%s\n" % (k, v)
                    self.fixed_lines.append(new_line)

        # token not present in the file yet: append it
        if self.args['name'] not in checked and self.args['state'] == "present":
            new_line = "%s=%s\n" % (self.args['name'], self.args['value'])
            self.fixed_lines.append(new_line)

    # Completely rewrite the sysctl file
    def write_sysctl(self):
        """Write self.fixed_lines to a temp file next to the target, then
        atomically replace the real sysctl file."""
        # open a tmp file
        fd, tmp_path = tempfile.mkstemp('.conf', '.ansible_m_sysctl_', os.path.dirname(self.sysctl_file))
        f = open(tmp_path, "w")
        try:
            for l in self.fixed_lines:
                f.write(l.strip() + "\n")
        except IOError as e:
            self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(e)))
        f.flush()
        f.close()

        # replace the real one
        self.module.atomic_move(tmp_path, self.sysctl_file)


# ==============================================================
# main

def main():
    """Module entry point: validate params and run SysctlModule."""

    # defining module
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['key'], required=True),
            value=dict(aliases=['val'], required=False, type='str'),
            state=dict(default='present', choices=['present', 'absent']),
            reload=dict(default=True, type='bool'),
            sysctl_set=dict(default=False, type='bool'),
            ignoreerrors=dict(default=False, type='bool'),
            sysctl_file=dict(default='/etc/sysctl.conf', type='path')
        ),
        supports_check_mode=True,
        required_if=[('state', 'present', ['value'])],
    )

    # NOTE(review): these None checks look redundant with required=True and
    # required_if above, but are kept as a belt-and-braces guard.
    if module.params['name'] is None:
        module.fail_json(msg="name cannot be None")
    if module.params['state'] == 'present' and module.params['value'] is None:
        module.fail_json(msg="value cannot be None")

    # In case of in-line params
    if module.params['name'] == '':
        module.fail_json(msg="name cannot be blank")
    if module.params['state'] == 'present' and module.params['value'] == '':
        module.fail_json(msg="value cannot be blank")

    result = SysctlModule(module)

    module.exit_json(changed=result.changed)


if __name__ == '__main__':
    main()
diff --git a/plugins/shell/__init__.py b/plugins/shell/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/plugins/shell/csh.py b/plugins/shell/csh.py
new file mode 100644
index 0000000..4836dce
--- /dev/null
+++ b/plugins/shell/csh.py
@@ -0,0 +1,38 @@
# Copyright (c) 2014, Chris Church
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.plugins.shell import ShellBase

DOCUMENTATION = '''
    name: csh
    plugin_type: shell
    short_description: C shell (/bin/csh)
    description:
      - When you
have no other option than to use csh
    extends_documentation_fragment:
      - shell_common
'''


class ShellModule(ShellBase):
    """Shell plugin for csh/tcsh: overrides the operator/quoting constants
    ShellBase uses to compose remote command lines."""

    # Common shell filenames that this plugin handles
    COMPATIBLE_SHELLS = frozenset(('csh', 'tcsh'))
    # Family of shells this has. Must match the filename without extension
    SHELL_FAMILY = 'csh'

    # How to end lines in a python script one-liner
    _SHELL_EMBEDDED_PY_EOL = '\\\n'
    _SHELL_REDIRECT_ALLNULL = '>& /dev/null'
    _SHELL_AND = '&&'
    _SHELL_OR = '||'
    _SHELL_SUB_LEFT = '"`'
    _SHELL_SUB_RIGHT = '`"'
    _SHELL_GROUP_LEFT = '('
    _SHELL_GROUP_RIGHT = ')'

    def env_prefix(self, **kwargs):
        # csh has no VAR=val prefix syntax, so wrap the base prefix in env(1)
        return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
diff --git a/plugins/shell/fish.py b/plugins/shell/fish.py
new file mode 100644
index 0000000..8d0a4aa
--- /dev/null
+++ b/plugins/shell/fish.py
@@ -0,0 +1,90 @@
# Copyright (c) 2014, Chris Church
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import shlex_quote
from ansible.plugins.shell.sh import ShellModule as ShModule

DOCUMENTATION = '''
    name: fish
    plugin_type: shell
    short_description: fish shell (/bin/fish)
    description:
      - This is here because some people are restricted to fish.
    extends_documentation_fragment:
      - shell_common
'''


class ShellModule(ShModule):
    """Shell plugin for fish: fish is not POSIX sh, so it overrides the
    operators (`; and` / `; or`), env-var syntax (`set -lx`), and the
    checksum one-liner inherited from the sh plugin."""

    # Common shell filenames that this plugin handles
    COMPATIBLE_SHELLS = frozenset(('fish',))
    # Family of shells this has. Must match the filename without extension
    SHELL_FAMILY = 'fish'

    _SHELL_EMBEDDED_PY_EOL = '\n'
    _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
    _SHELL_AND = '; and'
    _SHELL_OR = '; or'
    _SHELL_SUB_LEFT = '('
    _SHELL_SUB_RIGHT = ')'
    _SHELL_GROUP_LEFT = ''
    _SHELL_GROUP_RIGHT = ''

    def env_prefix(self, **kwargs):
        # fish sets environment variables with `set -lx NAME value;`, not
        # the POSIX NAME=value prefix form
        env = self.env.copy()
        env.update(kwargs)
        return ' '.join(['set -lx %s %s;' % (k, shlex_quote(text_type(v))) for k, v in env.items()])

    def build_module_command(self, env_string, shebang, cmd, arg_path=None):
        """Compose the remote command line: env prefix, interpreter from the
        shebang, the (quoted) command, and an optional trailing args path."""
        # don't quote the cmd if it's an empty string, because this will break pipelining mode
        if cmd.strip() != '':
            cmd = shlex_quote(cmd)
        cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
        if arg_path is not None:
            cmd_parts.append(arg_path)
        new_cmd = " ".join(cmd_parts)
        return new_cmd

    def checksum(self, path, python_interp):
        """Return a fish-compatible one-liner that prints `<sha1> <path>` for
        `path`, or `<rc> <path>` with a numeric error code (see table below)."""
        # The following test is fish-compliant.
        #
        # In the following test, each condition is a check and logical
        # comparison (or or and) that sets the rc value. Every check is run so
        # the last check in the series to fail will be the rc that is
        # returned.
        #
        # If a check fails we error before invoking the hash functions because
        # hash functions may successfully take the hash of a directory on BSDs
        # (UFS filesystem?) which is not what the rest of the ansible code
        # expects
        #
        # If all of the available hashing methods fail we fail with an rc of
        # 0. This logic is added to the end of the cmd at the bottom of this
        # function.

        # Return codes:
        # checksum: success!
        # 0: Unknown error
        # 1: Remote file does not exist
        # 2: No read permissions on the file
        # 3: File is a directory
        # 4: No python interpreter

        # Quoting gets complex here. We're writing a python string that's
        # used by a variety of shells on the remote host to invoke a python
        # "one-liner".
        shell_escaped_path = shlex_quote(path)
        test = "set rc flag; [ -r %(p)s ] %(shell_or)s set rc 2; [ -f %(p)s ] %(shell_or)s set rc 1; [ -d %(p)s ] %(shell_and)s set rc 3; %(i)s -V 2>/dev/null %(shell_or)s set rc 4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"$rc \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR)  # NOQA
        csums = [
            u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL),  # NOQA  Python > 2.4 (including python3)
            u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL),  # NOQA  Python == 2.4
        ]

        # try each hasher in turn, falling back to an rc-0 "unknown error" echo
        cmd = (" %s " % self._SHELL_OR).join(csums)
        cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path)
        return cmd
diff --git a/tests/.gitignore b/tests/.gitignore
new file mode 100644
index 0000000..ea1472e
--- /dev/null
+++ b/tests/.gitignore
@@ -0,0 +1 @@
output/
diff --git a/tests/integration/targets/acl/aliases b/tests/integration/targets/acl/aliases
new file mode 100644
index 0000000..ed821c2
--- /dev/null
+++ b/tests/integration/targets/acl/aliases
@@ -0,0 +1,2 @@
shippable/posix/group2
skip/aix
diff --git a/tests/integration/targets/acl/tasks/acl.yml b/tests/integration/targets/acl/tasks/acl.yml
new file mode 100644
index 0000000..7770ed4
--- /dev/null
+++ b/tests/integration/targets/acl/tasks/acl.yml
@@ -0,0 +1,205 @@
# (c) 2017, Martin Krizek

# This file is part of Ansible
#
# 
Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: Create ansible user + user: + name: "{{ test_user }}" + +- name: Create ansible group + group: + name: "{{ test_group }}" + +- name: Create ansible file + file: + path: "{{ test_file }}" + state: touch + +- name: Create ansible dir + file: + path: "{{ test_dir }}" + state: directory +############################################################################## +- name: Grant ansible user read access to a file + acl: + path: "{{ test_file }}" + entity: "{{ test_user }}" + etype: user + permissions: r + state: present + register: output + +- name: get getfacl output + shell: "getfacl {{ test_file | quote }}" + register: getfacl_output + +- name: verify output + assert: + that: + - output is changed + - output is not failed + - "'user:{{ test_user }}:r--' in output.acl" + - "'user:{{ test_user }}:r--' in getfacl_output.stdout_lines" +############################################################################## +- name: Obtain the acl for a specific file + acl: + path: "{{ test_file }}" + register: output + +- name: get getfacl output + shell: "getfacl {{ test_file | quote }}" + register: getfacl_output + +- name: verify output + assert: + that: + - output is not changed + - output is not failed + - "'user::rw-' in output.acl" + - "'user:{{ test_user }}:r--' in output.acl" + - "'group::r--' in output.acl" + - "'mask::r--' in output.acl" + - "'other::r--' in output.acl" + 
- "'user::rw-' in getfacl_output.stdout_lines" + - "'user:{{ test_user }}:r--' in getfacl_output.stdout_lines" + - "'group::r--' in getfacl_output.stdout_lines" + - "'mask::r--' in getfacl_output.stdout_lines" + - "'other::r--' in getfacl_output.stdout_lines" +############################################################################## +- name: Removes the acl for ansible user on a specific file + acl: + path: "{{ test_file }}" + entity: "{{ test_user }}" + etype: user + state: absent + register: output + +- name: get getfacl output + shell: "getfacl {{ test_file | quote }}" + register: getfacl_output + +- name: verify output + assert: + that: + - output is changed + - output is not failed + - "'user:{{ test_user }}:r--' not in output.acl" + - "'user:{{ test_user }}:r--' not in getfacl_output.stdout_lines" +############################################################################## +- name: Sets default acl for ansible user on ansible dir + acl: + path: "{{ test_dir }}" + entity: "{{ test_user }}" + etype: user + permissions: rw + default: yes + state: present + register: output + +- name: get getfacl output + shell: "getfacl {{ test_dir | quote }}" + register: getfacl_output + +- name: verify output + assert: + that: + - output is changed + - output is not failed + - "'user:{{ test_user }}:rw-' in output.acl" + - "'default:user:{{ test_user }}:rw-' in getfacl_output.stdout_lines" +############################################################################## +- name: Cleanup + shell: "setfacl -b {{ test_dir | quote }}" +############################################################################## +- name: Same as previous but using entry shorthand + acl: + path: "{{ test_dir }}" + entry: "user:{{ test_user }}:rw-" + default: yes + state: present + register: output + +- name: get getfacl output + shell: "getfacl {{ test_dir | quote }}" + register: getfacl_output + +- name: verify output + assert: + that: + - output is changed + - output is not failed + - 
"'user:{{ test_user }}:rw-' in output.acl" + - "'default:user:{{ test_user }}:rw-' in getfacl_output.stdout_lines" +############################################################################## +- name: Same as previous, to test idempotence + acl: + path: "{{ test_dir }}" + entry: "user:{{ test_user }}:rw-" + default: yes + state: present + register: output + +- name: get getfacl output + shell: "getfacl {{ test_dir | quote }}" + register: getfacl_output + +- name: verify output + assert: + that: + - output is not changed + - output is not failed + - "'user:{{ test_user }}:rw-' in output.acl" + - "'default:user:{{ test_user }}:rw-' in getfacl_output.stdout_lines" +############################################################################## +- name: Cleanup + shell: "setfacl -b {{ test_dir | quote }}" +############################################################################## +- name: Set default acls + acl: + path: "{{ test_dir }}" + entry: "{{ item }}" + default: yes + state: present + with_items: + - "user:{{ test_user }}:rw-" + - "group:{{ test_group }}:rw-" + +- name: Remove default group test_user acl + acl: + path: "{{ test_dir }}" + entry: "group:{{ test_group }}:rw-" + default: yes + state: absent + register: output + +- name: get getfacl output + shell: "getfacl {{ test_dir | quote }}" + register: getfacl_output + +- name: verify output + assert: + that: + - output is changed + - output is not failed + - "'user::rwx' in getfacl_output.stdout_lines" + - "'group::r-x' in getfacl_output.stdout_lines" + - "'other::r-x' in getfacl_output.stdout_lines" + - "'default:user::rwx' in getfacl_output.stdout_lines" + - "'default:user:{{ test_user }}:rw-' in getfacl_output.stdout_lines" + - "'default:group::r-x' in getfacl_output.stdout_lines" + - "'default:mask::rwx' in getfacl_output.stdout_lines" + - "'default:other::r-x' in getfacl_output.stdout_lines" + - "'default:group:{{ test_group }}:rw-' not in getfacl_output.stdout_lines" diff --git 
a/tests/integration/targets/acl/tasks/main.yml b/tests/integration/targets/acl/tasks/main.yml new file mode 100644 index 0000000..84af286 --- /dev/null +++ b/tests/integration/targets/acl/tasks/main.yml @@ -0,0 +1,36 @@ +# (c) 2017, Martin Krizek + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- block: + + - include: acl.yml + when: ansible_system == 'Linux' # TODO enable acls mount option on FreeBSD to test it there too + + always: + - name: delete created directory and file + file: + path: '{{ item }}' + state: absent + with_items: + - '{{ test_dir }}' + - '{{ test_file }}' + + vars: + test_user: ansible_user + test_group: ansible_group + test_file: '{{ output_dir }}/ansible file' + test_dir: "{{ output_dir }}/ansible_dir/with some space" diff --git a/tests/integration/targets/at/aliases b/tests/integration/targets/at/aliases new file mode 100644 index 0000000..6eae8bd --- /dev/null +++ b/tests/integration/targets/at/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +destructive diff --git a/tests/integration/targets/at/meta/main.yml b/tests/integration/targets/at/meta/main.yml new file mode 100644 index 0000000..07faa21 --- /dev/null +++ b/tests/integration/targets/at/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/tests/integration/targets/at/tasks/main.yml b/tests/integration/targets/at/tasks/main.yml new file mode 100644 index 0000000..cd09e11 --- 
/dev/null +++ b/tests/integration/targets/at/tasks/main.yml @@ -0,0 +1,62 @@ +# Test code for the at module. +# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- set_fact: output_dir_test={{output_dir}}/at + +- name: make sure our testing sub-directory does not exist + file: path="{{ output_dir_test }}" state=absent + +- name: create our testing sub-directory + file: path="{{ output_dir_test }}" state=directory + +## +## at +## + +- name: define distros to attempt installing at on + set_fact: + package_distros: + - RedHat + - CentOS + - ScientificLinux + - Fedora + - Ubuntu + - Debian + - openSUSE Leap + +- name: ensure at is installed + package: + name: at + state: present + when: ansible_distribution in package_distros + +- name: run the first example + at: + command: "ls -d / > /dev/null" + count: 20 + units: minutes + register: at_test0 +- debug: var=at_test0 +- name: validate results + assert: + that: + - 'at_test0.changed is defined' + - 'at_test0.count is defined' + - 'at_test0.script_file is defined' + - 'at_test0.state is defined' + - 'at_test0.units is defined' diff --git a/tests/integration/targets/authorized_key/aliases b/tests/integration/targets/authorized_key/aliases new file mode 100644 index 0000000..9af2d79 --- /dev/null +++ b/tests/integration/targets/authorized_key/aliases @@ -0,0 +1,2 @@ +needs/root +shippable/posix/group2 diff --git 
a/tests/integration/targets/authorized_key/defaults/main.yml b/tests/integration/targets/authorized_key/defaults/main.yml new file mode 100644 index 0000000..d99e298 --- /dev/null +++ b/tests/integration/targets/authorized_key/defaults/main.yml @@ -0,0 +1,36 @@ +dss_key_basic: ssh-dss DATA_BASIC root@testing +dss_key_unquoted_option: idle-timeout=5m ssh-dss DATA_UNQUOTED_OPTION root@testing +dss_key_command: command="/bin/true" ssh-dss DATA_COMMAND root@testing +dss_key_complex_command: command="echo foo 'bar baz'" ssh-dss DATA_COMPLEX_COMMAND root@testing +dss_key_command_single_option: no-port-forwarding,command="/bin/true" ssh-dss DATA_COMMAND_SINGLE_OPTIONS root@testing +dss_key_command_multiple_options: no-port-forwarding,idle-timeout=5m,command="/bin/true" ssh-dss DATA_COMMAND_MULTIPLE_OPTIONS root@testing +dss_key_trailing: ssh-dss DATA_TRAILING root@testing foo bar baz +rsa_key_basic: ssh-rsa DATA_BASIC root@testing + +multiple_key_base: | + ssh-rsa DATA_BASIC 1@testing + ssh-dss DATA_TRAILING 2@testing foo bar baz + ssh-dss DATA_TRAILING 3@testing foo bar baz + ecdsa-sha2-nistp521 ECDSA_DATA 4@testing + +multiple_key_different_order: | + ssh-dss DATA_TRAILING 2@testing foo bar baz + ssh-dss DATA_TRAILING 3@testing foo bar baz + ssh-rsa DATA_BASIC 1@testing + ecdsa-sha2-nistp521 ECDSA_DATA 4@testing + +multiple_key_different_order_2: | + ssh-dss DATA_TRAILING 2@testing foo bar baz + ssh-rsa WHATEVER 2.5@testing + ssh-dss DATA_TRAILING 3@testing foo bar baz + ssh-rsa DATA_BASIC 1@testing + ecdsa-sha2-nistp521 ECDSA_DATA 4@testing + +multiple_key_exclusive: | + ssh-rsa DATA_BASIC 1@testing + ecdsa-sha2-nistp521 ECDSA_DATA 4@testing + +multiple_keys_comments: | + ssh-rsa DATA_BASIC 1@testing + # I like adding comments yo-dude-this-is-not-a-key INVALID_DATA 2@testing + ecdsa-sha2-nistp521 ECDSA_DATA 4@testing diff --git a/tests/integration/targets/authorized_key/files/existing_authorized_keys 
b/tests/integration/targets/authorized_key/files/existing_authorized_keys new file mode 100644 index 0000000..d480f8a --- /dev/null +++ b/tests/integration/targets/authorized_key/files/existing_authorized_keys @@ -0,0 +1,5 @@ +# I like candy +ssh-rsa somekeydata somekeyalias +# It is a very pleasant temperature outside today. +ssh-rsa otherkeydata otherkeyalias + diff --git a/tests/integration/targets/authorized_key/meta/main.yml b/tests/integration/targets/authorized_key/meta/main.yml new file mode 100644 index 0000000..07faa21 --- /dev/null +++ b/tests/integration/targets/authorized_key/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/tests/integration/targets/authorized_key/tasks/main.yml b/tests/integration/targets/authorized_key/tasks/main.yml new file mode 100644 index 0000000..1453987 --- /dev/null +++ b/tests/integration/targets/authorized_key/tasks/main.yml @@ -0,0 +1,485 @@ +# test code for the authorized_key module +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ + +# ------------------------------------------------------------- +# Setup steps + + +- name: copy an existing file in place with comments + copy: + src: existing_authorized_keys + dest: "{{ output_dir | expanduser }}/authorized_keys" + +- name: add multiple keys different order + authorized_key: + user: root + key: "{{ multiple_key_different_order_2 }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: get the file content + shell: cat "{{ output_dir | expanduser }}/authorized_keys" + changed_when: no + register: multiple_keys_existing + +- name: assert that the key was added and comments and ordering preserved + assert: + that: + - 'result.changed == True' + - '"# I like candy" in multiple_keys_existing.stdout' + - '"# I like candy" in multiple_keys_existing.stdout_lines[0]' + - '"ssh-rsa DATA_BASIC 1@testing" in multiple_keys_existing.stdout' + # The specific index is a little fragile, but I want to verify the line shows up + # as the 3rd line in the new entries after the existing entries and comments are preserved + - '"ssh-rsa DATA_BASIC 1@testing" in multiple_keys_existing.stdout_lines[7]' + +# start afresh + +- name: remove file foo.txt + file: + path: "{{ output_dir | expanduser }}/authorized_keys" + state: absent + +- name: touch the authorized_keys file + file: + dest: "{{ output_dir }}/authorized_keys" + state: touch + register: result + +- name: assert that the authorized_keys file was created + assert: + that: + - 'result.changed == True' + - 'result.state == "file"' + +- name: add multiple keys + authorized_key: + user: root + key: "{{ multiple_key_base }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - 'result.changed == True' + - 'result.key == multiple_key_base' + - 'result.key_options == None' + +- name: add multiple keys different order + authorized_key: + user: root + key: "{{ 
multiple_key_different_order }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - 'result.changed == True' + - 'result.key == multiple_key_different_order' + - 'result.key_options == None' + +- name: add multiple keys exclusive + authorized_key: + user: root + key: "{{ multiple_key_exclusive }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + exclusive: true + register: result + +- name: assert that the key was added + assert: + that: + - 'result.changed == True' + - 'result.key == multiple_key_exclusive' + - 'result.key_options == None' + +- name: add multiple keys in different calls + authorized_key: + user: root + key: "ecdsa-sha2-nistp521 ECDSA_DATA 4@testing" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: add multiple keys in different calls + authorized_key: + user: root + key: "ssh-rsa DATA_BASIC 1@testing" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: get the file content + shell: cat "{{ output_dir | expanduser }}/authorized_keys" + changed_when: no + register: multiple_keys_at_a_time + +- name: assert that the key was added + assert: + that: + - 'result.changed == false' + - 'multiple_keys_at_a_time.stdout == multiple_key_exclusive.strip()' + +- name: add multiple keys comment + authorized_key: + user: root + key: "{{ multiple_keys_comments }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + exclusive: true + register: result + +- name: get the file content + shell: cat "{{ output_dir | expanduser }}/authorized_keys" + changed_when: no + register: multiple_keys_comments + +- name: assert that the keys exist and comment only lines were not added + assert: + that: + - 'result.changed == False' + - 'multiple_keys_comments.stdout == multiple_key_exclusive.strip()' + - 'result.key_options == 
None' + + + +# ------------------------------------------------------------- +# basic ssh-dss key + +- name: add basic ssh-dss key + authorized_key: user=root key="{{ dss_key_basic }}" state=present path="{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - 'result.changed == True' + - 'result.key == dss_key_basic' + - 'result.key_options == None' + +- name: re-add basic ssh-dss key + authorized_key: user=root key="{{ dss_key_basic }}" state=present path="{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - 'result.changed == False' + +# ------------------------------------------------------------- +# ssh-dss key with an unquoted option + +- name: add ssh-dss key with an unquoted option + authorized_key: + user: root + key: "{{ dss_key_unquoted_option }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - 'result.changed == True' + - 'result.key == dss_key_unquoted_option' + - 'result.key_options == None' + +- name: re-add ssh-dss key with an unquoted option + authorized_key: + user: root + key: "{{ dss_key_unquoted_option }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - 'result.changed == False' + +# ------------------------------------------------------------- +# ssh-dss key with a leading command="/bin/foo" + +- name: add ssh-dss key with a leading command + authorized_key: + user: root + key: "{{ dss_key_command }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - 'result.changed == True' + - 'result.key == dss_key_command' + - 'result.key_options == None' + +- name: re-add ssh-dss key with a leading 
command + authorized_key: + user: root + key: "{{ dss_key_command }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - 'result.changed == False' + +# ------------------------------------------------------------- +# ssh-dss key with a complex quoted leading command +# ie. command="/bin/echo foo 'bar baz'" + +- name: add ssh-dss key with a complex quoted leading command + authorized_key: + user: root + key: "{{ dss_key_complex_command }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - 'result.changed == True' + - 'result.key == dss_key_complex_command' + - 'result.key_options == None' + +- name: re-add ssh-dss key with a complex quoted leading command + authorized_key: + user: root + key: "{{ dss_key_complex_command }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - 'result.changed == False' + +# ------------------------------------------------------------- +# ssh-dss key with a command and a single option, which are +# in a comma-separated list + +- name: add ssh-dss key with a command and a single option + authorized_key: + user: root + key: "{{ dss_key_command_single_option }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - 'result.changed == True' + - 'result.key == dss_key_command_single_option' + - 'result.key_options == None' + +- name: re-add ssh-dss key with a command and a single option + authorized_key: + user: root + key: "{{ dss_key_command_single_option }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - 'result.changed == False' + 
+# ------------------------------------------------------------- +# ssh-dss key with a command and multiple other options + +- name: add ssh-dss key with a command and multiple options + authorized_key: + user: root + key: "{{ dss_key_command_multiple_options }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - 'result.changed == True' + - 'result.key == dss_key_command_multiple_options' + - 'result.key_options == None' + +- name: re-add ssh-dss key with a command and multiple options + authorized_key: + user: root + key: "{{ dss_key_command_multiple_options }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - 'result.changed == False' + +# ------------------------------------------------------------- +# ssh-dss key with multiple trailing parts, which are space- +# separated and not quoted in any way + +- name: add ssh-dss key with trailing parts + authorized_key: + user: root + key: "{{ dss_key_trailing }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - 'result.changed == True' + - 'result.key == dss_key_trailing' + - 'result.key_options == None' + +- name: re-add ssh-dss key with trailing parts + authorized_key: + user: root + key: "{{ dss_key_trailing }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - 'result.changed == False' + +# ------------------------------------------------------------- +# basic ssh-dss key with mutliple permit-open options +# https://github.com/ansible/ansible-modules-core/issues/1715 + +- name: add basic ssh-dss key with multi-opts + authorized_key: + user: root + key: "{{ dss_key_basic }}" + key_options: 
'no-agent-forwarding,no-X11-forwarding,permitopen="10.9.8.1:8080",permitopen="10.9.8.1:9001"' + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: assert that the key with multi-opts was added + assert: + that: + - 'result.changed == True' + - 'result.key == dss_key_basic' + - 'result.key_options == "no-agent-forwarding,no-X11-forwarding,permitopen=\"10.9.8.1:8080\",permitopen=\"10.9.8.1:9001\""' + +- name: get the file content + shell: cat "{{ output_dir | expanduser }}/authorized_keys" | fgrep DATA_BASIC + changed_when: no + register: content + +- name: validate content + assert: + that: + - 'content.stdout == "no-agent-forwarding,no-X11-forwarding,permitopen=\"10.9.8.1:8080\",permitopen=\"10.9.8.1:9001\" ssh-dss DATA_BASIC root@testing"' + +# ------------------------------------------------------------- +# check mode + +- name: copy an existing file in place with comments + copy: + src: existing_authorized_keys + dest: "{{ output_dir | expanduser }}/authorized_keys" + +- authorized_key: + user: root + key: "{{ multiple_key_different_order_2 }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + check_mode: True + register: result + +- name: assert that the file was not changed + copy: + src: existing_authorized_keys + dest: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- assert: + that: + - 'result.changed == False' + +# ------------------------------------------------------------- +# comments + +- name: Add rsa key with existing comment + authorized_key: + user: root + key: "{{ rsa_key_basic }}" + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: Change the comment on an existing key + authorized_key: + user: root + key: "{{ rsa_key_basic }}" + comment: user@acme.com + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: get the file content + shell: cat "{{ 
output_dir | expanduser }}/authorized_keys" | fgrep DATA_BASIC + changed_when: no + register: content + +- name: Assert that comment on an existing key was changed + assert: + that: + - "'user@acme.com' in content.stdout" + +- name: Set the same key with comment to ensure no changes are reported + authorized_key: + user: root + key: "{{ rsa_key_basic }}" + comment: user@acme.com + state: present + path: "{{ output_dir | expanduser }}/authorized_keys" + register: result + +- name: Assert that no changes were made when running again + assert: + that: + - not result.changed + +- debug: + var: "{{ item }}" + verbosity: 1 + with_items: + - result + - content diff --git a/tests/integration/targets/mount/aliases b/tests/integration/targets/mount/aliases new file mode 100644 index 0000000..77b5220 --- /dev/null +++ b/tests/integration/targets/mount/aliases @@ -0,0 +1,4 @@ +needs/privileged +needs/root +shippable/posix/group2 +skip/aix diff --git a/tests/integration/targets/mount/tasks/main.yml b/tests/integration/targets/mount/tasks/main.yml new file mode 100644 index 0000000..9145078 --- /dev/null +++ b/tests/integration/targets/mount/tasks/main.yml @@ -0,0 +1,280 @@ +- name: Create the mount point + file: + state: directory + path: '{{ output_dir }}/mount_dest' +- name: Create a directory to bind mount + file: + state: directory + path: '{{ output_dir }}/mount_source' +- name: Put something in the directory so we see that it worked + copy: + content: 'Testing + + ' + dest: '{{ output_dir }}/mount_source/test_file' + register: orig_info +- name: Bind mount a filesystem (Linux) + mount: + src: '{{ output_dir }}/mount_source' + name: '{{ output_dir }}/mount_dest' + state: mounted + fstype: None + opts: bind + when: ansible_system == 'Linux' + register: bind_result_linux +- name: Bind mount a filesystem (FreeBSD) + mount: + src: '{{ output_dir }}/mount_source' + name: '{{ output_dir }}/mount_dest' + state: mounted + fstype: nullfs + when: ansible_system == 'FreeBSD' + 
register: bind_result_freebsd +- name: get checksum for bind mounted file + stat: + path: '{{ output_dir }}/mount_dest/test_file' + when: ansible_system in ('FreeBSD', 'Linux') + register: dest_stat +- name: assert the bind mount was successful + assert: + that: + - (ansible_system == 'Linux' and bind_result_linux['changed']) or (ansible_system == 'FreeBSD' and bind_result_freebsd['changed']) + - dest_stat['stat']['exists'] + - orig_info['checksum'] == dest_stat['stat']['checksum'] + when: ansible_system in ('FreeBSD', 'Linux') +- name: Bind mount a filesystem (Linux) + mount: + src: '{{ output_dir }}/mount_source' + name: '{{ output_dir }}/mount_dest' + state: mounted + fstype: None + opts: bind + when: ansible_system == 'Linux' + register: bind_result_linux +- name: Bind mount a filesystem (FreeBSD) + mount: + src: '{{ output_dir }}/mount_source' + name: '{{ output_dir }}/mount_dest' + state: mounted + fstype: nullfs + when: ansible_system == 'FreeBSD' + register: bind_result_freebsd +- name: Make sure we didn't mount a second time + assert: + that: + - (ansible_system == 'Linux' and not bind_result_linux['changed']) or (ansible_system == 'FreeBSD' and not bind_result_freebsd['changed']) + when: ansible_system in ('FreeBSD', 'Linux') +- name: Remount filesystem with different opts (Linux) + mount: + src: '{{ output_dir }}/mount_source' + name: '{{ output_dir }}/mount_dest' + state: mounted + fstype: None + opts: bind,ro + when: ansible_system == 'Linux' + register: bind_result_linux +- name: Remount filesystem with different opts (FreeBSD) + mount: + src: '{{ output_dir }}/mount_source' + name: '{{ output_dir }}/mount_dest' + state: mounted + fstype: nullfs + opts: ro + when: ansible_system == 'FreeBSD' + register: bind_result_freebsd +- name: Get mount options + shell: mount | grep mount_dest | grep -E -w '(ro|read-only)' | wc -l + register: remount_options +- name: Make sure the filesystem now has the new opts + assert: + that: + - (ansible_system == 'Linux' 
and bind_result_linux['changed']) or (ansible_system == 'FreeBSD' and bind_result_freebsd['changed']) + - '''1'' in remount_options.stdout' + - 1 == remount_options.stdout_lines | length + when: ansible_system in ('FreeBSD', 'Linux') +- name: Unmount the bind mount + mount: + name: '{{ output_dir }}/mount_dest' + state: absent + when: ansible_system in ('Linux', 'FreeBSD') + register: unmount_result +- name: Make sure the file no longer exists in dest + stat: + path: '{{ output_dir }}/mount_dest/test_file' + when: ansible_system in ('FreeBSD', 'Linux') + register: dest_stat +- name: Check that we unmounted + assert: + that: + - unmount_result['changed'] + - not dest_stat['stat']['exists'] + when: ansible_system in ('FreeBSD', 'Linux') +- name: Create fstab record for the first swap file + mount: + name: none + src: /tmp/swap1 + opts: sw + fstype: swap + state: present + register: swap1_created + when: ansible_system in ('Linux') +- name: Try to create fstab record for the first swap file again + mount: + name: none + src: /tmp/swap1 + opts: sw + fstype: swap + state: present + register: swap1_created_again + when: ansible_system in ('Linux') +- name: Check that we created the swap1 record + assert: + that: + - swap1_created['changed'] + - not swap1_created_again['changed'] + when: ansible_system in ('Linux') +- name: Create fstab record for the second swap file + mount: + name: none + src: /tmp/swap2 + opts: sw + fstype: swap + state: present + register: swap2_created + when: ansible_system in ('Linux') +- name: Try to create fstab record for the second swap file again + mount: + name: none + src: /tmp/swap2 + opts: sw + fstype: swap + state: present + register: swap2_created_again + when: ansible_system in ('Linux') +- name: Check that we created the swap2 record + assert: + that: + - swap2_created['changed'] + - not swap2_created_again['changed'] + when: ansible_system in ('Linux') +- name: Remove the fstab record for the first swap file + mount: + name: none +
src: /tmp/swap1 + state: absent + register: swap1_removed + when: ansible_system in ('Linux') +- name: Try to remove the fstab record for the first swap file again + mount: + name: none + src: /tmp/swap1 + state: absent + register: swap1_removed_again + when: ansible_system in ('Linux') +- name: Check that we removed the swap1 record + assert: + that: + - swap1_removed['changed'] + - not swap1_removed_again['changed'] + when: ansible_system in ('Linux') +- name: Remove the fstab record for the second swap file + mount: + name: none + src: /tmp/swap2 + state: absent + register: swap2_removed + when: ansible_system in ('Linux') +- name: Try to remove the fstab record for the second swap file again + mount: + name: none + src: /tmp/swap2 + state: absent + register: swap2_removed_again + when: ansible_system in ('Linux') +- name: Check that we removed the swap2 record + assert: + that: + - swap2_removed['changed'] + - not swap2_removed_again['changed'] + when: ansible_system in ('Linux') +- name: Create fstab record with missing last two fields + copy: + dest: /etc/fstab + content: '//nas/photo /home/jik/pictures cifs defaults,credentials=/etc/security/nas.creds,uid=jik,gid=users,forceuid,forcegid,noserverino,_netdev + + ' + when: ansible_system in ('Linux') +- name: Try to change the fstab record with the missing last two fields + mount: + src: //nas/photo + path: /home/jik/pictures + fstype: cifs + opts: defaults,credentials=/etc/security/nas.creds,uid=jik,gid=users,forceuid,forcegid,noserverino,_netdev,x-systemd.mount-timeout=0 + state: present + register: optional_fields_update + when: ansible_system in ('Linux') +- name: Get the content of the fstab file + shell: cat /etc/fstab + register: optional_fields_content + when: ansible_system in ('Linux') +- name: Check if the line containing the missing last two fields was changed + assert: + that: + - optional_fields_update['changed'] + - ''' 0 0'' in optional_fields_content.stdout' + - 1 == 
optional_fields_content.stdout_lines | length + when: ansible_system in ('Linux') +- name: Block to test remounted option + block: + - name: Create empty file + command: dd if=/dev/zero of=/tmp/myfs.img bs=1048576 count=20 + when: ansible_system in ('Linux') + - name: Format FS + when: ansible_system in ('Linux') + community.general.filesystem: + fstype: ext3 + dev: /tmp/myfs.img + - name: Mount the FS for the first time + mount: + path: /tmp/myfs + src: /tmp/myfs.img + fstype: ext2 + state: mounted + when: ansible_system in ('Linux') + - name: Get the last write time + shell: "dumpe2fs /tmp/myfs.img 2>/dev/null | grep -i 'last write time:' |cut -d: -f2-" + register: last_write_time + when: ansible_system in ('Linux') + - name: Wait 2 second + pause: + seconds: 2 + when: ansible_system in ('Linux') + - name: Test if the FS is remounted + mount: + path: /tmp/myfs + state: remounted + when: ansible_system in ('Linux') + - name: Get again the last write time + shell: "dumpe2fs /tmp/myfs.img 2>/dev/null | grep -i 'last write time:' |cut -d: -f2-" + register: last_write_time2 + when: ansible_system in ('Linux') + - name: Fail if they are the same + fail: + msg: Filesystem was not remounted, testing of the module failed!
+ when: last_write_time is defined and last_write_time2 is defined and last_write_time.stdout == last_write_time2.stdout and ansible_system in ('Linux') + always: + - name: Umount the test FS + mount: + path: /tmp/myfs + src: /tmp/myfs.img + opts: loop + state: absent + when: ansible_system in ('Linux') + - name: Remove the test FS + file: + path: '{{ item }}' + state: absent + loop: + - /tmp/myfs.img + - /tmp/myfs + when: ansible_system in ('Linux') diff --git a/tests/integration/targets/patch/aliases b/tests/integration/targets/patch/aliases new file mode 100644 index 0000000..9a80b36 --- /dev/null +++ b/tests/integration/targets/patch/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group2 +skip/aix diff --git a/tests/integration/targets/patch/files/origin.txt b/tests/integration/targets/patch/files/origin.txt new file mode 100644 index 0000000..0ef3d73 --- /dev/null +++ b/tests/integration/targets/patch/files/origin.txt @@ -0,0 +1,19 @@ +Stet clita kasd gubergren,no sea takimata sanctus est Lorem ipsum dolor +sit amet. + +Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod +tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At +vero eos et accusam et justo duo dolores et ea rebum. + +Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor +sit amet. + +Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod +tempor invidunt ut labore et dolore magna aliquyam erat. + +Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod +tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At +vero eos et accusam et justo duo dolores et ea rebum. + +Stet clita kasd gubergren,no sea takimata sanctus est Lorem ipsum dolor +sit amet.
diff --git a/tests/integration/targets/patch/files/result.patch b/tests/integration/targets/patch/files/result.patch new file mode 100644 index 0000000..d672b1e --- /dev/null +++ b/tests/integration/targets/patch/files/result.patch @@ -0,0 +1,24 @@ +--- origin.txt 2018-05-12 10:22:14.155109584 +0200 ++++ result.txt 2018-05-12 10:18:07.230811204 +0200 +@@ -2,18 +2,12 @@ + sit amet. + + Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod +-tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At +-vero eos et accusam et justo duo dolores et ea rebum. +- +-Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor +-sit amet. +- +-Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod +-tempor invidunt ut labore et dolore magna aliquyam erat. ++tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. ++At vero eos et accusam et justo duo dolores et ea rebum. + + Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod + tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At + vero eos et accusam et justo duo dolores et ea rebum. + +-Stet clita kasd gubergren,no sea takimata sanctus est Lorem ipsum dolor ++Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor + sit amet. diff --git a/tests/integration/targets/patch/files/result.txt b/tests/integration/targets/patch/files/result.txt new file mode 100644 index 0000000..ec40e3b --- /dev/null +++ b/tests/integration/targets/patch/files/result.txt @@ -0,0 +1,13 @@ +Stet clita kasd gubergren,no sea takimata sanctus est Lorem ipsum dolor +sit amet. + +Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod +tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. +At vero eos et accusam et justo duo dolores et ea rebum. 
+ +Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod +tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At +vero eos et accusam et justo duo dolores et ea rebum. + +Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor +sit amet. diff --git a/tests/integration/targets/patch/meta/main.yml b/tests/integration/targets/patch/meta/main.yml new file mode 100644 index 0000000..8828391 --- /dev/null +++ b/tests/integration/targets/patch/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_tests diff --git a/tests/integration/targets/patch/tasks/main.yml b/tests/integration/targets/patch/tasks/main.yml new file mode 100644 index 0000000..c8533c1 --- /dev/null +++ b/tests/integration/targets/patch/tasks/main.yml @@ -0,0 +1,89 @@ +- name: ensure idempotency installed + package: + name: patch + when: ansible_distribution != "MacOSX" +- name: create a directory for the result + file: + dest: '{{ output_dir }}/patch' + state: directory + register: result +- name: assert the directory was created + assert: + that: + - result.state == 'directory' +- name: copy the origin file + copy: + src: ./origin.txt + dest: '{{ output_dir }}/patch/workfile.txt' + register: result +- name: patch the origin file in check mode + check_mode: true + register: result + community.general.patch: + src: result.patch + dest: '{{ output_dir }}/patch/workfile.txt' +- name: verify patch the origin file in check mode + assert: + that: + - result is changed +- name: patch the origin file + register: result + community.general.patch: + src: result.patch + dest: '{{ output_dir }}/patch/workfile.txt' +- name: verify patch the origin file + assert: + that: + - result is changed +- name: test patch the origin file idempotency + register: result + community.general.patch: + src: result.patch + dest: '{{ output_dir }}/patch/workfile.txt' +- name: verify test patch the origin file idempotency + assert: + that: + - result is not 
changed +- name: verify the resulted file matches expectations + copy: + src: ./result.txt + dest: '{{ output_dir }}/patch/workfile.txt' + register: result + failed_when: result is changed +- name: patch the workfile file in check mode state absent + check_mode: true + register: result + community.general.patch: + src: result.patch + dest: '{{ output_dir }}/patch/workfile.txt' + state: absent +- name: verify patch the workfile file in check mode state absent + assert: + that: + - result is changed +- name: patch the workfile file state absent + register: result + community.general.patch: + src: result.patch + dest: '{{ output_dir }}/patch/workfile.txt' + state: absent +- name: verify patch the workfile file state absent + assert: + that: + - result is changed +- name: patch the workfile file state absent idempotency + register: result + community.general.patch: + src: result.patch + dest: '{{ output_dir }}/patch/workfile.txt' + state: absent +- name: verify patch the workfile file state absent idempotency + assert: + that: + - result is not changed +- name: verify the resulted file matches expectations + copy: + src: ./origin.txt + dest: '{{ output_dir }}/patch/workfile.txt' + register: result + failed_when: result is changed diff --git a/tests/integration/targets/prepare_tests/tasks/main.yml b/tests/integration/targets/prepare_tests/tasks/main.yml new file mode 100644 index 0000000..e69de29 diff --git a/tests/integration/targets/seboolean/aliases b/tests/integration/targets/seboolean/aliases new file mode 100644 index 0000000..58a2a31 --- /dev/null +++ b/tests/integration/targets/seboolean/aliases @@ -0,0 +1,3 @@ +needs/root +shippable/posix/group2 +skip/aix diff --git a/tests/integration/targets/seboolean/tasks/main.yml b/tests/integration/targets/seboolean/tasks/main.yml new file mode 100644 index 0000000..df21691 --- /dev/null +++ b/tests/integration/targets/seboolean/tasks/main.yml @@ -0,0 +1,22 @@ +# (c) 2017, Martin Krizek + +# This file is part of Ansible 
+# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- include: seboolean.yml + when: + - ansible_selinux is defined + - ansible_selinux != False + - ansible_selinux.status == 'enabled' diff --git a/tests/integration/targets/seboolean/tasks/seboolean.yml b/tests/integration/targets/seboolean/tasks/seboolean.yml new file mode 100644 index 0000000..252ee3d --- /dev/null +++ b/tests/integration/targets/seboolean/tasks/seboolean.yml @@ -0,0 +1,86 @@ +# (c) 2017, Martin Krizek + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- name: install requirements for RHEL 7 and earlier + package: + name: policycoreutils-python + when: + - ansible_distribution == 'RedHat' and ansible_distribution_major_version is version('7', '<=') + +- name: install requirements for RHEL 8 and later + package: + name: policycoreutils-python-utils + when: + - ansible_distribution == 'RedHat' and ansible_distribution_major_version is version('8', '>=') + +- name: Cleanup + shell: setsebool -P httpd_can_network_connect 0 +########################################################################################## +- name: set flag and don't keep it persistent + seboolean: + name: httpd_can_network_connect + state: yes + register: output + +- name: get getsebool output + shell: semanage boolean -l | grep 'httpd_can_network_connect\W' + register: getsebool_output + +- name: check output + assert: + that: + - output is changed + - output is not failed + - output.name == 'httpd_can_network_connect' + - getsebool_output.stdout.startswith('httpd_can_network_connect (on , off)') +########################################################################################## +- name: unset flag + seboolean: + name: httpd_can_network_connect + state: no + +- name: get getsebool output + shell: semanage boolean -l | grep 'httpd_can_network_connect\W' + register: getsebool_output + +- name: check output + assert: + that: + - output is changed + - output is not failed + - output.name == 'httpd_can_network_connect' + - getsebool_output.stdout.startswith('httpd_can_network_connect (off , off)') +########################################################################################## +- name: set flag and keep it persistent + seboolean: + name: httpd_can_network_connect + state: yes + persistent: yes + register: output + +- name: get getsebool output + shell: semanage boolean -l | grep 'httpd_can_network_connect\W' + register: getsebool_output + +- name: check output + assert: + that: + - output is changed + - output is not failed 
+ - output.name == 'httpd_can_network_connect' + - getsebool_output.stdout.startswith('httpd_can_network_connect (on , on)') +########################################################################################## diff --git a/tests/integration/targets/selinux/aliases b/tests/integration/targets/selinux/aliases new file mode 100644 index 0000000..58a2a31 --- /dev/null +++ b/tests/integration/targets/selinux/aliases @@ -0,0 +1,3 @@ +needs/root +shippable/posix/group2 +skip/aix diff --git a/tests/integration/targets/selinux/tasks/main.yml b/tests/integration/targets/selinux/tasks/main.yml new file mode 100644 index 0000000..41fdca5 --- /dev/null +++ b/tests/integration/targets/selinux/tasks/main.yml @@ -0,0 +1,36 @@ +# (c) 2017, Sam Doran + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- debug: + msg: SELinux is disabled + when: ansible_selinux is defined and ansible_selinux == False + +- debug: + msg: SELinux is {{ ansible_selinux.status }} + when: ansible_selinux is defined and ansible_selinux != False + +- include: selinux.yml + when: + - ansible_selinux is defined + - ansible_selinux != False + - ansible_selinux.status == 'enabled' + +- include: selogin.yml + when: + - ansible_selinux is defined + - ansible_selinux != False + - ansible_selinux.status == 'enabled' diff --git a/tests/integration/targets/selinux/tasks/selinux.yml b/tests/integration/targets/selinux/tasks/selinux.yml new file mode 100644 index 0000000..7fcba89 --- /dev/null +++ b/tests/integration/targets/selinux/tasks/selinux.yml @@ -0,0 +1,364 @@ +# (c) 2017, Sam Doran + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ + +# First Test +# ############################################################################## +# Test changing the state, which requires a reboot + +- name: TEST 1 | Get current SELinux config file contents + set_fact: + selinux_config_original: "{{ lookup('file', '/etc/sysconfig/selinux').split('\n') }}" + before_test_sestatus: "{{ ansible_selinux }}" + +- debug: + var: "{{ item }}" + verbosity: 1 + with_items: + - selinux_config_original + - before_test_sestatus + - ansible_selinux + +- name: TEST 1 | Setup SELinux configuration for tests + selinux: + state: enforcing + policy: targeted + +- name: TEST 1 | Disable SELinux + selinux: + state: disabled + policy: targeted + register: _disable_test1 + +- debug: + var: _disable_test1 + verbosity: 1 + +- name: TEST 1 | Re-gather facts + setup: + +- name: TEST 1 | Assert that status was changed, reboot_required is True, a warning was displayed, and SELinux is configured properly + assert: + that: + - _disable_test1 is changed + - _disable_test1.reboot_required + - (_disable_test1.warnings | length ) >= 1 + - ansible_selinux.config_mode == 'disabled' + - ansible_selinux.type == 'targeted' + +- debug: + var: ansible_selinux + verbosity: 1 + +- name: TEST 1 | Disable SELinux again + selinux: + state: disabled + policy: targeted + register: _disable_test2 + +- debug: + var: _disable_test2 + verbosity: 1 + +- name: TEST 1 | Assert that no change is reported, a warnking was dispalyed, and reboot_required is True + assert: + that: + - _disable_test2 is not changed + - (_disable_test1.warnings | length ) >= 1 + - _disable_test2.reboot_required + +- name: TEST 1 | Get modified config file + set_fact: + selinux_config_after: "{{ lookup('file', '/etc/sysconfig/selinux').split('\n') }}" + +- debug: + var: selinux_config_after + verbosity: 1 + +- name: TEST 1 | Ensure SELinux config file is properly formatted + assert: + that: + - selinux_config_original | length == selinux_config_after | length + - 
selinux_config_after[selinux_config_after.index('SELINUX=disabled')] is search("^SELINUX=\w+$") + - selinux_config_after[selinux_config_after.index('SELINUXTYPE=targeted')] is search("^SELINUXTYPE=\w+$") + +- name: TEST 1 | Reset SELinux configuration for next test + selinux: + state: enforcing + policy: targeted + + +# Second Test +# ############################################################################## +# Test changing only the policy, which does not require a reboot + +- name: TEST 2 | Make sure the policy is present + package: + name: selinux-policy-mls + state: present + +- name: TEST 2 | Set SELinux policy + selinux: + state: enforcing + policy: mls + register: _state_test1 + +- debug: + var: _state_test1 + verbosity: 1 + +- name: TEST 2 | Re-gather facts + setup: + +- debug: + var: ansible_selinux + tags: debug + +- name: TEST 2 | Assert that status was changed, reboot_required is False, no warnings were displayed, and SELinux is configured properly + assert: + that: + - _state_test1 is changed + - not _state_test1.reboot_required + - _state_test1.warnings is not defined + - ansible_selinux.config_mode == 'enforcing' + - ansible_selinux.type == 'mls' + +- name: TEST 2 | Set SELinux policy again + selinux: + state: enforcing + policy: mls + register: _state_test2 + +- debug: + var: _state_test2 + verbosity: 1 + +- name: TEST 2 | Assert that no change was reported, no warnings were dispalyed, and reboot_required is False + assert: + that: + - _state_test2 is not changed + - _state_test2.warnings is not defined + - not _state_test2.reboot_required + +- name: TEST 2 | Get modified config file + set_fact: + selinux_config_after: "{{ lookup('file', '/etc/sysconfig/selinux').split('\n') }}" + +- debug: + var: selinux_config_after + verbosity: 1 + +- name: TEST 2 | Ensure SELinux config file is properly formatted + assert: + that: + - selinux_config_original | length == selinux_config_after | length + - 
selinux_config_after[selinux_config_after.index('SELINUX=enforcing')] is search("^SELINUX=\w+$") + - selinux_config_after[selinux_config_after.index('SELINUXTYPE=mls')] is search("^SELINUXTYPE=\w+$") + +- name: TEST 2 | Reset SELinux configuration for next test + selinux: + state: enforcing + policy: targeted + + +# Third Test +# ############################################################################## +# Test changing non-existing policy + +- name: TEST 3 | Set SELinux policy + selinux: + state: enforcing + policy: non-existing-selinux-policy + register: _state_test1 + ignore_errors: yes + +- debug: + var: _state_test1 + verbosity: 1 + +- name: TEST 3 | Re-gather facts + setup: + +- debug: + var: ansible_selinux + tags: debug + +- name: TEST 3 | Assert that status was not changed, the task failed, the msg contains proper information and SELinux was not changed + assert: + that: + - _state_test1 is not changed + - _state_test1 is failed + - _state_test1.msg == 'Policy non-existing-selinux-policy does not exist in /etc/selinux/' + - ansible_selinux.config_mode == 'enforcing' + - ansible_selinux.type == 'targeted' + + +# Fourth Test +# ############################################################################## +# Test if check mode returns correct changed values and +# doesn't make any changes + + +- name: TEST 4 | Set SELinux to enforcing + selinux: + state: enforcing + policy: targeted + register: _check_mode_test1 + +- debug: + var: _check_mode_test1 + verbosity: 1 + +- name: TEST 4 | Set SELinux to enforcing in check mode + selinux: + state: enforcing + policy: targeted + register: _check_mode_test1 + check_mode: yes + +- name: TEST 4 | Re-gather facts + setup: + +- debug: + var: ansible_selinux + verbosity: 1 + tags: debug + +- name: TEST 4 | Assert that check mode is idempotent + assert: + that: + - _check_mode_test1 is success + - not _check_mode_test1.reboot_required + - ansible_selinux.config_mode == 'enforcing' + - ansible_selinux.type == 'targeted' 
+ +- name: TEST 4 | Set SELinux to permissive in check mode + selinux: + state: permissive + policy: targeted + register: _check_mode_test2 + check_mode: yes + +- name: TEST 4 | Re-gather facts + setup: + +- debug: + var: ansible_selinux + verbosity: 1 + tags: debug + +- name: TEST 4 | Assert that check mode doesn't set state permissive and returns changed + assert: + that: + - _check_mode_test2 is changed + - not _check_mode_test2.reboot_required + - ansible_selinux.config_mode == 'enforcing' + - ansible_selinux.type == 'targeted' + +- name: TEST 4 | Disable SELinux in check mode + selinux: + state: disabled + register: _check_mode_test3 + check_mode: yes + +- name: TEST 4 | Re-gather facts + setup: + +- debug: + var: ansible_selinux + verbosity: 1 + tags: debug + +- name: TEST 4 | Assert that check mode didn't change anything, status is changed, reboot_required is True, a warning was displayed + assert: + that: + - _check_mode_test3 is changed + - _check_mode_test3.reboot_required + - (_check_mode_test3.warnings | length ) >= 1 + - ansible_selinux.config_mode == 'enforcing' + - ansible_selinux.type == 'targeted' + +- name: TEST 4 | Set SELinux to permissive + selinux: + state: permissive + policy: targeted + register: _check_mode_test4 + +- debug: + var: _check_mode_test4 + verbosity: 1 + +- name: TEST 4 | Disable SELinux in check mode + selinux: + state: disabled + register: _check_mode_test4 + check_mode: yes + +- name: TEST 4 | Re-gather facts + setup: + +- debug: + var: ansible_selinux + verbosity: 1 + tags: debug + +- name: TEST 4 | Assert that check mode didn't change anything, status is changed, reboot_required is True, a warning was displayed + assert: + that: + - _check_mode_test4 is changed + - _check_mode_test4.reboot_required + - (_check_mode_test3.warnings | length ) >= 1 + - ansible_selinux.config_mode == 'permissive' + - ansible_selinux.type == 'targeted' + +- name: TEST 4 | Set SELinux to enforcing + selinux: + state: enforcing + policy: targeted 
+ register: _check_mode_test5 + +- debug: + var: _check_mode_test5 + verbosity: 1 + +- name: TEST 4 | Disable SELinux + selinux: + state: disabled + register: _check_mode_test5 + +- name: TEST 4 | Disable SELinux in check mode + selinux: + state: disabled + register: _check_mode_test5 + check_mode: yes + +- name: TEST 4 | Re-gather facts + setup: + +- debug: + var: ansible_selinux + verbosity: 1 + tags: debug + +- name: TEST 4 | Assert that in check mode status was not changed, reboot_required is True, a warning was displayed, and SELinux is configured properly + assert: + that: + - _check_mode_test5 is success + - _check_mode_test5.reboot_required + - (_check_mode_test5.warnings | length ) >= 1 + - ansible_selinux.config_mode == 'disabled' + - ansible_selinux.type == 'targeted' diff --git a/tests/integration/targets/selinux/tasks/selogin.yml b/tests/integration/targets/selinux/tasks/selogin.yml new file mode 100644 index 0000000..f9986a6 --- /dev/null +++ b/tests/integration/targets/selinux/tasks/selogin.yml @@ -0,0 +1,70 @@ +- name: create user for testing + user: + name: seuser +- name: attempt to add mapping without 'seuser' + register: selogin_error + ignore_errors: true + community.general.selogin: + login: seuser +- name: verify failure + assert: + that: + - selogin_error is failed +- name: map login to SELinux user + register: selogin_new_mapping + check_mode: '{{ item }}' + with_items: + - true + - false + - true + - false + community.general.selogin: + login: seuser + seuser: staff_u +- name: new mapping- verify functionality and check_mode + assert: + that: + - selogin_new_mapping.results[0] is changed + - selogin_new_mapping.results[1] is changed + - selogin_new_mapping.results[2] is not changed + - selogin_new_mapping.results[3] is not changed +- name: change SELinux user login mapping + register: selogin_mod_mapping + check_mode: '{{ item }}' + with_items: + - true + - false + - true + - false + community.general.selogin: + login: seuser + seuser: 
user_u +- name: changed mapping- verify functionality and check_mode + assert: + that: + - selogin_mod_mapping.results[0] is changed + - selogin_mod_mapping.results[1] is changed + - selogin_mod_mapping.results[2] is not changed + - selogin_mod_mapping.results[3] is not changed +- name: remove SELinux user mapping + register: selogin_del_mapping + check_mode: '{{ item }}' + with_items: + - true + - false + - true + - false + community.general.selogin: + login: seuser + state: absent +- name: delete mapping- verify functionality and check_mode + assert: + that: + - selogin_del_mapping.results[0] is changed + - selogin_del_mapping.results[1] is changed + - selogin_del_mapping.results[2] is not changed + - selogin_del_mapping.results[3] is not changed +- name: remove test user + user: + name: seuser + state: absent diff --git a/tests/integration/targets/synchronize/aliases b/tests/integration/targets/synchronize/aliases new file mode 100644 index 0000000..765b70d --- /dev/null +++ b/tests/integration/targets/synchronize/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/tests/integration/targets/synchronize/files/bar.txt b/tests/integration/targets/synchronize/files/bar.txt new file mode 100644 index 0000000..3e96db9 --- /dev/null +++ b/tests/integration/targets/synchronize/files/bar.txt @@ -0,0 +1 @@ +templated_var_loaded diff --git a/tests/integration/targets/synchronize/files/foo.txt b/tests/integration/targets/synchronize/files/foo.txt new file mode 100644 index 0000000..3e96db9 --- /dev/null +++ b/tests/integration/targets/synchronize/files/foo.txt @@ -0,0 +1 @@ +templated_var_loaded diff --git a/tests/integration/targets/synchronize/meta/main.yml b/tests/integration/targets/synchronize/meta/main.yml new file mode 100644 index 0000000..07faa21 --- /dev/null +++ b/tests/integration/targets/synchronize/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/tests/integration/targets/synchronize/tasks/main.yml 
b/tests/integration/targets/synchronize/tasks/main.yml new file mode 100644 index 0000000..ac1aa03 --- /dev/null +++ b/tests/integration/targets/synchronize/tasks/main.yml @@ -0,0 +1,229 @@ +- name: install rsync + package: + name: rsync + when: ansible_distribution != "MacOSX" +- name: cleanup old files + shell: rm -rf {{output_dir}}/* +- name: create test new files + copy: dest={{output_dir}}/{{item}} mode=0644 content="hello world" + with_items: + - foo.txt + - bar.txt +- name: synchronize file to new filename + synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.result + register: sync_result +- assert: + that: + - '''changed'' in sync_result' + - sync_result.changed == true + - '''cmd'' in sync_result' + - '''rsync'' in sync_result.cmd' + - '''msg'' in sync_result' + - sync_result.msg.startswith('>f+') + - 'sync_result.msg.endswith(''+ foo.txt + + '')' +- name: test that the file was really copied over + stat: + path: '{{ output_dir }}/foo.result' + register: stat_result +- assert: + that: + - stat_result.stat.exists == True + - stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed' +- name: test that the file is not copied a second time + synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.result + register: sync_result +- assert: + that: + - sync_result.changed == False +- name: Cleanup + file: + state: absent + path: '{{output_dir}}/{{item}}' + with_items: + - foo.result + - bar.result +- name: Synchronize using the mode=push param + synchronize: + src: '{{output_dir}}/foo.txt' + dest: '{{output_dir}}/foo.result' + mode: push + register: sync_result +- assert: + that: + - '''changed'' in sync_result' + - sync_result.changed == true + - '''cmd'' in sync_result' + - '''rsync'' in sync_result.cmd' + - '''msg'' in sync_result' + - sync_result.msg.startswith('>f+') + - 'sync_result.msg.endswith(''+ foo.txt + + '')' +- name: test that the file was really copied over + stat: + path: '{{ output_dir }}/foo.result' + register: 
stat_result +- assert: + that: + - stat_result.stat.exists == True + - stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed' +- name: test that the file is not copied a second time + synchronize: + src: '{{output_dir}}/foo.txt' + dest: '{{output_dir}}/foo.result' + mode: push + register: sync_result +- assert: + that: + - sync_result.changed == False +- name: Cleanup + file: + state: absent + path: '{{output_dir}}/{{item}}' + with_items: + - foo.result + - bar.result +- name: Synchronize using the mode=pull param + synchronize: + src: '{{output_dir}}/foo.txt' + dest: '{{output_dir}}/foo.result' + mode: pull + register: sync_result +- assert: + that: + - '''changed'' in sync_result' + - sync_result.changed == true + - '''cmd'' in sync_result' + - '''rsync'' in sync_result.cmd' + - '''msg'' in sync_result' + - sync_result.msg.startswith('>f+') + - 'sync_result.msg.endswith(''+ foo.txt + + '')' +- name: test that the file was really copied over + stat: + path: '{{ output_dir }}/foo.result' + register: stat_result +- assert: + that: + - stat_result.stat.exists == True + - stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed' +- name: test that the file is not copied a second time + synchronize: + src: '{{output_dir}}/foo.txt' + dest: '{{output_dir}}/foo.result' + mode: pull + register: sync_result +- assert: + that: + - sync_result.changed == False +- name: Cleanup + file: + state: absent + path: '{{output_dir}}/{{item}}' + with_items: + - foo.result + - bar.result +- name: synchronize files using with_items (issue#5965) + synchronize: src={{output_dir}}/{{item}} dest={{output_dir}}/{{item}}.result + with_items: + - foo.txt + - bar.txt + register: sync_result +- assert: + that: + - sync_result.changed + - sync_result.msg == 'All items completed' + - '''results'' in sync_result' + - sync_result.results|length == 2 + - 'sync_result.results[0].msg.endswith(''+ foo.txt + + '')' + - 'sync_result.results[1].msg.endswith(''+ bar.txt + + 
'')' +- name: Cleanup + file: + state: absent + path: '{{output_dir}}/{{item}}.result' + with_items: + - foo.txt + - bar.txt +- name: synchronize files using rsync_path (issue#7182) + synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.rsync_path rsync_path="sudo rsync" + register: sync_result +- assert: + that: + - '''changed'' in sync_result' + - sync_result.changed == true + - '''cmd'' in sync_result' + - '''rsync'' in sync_result.cmd' + - '''rsync_path'' in sync_result.cmd' + - '''msg'' in sync_result' + - sync_result.msg.startswith('>f+') + - 'sync_result.msg.endswith(''+ foo.txt + + '')' +- name: Cleanup + file: + state: absent + path: '{{output_dir}}/{{item}}' + with_items: + - foo.rsync_path +- name: add subdirectories for link-dest test + file: + path: '{{output_dir}}/{{item}}/' + state: directory + mode: '0755' + with_items: + - directory_a + - directory_b +- name: copy foo.txt into the first directory + synchronize: + src: '{{output_dir}}/foo.txt' + dest: '{{output_dir}}/{{item}}/foo.txt' + with_items: + - directory_a +- name: synchronize files using link_dest + synchronize: + src: '{{output_dir}}/directory_a/foo.txt' + dest: '{{output_dir}}/directory_b/foo.txt' + link_dest: + - '{{output_dir}}/directory_a' + register: sync_result +- name: get stat information for directory_a + stat: + path: '{{ output_dir }}/directory_a/foo.txt' + register: stat_result_a +- name: get stat information for directory_b + stat: + path: '{{ output_dir }}/directory_b/foo.txt' + register: stat_result_b +- assert: + that: + - '''changed'' in sync_result' + - sync_result.changed == true + - stat_result_a.stat.inode == stat_result_b.stat.inode +- name: synchronize files using link_dest that would be recursive + synchronize: + src: '{{output_dir}}/foo.txt' + dest: '{{output_dir}}/foo.result' + link_dest: + - '{{output_dir}}' + register: sync_result + ignore_errors: true +- assert: + that: + - sync_result is not changed + - sync_result is failed +- name: Cleanup + file: 
+ state: absent + path: '{{output_dir}}/{{item}}' + with_items: + - directory_b/foo.txt + - directory_a/foo.txt + - directory_a + - directory_b diff --git a/tests/integration/targets/sysctl/aliases b/tests/integration/targets/sysctl/aliases new file mode 100644 index 0000000..5959a80 --- /dev/null +++ b/tests/integration/targets/sysctl/aliases @@ -0,0 +1,4 @@ +shippable/posix/group1 +skip/aix +skip/freebsd +skip/osx diff --git a/tests/integration/targets/sysctl/files/sysctl.conf b/tests/integration/targets/sysctl/files/sysctl.conf new file mode 100644 index 0000000..59b0fb6 --- /dev/null +++ b/tests/integration/targets/sysctl/files/sysctl.conf @@ -0,0 +1,12 @@ +# sysctl settings are defined through files in +# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/. +# +# Vendors settings live in /usr/lib/sysctl.d/. +# To override a whole file, create a new file with the same in +# /etc/sysctl.d/ and put new settings there. To override +# only specific settings, add a file with a lexically later +# name in /etc/sysctl.d/ and put new settings there. +# +# For more information, see sysctl.conf(5) and sysctl.d(5). +vm.swappiness=1 +kernel.panic=2 diff --git a/tests/integration/targets/sysctl/meta/main.yml b/tests/integration/targets/sysctl/meta/main.yml new file mode 100644 index 0000000..07faa21 --- /dev/null +++ b/tests/integration/targets/sysctl/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/tests/integration/targets/sysctl/tasks/main.yml b/tests/integration/targets/sysctl/tasks/main.yml new file mode 100644 index 0000000..d72fd52 --- /dev/null +++ b/tests/integration/targets/sysctl/tasks/main.yml @@ -0,0 +1,291 @@ +# Test code for the sysctl module. 
+# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# NOTE: Testing sysctl inside an unprivileged container means that we cannot +# apply sysctl, or it will always fail, because of that in most cases (except +# those when it should fail) we have to use `reload=no`. + +- name: Test inside Docker + when: + - ansible_facts.virtualization_type == 'docker' + block: + - set_fact: + output_dir_test: "{{ output_dir }}/test_sysctl" + + - name: make sure our testing sub-directory does not exist + file: + path: "{{ output_dir_test }}" + state: absent + + - name: create our testing sub-directory + file: + path: "{{ output_dir_test }}" + state: directory + + ## + ## sysctl - file manipulation + ## + + - name: copy the example conf to the test dir + copy: + src: sysctl.conf + dest: "{{ output_dir_test }}" + + - name: Set vm.swappiness to 5 + sysctl: + name: vm.swappiness + value: 5 + state: present + reload: no + sysctl_file: "{{ output_dir_test }}/sysctl.conf" + register: sysctl_test0 + + - debug: + var: sysctl_test0 + verbosity: 1 + + - name: get file content + shell: "cat {{ output_dir_test }}/sysctl.conf | egrep -v ^\\#" + register: sysctl_content0 + + - debug: + var: sysctl_content0 + verbosity: 1 + + - name: Set vm.swappiness to 5 again + sysctl: + name: vm.swappiness + value: 5 + state: present + reload: no + sysctl_file: "{{ output_dir_test }}/sysctl.conf" + register: 
sysctl_test1 + + - name: validate results + assert: + that: + - sysctl_test0 is changed + - sysctl_test1 is not changed + - 'sysctl_content0.stdout_lines[sysctl_content0.stdout_lines.index("vm.swappiness=5")] == "vm.swappiness=5"' + + - name: Remove kernel.panic + sysctl: + name: kernel.panic + value: 2 + reload: no + state: absent + sysctl_file: "{{ output_dir_test }}/sysctl.conf" + register: sysctl_test2 + + - name: get file content + shell: "cat {{ output_dir_test }}/sysctl.conf | egrep -v ^\\#" + register: sysctl_content2 + + - debug: + var: item + verbosity: 1 + with_items: + - "{{ sysctl_test2 }}" + - "{{ sysctl_content2 }}" + + - name: Validate results for key removal + assert: + that: + - sysctl_test2 is changed + - "'kernel.panic' not in sysctl_content2.stdout_lines" + + - name: Test remove kernel.panic again + sysctl: + name: kernel.panic + value: 2 + state: absent + reload: no + sysctl_file: "{{ output_dir_test }}/sysctl.conf" + register: sysctl_test2_change_test + + - name: Assert that no change was made + assert: + that: + - sysctl_test2_change_test is not changed + + - name: Try sysctl with an invalid value + sysctl: + name: net.ipv4.ip_forward + value: foo + register: sysctl_test3 + ignore_errors: yes + + - debug: + var: sysctl_test3 + verbosity: 1 + + - name: validate results for test 3 + assert: + that: + - sysctl_test3 is failed + + ## + ## sysctl - sysctl_set + ## + + - name: set net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: 1 + sysctl_set: yes + reload: no + register: sysctl_test3 + + - name: check with sysctl command + shell: sysctl net.ipv4.ip_forward + register: sysctl_check3 + + - debug: + var: item + verbosity: 1 + with_items: + - "{{ sysctl_test3 }}" + - "{{ sysctl_check3 }}" + + - name: validate results for test 3 + assert: + that: + - sysctl_test3 is changed + - 'sysctl_check3.stdout_lines == ["net.ipv4.ip_forward = 1"]' + + - name: Try sysctl with no name + sysctl: + name: + value: 1 + sysctl_set: yes + 
ignore_errors: True + register: sysctl_no_name + + - name: validate nameless results + assert: + that: + - sysctl_no_name is failed + - "sysctl_no_name.msg == 'name cannot be None'" + + - name: Try sysctl with no value + sysctl: + name: Foo + value: + sysctl_set: yes + ignore_errors: True + register: sysctl_no_value + + - name: validate nameless results + assert: + that: + - sysctl_no_value is failed + - "sysctl_no_value.msg == 'value cannot be None'" + + - name: Try sysctl with an invalid value + sysctl: + name: net.ipv4.ip_forward + value: foo + sysctl_set: yes + register: sysctl_test4 + ignore_errors: yes + + - debug: + var: sysctl_test4 + verbosity: 1 + + - name: validate results for test 4 + assert: + that: + - sysctl_test4 is failed + + +- name: Test on RHEL VMs + when: + - ansible_facts.virtualization_type != 'docker' + - ansible_facts.distribution == 'RedHat' + block: + # Test reload: yes + - name: Set sysctl property using module + sysctl: + name: vm.swappiness + value: '22' + state: present + reload: yes + register: sysctl_set1 + + - name: Change sysctl property using command + command: sysctl vm.swappiness=33 + + - name: Set sysctl property using module + sysctl: + name: vm.swappiness + value: '22' + state: present + reload: yes + register: sysctl_set2 + + - name: Read /etc/sysctl.conf + command: 'egrep -v ^# /etc/sysctl.conf' + register: sysctl_conf_content + + - name: Get current value of vm.swappiness + command: sysctl -n vm.swappiness + register: sysctl_current_vm_swappiness + + - name: Ensure changes were made appropriately + assert: + that: + - sysctl_set1 is changed + - sysctl_set2 is changed + - "'vm.swappiness=22' in sysctl_conf_content.stdout_lines" + - sysctl_current_vm_swappiness.stdout == '22' + + # Test reload: yes in check mode + - name: Set the same value using module in check mode + sysctl: + name: vm.swappiness + value: '22' + state: present + reload: yes + check_mode: yes + register: sysctl_check_mode1 + + - name: Set a different value 
using module in check mode + sysctl: + name: vm.swappiness + value: '44' + state: present + reload: yes + check_mode: yes + register: sysctl_check_mode2 + + - name: Read /etc/sysctl.conf + command: 'egrep -v ^# /etc/sysctl.conf' + register: sysctl_check_mode_conf_content + + - name: Get current value of vm.swappiness + command: sysctl -n vm.swappiness + register: sysctl_check_mode_current_vm_swappiness + + - name: Ensure no changes were made in check mode + assert: + that: + - sysctl_check_mode1 is success + - sysctl_check_mode2 is changed + - "'vm.swappiness=22' in sysctl_check_mode_conf_content.stdout_lines" + - sysctl_check_mode_current_vm_swappiness.stdout == '22' diff --git a/tests/requirements.yml b/tests/requirements.yml new file mode 100644 index 0000000..47e12a9 --- /dev/null +++ b/tests/requirements.yml @@ -0,0 +1,4 @@ +integration_tests_dependencies: +- community.general +unit_tests_dependencies: +- community.general diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt new file mode 100644 index 0000000..bfb5e1a --- /dev/null +++ b/tests/sanity/ignore-2.10.txt @@ -0,0 +1,32 @@ +plugins/module_utils/ismount.py future-import-boilerplate +plugins/module_utils/ismount.py metaclass-boilerplate +plugins/modules/acl.py validate-modules:parameter-type-not-in-doc +plugins/modules/synchronize.py pylint:blacklisted-name +plugins/modules/synchronize.py use-argspec-type-path +plugins/modules/synchronize.py validate-modules:doc-default-does-not-match-spec +plugins/modules/synchronize.py validate-modules:nonexistent-parameter-documented +plugins/modules/synchronize.py validate-modules:parameter-list-no-elements +plugins/modules/synchronize.py validate-modules:parameter-type-not-in-doc +plugins/modules/synchronize.py validate-modules:undocumented-parameter +plugins/modules/at.py validate-modules:doc-required-mismatch +plugins/modules/authorized_key.py validate-modules:parameter-type-not-in-doc +plugins/modules/seboolean.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/selinux.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/selinux.py validate-modules:parameter-type-not-in-doc +plugins/modules/sysctl.py validate-modules:doc-missing-type +plugins/modules/sysctl.py validate-modules:parameter-type-not-in-doc +tests/unit/mock/path.py future-import-boilerplate +tests/unit/mock/path.py metaclass-boilerplate +tests/unit/mock/yaml_helper.py future-import-boilerplate +tests/unit/mock/yaml_helper.py metaclass-boilerplate +tests/unit/modules/conftest.py future-import-boilerplate +tests/unit/modules/conftest.py metaclass-boilerplate +tests/unit/modules/system/interfaces_file/test_interfaces_file.py future-import-boilerplate +tests/unit/modules/system/interfaces_file/test_interfaces_file.py metaclass-boilerplate +tests/unit/modules/system/interfaces_file/test_interfaces_file.py pylint:blacklisted-name +tests/unit/modules/system/test_mount.py future-import-boilerplate +tests/unit/modules/system/test_mount.py metaclass-boilerplate +tests/unit/modules/utils.py future-import-boilerplate +tests/unit/modules/utils.py metaclass-boilerplate +tests/unit/plugins/action/test_synchronize.py future-import-boilerplate +tests/unit/plugins/action/test_synchronize.py metaclass-boilerplate \ No newline at end of file diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt new file mode 100644 index 0000000..bfb5e1a --- /dev/null +++ b/tests/sanity/ignore-2.9.txt @@ -0,0 +1,32 @@ +plugins/module_utils/ismount.py future-import-boilerplate +plugins/module_utils/ismount.py metaclass-boilerplate +plugins/modules/acl.py validate-modules:parameter-type-not-in-doc +plugins/modules/synchronize.py pylint:blacklisted-name +plugins/modules/synchronize.py use-argspec-type-path +plugins/modules/synchronize.py validate-modules:doc-default-does-not-match-spec +plugins/modules/synchronize.py validate-modules:nonexistent-parameter-documented +plugins/modules/synchronize.py 
validate-modules:parameter-list-no-elements +plugins/modules/synchronize.py validate-modules:parameter-type-not-in-doc +plugins/modules/synchronize.py validate-modules:undocumented-parameter +plugins/modules/at.py validate-modules:doc-required-mismatch +plugins/modules/authorized_key.py validate-modules:parameter-type-not-in-doc +plugins/modules/seboolean.py validate-modules:parameter-type-not-in-doc +plugins/modules/selinux.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/selinux.py validate-modules:parameter-type-not-in-doc +plugins/modules/sysctl.py validate-modules:doc-missing-type +plugins/modules/sysctl.py validate-modules:parameter-type-not-in-doc +tests/unit/mock/path.py future-import-boilerplate +tests/unit/mock/path.py metaclass-boilerplate +tests/unit/mock/yaml_helper.py future-import-boilerplate +tests/unit/mock/yaml_helper.py metaclass-boilerplate +tests/unit/modules/conftest.py future-import-boilerplate +tests/unit/modules/conftest.py metaclass-boilerplate +tests/unit/modules/system/interfaces_file/test_interfaces_file.py future-import-boilerplate +tests/unit/modules/system/interfaces_file/test_interfaces_file.py metaclass-boilerplate +tests/unit/modules/system/interfaces_file/test_interfaces_file.py pylint:blacklisted-name +tests/unit/modules/system/test_mount.py future-import-boilerplate +tests/unit/modules/system/test_mount.py metaclass-boilerplate +tests/unit/modules/utils.py future-import-boilerplate +tests/unit/modules/utils.py metaclass-boilerplate +tests/unit/plugins/action/test_synchronize.py future-import-boilerplate +tests/unit/plugins/action/test_synchronize.py metaclass-boilerplate \ No newline at end of file diff --git a/tests/sanity/requirements.txt b/tests/sanity/requirements.txt new file mode 100644 index 0000000..3e3a966 --- /dev/null +++ b/tests/sanity/requirements.txt @@ -0,0 +1,4 @@ +packaging # needed for update-bundled and changelog +sphinx ; python_version >= '3.5' # docs build requires python 3+ 
+sphinx-notfound-page ; python_version >= '3.5' # docs build requires python 3+ +straight.plugin ; python_version >= '3.5' # needed for hacking/build-ansible.py which will host changelog generation and requires python 3+ diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/compat/__init__.py b/tests/unit/compat/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/compat/builtins.py b/tests/unit/compat/builtins.py new file mode 100644 index 0000000..f60ee67 --- /dev/null +++ b/tests/unit/compat/builtins.py @@ -0,0 +1,33 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' diff --git a/tests/unit/compat/mock.py b/tests/unit/compat/mock.py new file mode 100644 index 0000000..0972cd2 --- /dev/null +++ b/tests/unit/compat/mock.py @@ -0,0 +1,122 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' +import sys + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. 
It +# is the same as the python3 stdlib mock library + +try: + # Allow wildcard import because we really do want to import all of mock's + # symbols into this compat shim + # pylint: disable=wildcard-import,unused-wildcard-import + from unittest.mock import * +except ImportError: + # Python 2 + # pylint: disable=wildcard-import,unused-wildcard-import + try: + from mock import * + except ImportError: + print('You need the mock library installed on python2.x to run tests') + + +# Prior to 3.4.4, mock_open cannot handle binary read_data +if sys.version_info >= (3,) and sys.version_info < (3, 4, 4): + file_spec = None + + def _iterate_read_data(read_data): + # Helper for mock_open: + # Retrieve lines from read_data via a generator so that separate calls to + # readline, read, and readlines are properly interleaved + sep = b'\n' if isinstance(read_data, bytes) else '\n' + data_as_list = [l + sep for l in read_data.split(sep)] + + if data_as_list[-1] == sep: + # If the last line ended in a newline, the list comprehension will have an + # extra entry that's just a newline. Remove this. + data_as_list = data_as_list[:-1] + else: + # If there wasn't an extra newline by itself, then the file being + # emulated doesn't have a newline to end the last line remove the + # newline that our naive format() added + data_as_list[-1] = data_as_list[-1][:-1] + + for line in data_as_list: + yield line + + def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read` methoddline`, and `readlines` of the + file handle to return. This is an empty string by default. 
+ """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return mock diff --git a/tests/unit/compat/unittest.py b/tests/unit/compat/unittest.py new file mode 100644 index 0000000..98f08ad --- /dev/null +++ b/tests/unit/compat/unittest.py @@ -0,0 +1,38 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. 
If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +# Allow wildcard import because we really do want to import all of +# unittests's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') +else: + from unittest import * diff --git a/tests/unit/mock/__init__.py b/tests/unit/mock/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/mock/loader.py b/tests/unit/mock/loader.py new file mode 100644 index 0000000..0ee47fb --- /dev/null +++ b/tests/unit/mock/loader.py @@ -0,0 +1,116 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ansible.errors import AnsibleParserError +from ansible.parsing.dataloader import DataLoader +from ansible.module_utils._text import to_bytes, to_text + + +class DictDataLoader(DataLoader): + + def __init__(self, file_mapping=None): + file_mapping = {} if file_mapping is None else file_mapping + assert type(file_mapping) == dict + + super(DictDataLoader, self).__init__() + + self._file_mapping = file_mapping + self._build_known_directories() + self._vault_secrets = None + + def load_from_file(self, path, cache=True, unsafe=False): + path = to_text(path) + if path in self._file_mapping: + return self.load(self._file_mapping[path], path) + return None + + # TODO: the real _get_file_contents returns a bytestring, so we actually convert the + # unicode/text it's created with to utf-8 + def _get_file_contents(self, path): + path = to_text(path) + if path in self._file_mapping: + return (to_bytes(self._file_mapping[path]), False) + else: + raise AnsibleParserError("file not found: %s" % path) + + def path_exists(self, path): + path = to_text(path) + return path in self._file_mapping or path in self._known_directories + + def is_file(self, path): + path = to_text(path) + return path in self._file_mapping + + def is_directory(self, path): + path = to_text(path) + return path in self._known_directories + + def list_directory(self, path): + ret = [] + path = to_text(path) + for x in (list(self._file_mapping.keys()) + self._known_directories): + if x.startswith(path): + if os.path.dirname(x) == path: + ret.append(os.path.basename(x)) + return ret + + def is_executable(self, path): + # FIXME: figure out a way to make paths return true for this + return False + + def _add_known_directory(self, directory): + if directory not in self._known_directories: + self._known_directories.append(directory) + + def _build_known_directories(self): + 
self._known_directories = [] + for path in self._file_mapping: + dirname = os.path.dirname(path) + while dirname not in ('/', ''): + self._add_known_directory(dirname) + dirname = os.path.dirname(dirname) + + def push(self, path, content): + rebuild_dirs = False + if path not in self._file_mapping: + rebuild_dirs = True + + self._file_mapping[path] = content + + if rebuild_dirs: + self._build_known_directories() + + def pop(self, path): + if path in self._file_mapping: + del self._file_mapping[path] + self._build_known_directories() + + def clear(self): + self._file_mapping = dict() + self._known_directories = [] + + def get_basedir(self): + return os.getcwd() + + def set_vault_secrets(self, vault_secrets): + self._vault_secrets = vault_secrets diff --git a/tests/unit/mock/path.py b/tests/unit/mock/path.py new file mode 100644 index 0000000..d128b5f --- /dev/null +++ b/tests/unit/mock/path.py @@ -0,0 +1,5 @@ +from ansible_collections.ansible.posix.tests.unit.compat.mock import MagicMock +from ansible.utils.path import unfrackpath + + +mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x) diff --git a/tests/unit/mock/procenv.py b/tests/unit/mock/procenv.py new file mode 100644 index 0000000..4740452 --- /dev/null +++ b/tests/unit/mock/procenv.py @@ -0,0 +1,90 @@ +# (c) 2016, Matt Davis +# (c) 2016, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import json + +from contextlib import contextmanager +from io import BytesIO, StringIO +from ansible_collections.ansible.posix.tests.unit.compat import unittest +from ansible.module_utils.six import PY3 +from ansible.module_utils._text import to_bytes + + +@contextmanager +def swap_stdin_and_argv(stdin_data='', argv_data=tuple()): + """ + context manager that temporarily masks the test runner's values for stdin and argv + """ + real_stdin = sys.stdin + real_argv = sys.argv + + if PY3: + fake_stream = StringIO(stdin_data) + fake_stream.buffer = BytesIO(to_bytes(stdin_data)) + else: + fake_stream = BytesIO(to_bytes(stdin_data)) + + try: + sys.stdin = fake_stream + sys.argv = argv_data + + yield + finally: + sys.stdin = real_stdin + sys.argv = real_argv + + +@contextmanager +def swap_stdout(): + """ + context manager that temporarily replaces stdout for tests that need to verify output + """ + old_stdout = sys.stdout + + if PY3: + fake_stream = StringIO() + else: + fake_stream = BytesIO() + + try: + sys.stdout = fake_stream + + yield fake_stream + finally: + sys.stdout = old_stdout + + +class ModuleTestCase(unittest.TestCase): + def setUp(self, module_args=None): + if module_args is None: + module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False} + + args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args)) + + # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually + self.stdin_swap = swap_stdin_and_argv(stdin_data=args) + self.stdin_swap.__enter__() + + def tearDown(self): + # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually + self.stdin_swap.__exit__(None, None, None) diff --git 
a/tests/unit/mock/vault_helper.py b/tests/unit/mock/vault_helper.py new file mode 100644 index 0000000..dcce9c7 --- /dev/null +++ b/tests/unit/mock/vault_helper.py @@ -0,0 +1,39 @@ +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils._text import to_bytes + +from ansible.parsing.vault import VaultSecret + + +class TextVaultSecret(VaultSecret): + '''A secret piece of text. ie, a password. Tracks text encoding. 
+ + The text encoding of the text may not be the default text encoding so + we keep track of the encoding so we encode it to the same bytes.''' + + def __init__(self, text, encoding=None, errors=None, _bytes=None): + super(TextVaultSecret, self).__init__() + self.text = text + self.encoding = encoding or 'utf-8' + self._bytes = _bytes + self.errors = errors or 'strict' + + @property + def bytes(self): + '''The text encoded with encoding, unless we specifically set _bytes.''' + return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors) diff --git a/tests/unit/mock/yaml_helper.py b/tests/unit/mock/yaml_helper.py new file mode 100644 index 0000000..cc095fe --- /dev/null +++ b/tests/unit/mock/yaml_helper.py @@ -0,0 +1,121 @@ +import io +import yaml + +from ansible.module_utils.six import PY3 +from ansible.parsing.yaml.loader import AnsibleLoader +from ansible.parsing.yaml.dumper import AnsibleDumper + + +class YamlTestUtils(object): + """Mixin class to combine with a unittest.TestCase subclass.""" + def _loader(self, stream): + """Vault related tests will want to override this. 
+ + Vault cases should setup a AnsibleLoader that has the vault password.""" + return AnsibleLoader(stream) + + def _dump_stream(self, obj, stream, dumper=None): + """Dump to a py2-unicode or py3-string stream.""" + if PY3: + return yaml.dump(obj, stream, Dumper=dumper) + else: + return yaml.dump(obj, stream, Dumper=dumper, encoding=None) + + def _dump_string(self, obj, dumper=None): + """Dump to a py2-unicode or py3-string""" + if PY3: + return yaml.dump(obj, Dumper=dumper) + else: + return yaml.dump(obj, Dumper=dumper, encoding=None) + + def _dump_load_cycle(self, obj): + # Each pass though a dump or load revs the 'generation' + # obj to yaml string + string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper) + + # wrap a stream/file like StringIO around that yaml + stream_from_object_dump = io.StringIO(string_from_object_dump) + loader = self._loader(stream_from_object_dump) + # load the yaml stream to create a new instance of the object (gen 2) + obj_2 = loader.get_data() + + # dump the gen 2 objects directory to strings + string_from_object_dump_2 = self._dump_string(obj_2, + dumper=AnsibleDumper) + + # The gen 1 and gen 2 yaml strings + self.assertEqual(string_from_object_dump, string_from_object_dump_2) + # the gen 1 (orig) and gen 2 py object + self.assertEqual(obj, obj_2) + + # again! gen 3... load strings into py objects + stream_3 = io.StringIO(string_from_object_dump_2) + loader_3 = self._loader(stream_3) + obj_3 = loader_3.get_data() + + string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper) + + self.assertEqual(obj, obj_3) + # should be transitive, but... 
+ self.assertEqual(obj_2, obj_3) + self.assertEqual(string_from_object_dump, string_from_object_dump_3) + + def _old_dump_load_cycle(self, obj): + '''Dump the passed in object to yaml, load it back up, dump again, compare.''' + stream = io.StringIO() + + yaml_string = self._dump_string(obj, dumper=AnsibleDumper) + self._dump_stream(obj, stream, dumper=AnsibleDumper) + + yaml_string_from_stream = stream.getvalue() + + # reset stream + stream.seek(0) + + loader = self._loader(stream) + # loader = AnsibleLoader(stream, vault_password=self.vault_password) + obj_from_stream = loader.get_data() + + stream_from_string = io.StringIO(yaml_string) + loader2 = self._loader(stream_from_string) + # loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password) + obj_from_string = loader2.get_data() + + stream_obj_from_stream = io.StringIO() + stream_obj_from_string = io.StringIO() + + if PY3: + yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper) + yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper) + else: + yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None) + yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None) + + yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue() + yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue() + + stream_obj_from_stream.seek(0) + stream_obj_from_string.seek(0) + + if PY3: + yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper) + yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper) + else: + yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None) + yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None) + + assert yaml_string == yaml_string_obj_from_stream + assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string + assert (yaml_string == 
yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream == + yaml_string_stream_obj_from_string) + assert obj == obj_from_stream + assert obj == obj_from_string + assert obj == yaml_string_obj_from_stream + assert obj == yaml_string_obj_from_string + assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string + return {'obj': obj, + 'yaml_string': yaml_string, + 'yaml_string_from_stream': yaml_string_from_stream, + 'obj_from_stream': obj_from_stream, + 'obj_from_string': obj_from_string, + 'yaml_string_obj_from_string': yaml_string_obj_from_string} diff --git a/tests/unit/modules/__init__.py b/tests/unit/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/conftest.py b/tests/unit/modules/conftest.py new file mode 100644 index 0000000..3bbfe0b --- /dev/null +++ b/tests/unit/modules/conftest.py @@ -0,0 +1,28 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import json + +import pytest + +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_bytes +from ansible.module_utils.common._collections_compat import MutableMapping + + +@pytest.fixture +def patch_ansible_module(request, mocker): + if isinstance(request.param, string_types): + args = request.param + elif isinstance(request.param, MutableMapping): + if 'ANSIBLE_MODULE_ARGS' not in request.param: + request.param = {'ANSIBLE_MODULE_ARGS': request.param} + if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: + request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' + if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']: + request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False + args = json.dumps(request.param) + else: + raise Exception('Malformed data to the patch_ansible_module pytest 
fixture') + + mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) diff --git a/tests/unit/modules/system/__init__.py b/tests/unit/modules/system/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/__init__.py b/tests/unit/modules/system/interfaces_file/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/__init__.py b/tests/unit/modules/system/interfaces_file/fixtures/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes new file mode 100644 index 0000000..bc4ecea --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes @@ -0,0 +1,12 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git 
a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up new file mode 100644 index 0000000..bc4ecea --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up @@ -0,0 +1,12 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.exceptions.txt new file mode 100644 index 0000000..8d223b0 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.exceptions.txt @@ -0,0 +1,8 @@ +[0] fail_json message: Error: interface aggi not found +options: +{ + "iface": "aggi", + "option": "up", + "state": "present", + "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git 
a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up new file mode 100644 index 0000000..bc4ecea --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up @@ -0,0 +1,12 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt new file mode 100644 index 0000000..1c9adbd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt @@ -0,0 +1,17 @@ +[0] fail_json message: Error: interface aggi not found +options: +{ + "iface": "aggi", + "option": "up", + "state": "present", + "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi" +} +===== +[1] fail_json message: Error: interface aggi not found +options: +{ + "iface": "aggi", + "option": "up", + "state": "absent", + "value": null +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": 
"static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4 b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4 new file mode 100644 index 0000000..9a2f5b0 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4 @@ -0,0 +1,12 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.42 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up new file mode 100644 index 0000000..5077e3a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up @@ -0,0 +1,13 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + post-up XXXX_ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up new file mode 100644 index 0000000..5c0f697 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up @@ -0,0 +1,13 @@ +# 
The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + pre-up XXXX_ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6 b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6 new file mode 100644 index 0000000..afaaac9 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6 @@ -0,0 +1,12 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::42 + post-up echo configuring ipv6 diff --git 
a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up new file mode 100644 index 0000000..cb3e98b --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up @@ -0,0 +1,13 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 + post-up XXXX_ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git 
a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up new file mode 100644 index 0000000..149da56 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up @@ -0,0 +1,13 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 + pre-up XXXX_ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method new file mode 100644 index 0000000..bc4ecea --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method @@ -0,0 +1,12 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.exceptions.txt new file mode 100644 index 0000000..050a983 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.exceptions.txt @@ -0,0 +1,8 @@ +fail_json message: Error: interface eth1 not found +options: +{ + "iface": "eth1", + "option": "method", + "state": "present", + "value": "dhcp" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_revert b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_revert new file mode 100644 index 0000000..bc4ecea --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_revert @@ -0,0 +1,12 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], 
+ "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu new file mode 100644 index 0000000..4033127 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu @@ -0,0 +1,13 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 + mtu 1350 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt new file mode 100644 index 0000000..3f0da8b --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt @@ -0,0 +1,8 @@ +[0] fail_json message: Error: interface aggi not found +options: +{ + "iface": "aggi", + "option": "mtu", + "state": "present", + "value": "1350" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + 
"method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves new file mode 100644 index 0000000..bc4ecea --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves @@ -0,0 +1,12 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt new file mode 100644 index 0000000..0af8775 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt @@ -0,0 +1,8 @@ +[0] fail_json message: Error: interface aggi not found +options: +{ + "iface": "aggi", + "option": "slaves", + "state": "present", + "value": "int1 int3" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.json new file mode 100644 index 0000000..ee632bd --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.json @@ -0,0 +1,21 @@ +{ + "eth0": { + "address": "fc00::1", + "address_family": "inet6", + "down": [], + "method": "static", + "post-up": [ + "echo configuring ipv6" + ], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + 
"method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes new file mode 100644 index 0000000..bd4522e --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes @@ -0,0 +1,6 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up new file mode 100644 index 0000000..bd4522e --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up @@ -0,0 +1,6 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt 
new file mode 100644 index 0000000..8d223b0 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt @@ -0,0 +1,8 @@ +[0] fail_json message: Error: interface aggi not found +options: +{ + "iface": "aggi", + "option": "up", + "state": "present", + "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up new file mode 100644 index 0000000..bd4522e --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up @@ -0,0 +1,6 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt new file mode 100644 index 0000000..1c9adbd --- /dev/null +++ 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt @@ -0,0 +1,17 @@ +[0] fail_json message: Error: interface aggi not found +options: +{ + "iface": "aggi", + "option": "up", + "state": "present", + "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi" +} +===== +[1] fail_json message: Error: interface aggi not found +options: +{ + "iface": "aggi", + "option": "up", + "state": "absent", + "value": null +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4 b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4 new file mode 100644 index 0000000..6962937 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4 @@ -0,0 +1,7 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp + address 192.168.0.42 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt new file mode 100644 index 0000000..e69de29 diff 
--git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up new file mode 100644 index 0000000..998f484 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up @@ -0,0 +1,7 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp + post-up XXXX_ipv4 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + 
"up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up new file mode 100644 index 0000000..5e6af40 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up @@ -0,0 +1,7 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp + pre-up XXXX_ipv4 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6 b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6 new file mode 100644 index 0000000..bd4522e --- /dev/null +++ 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6 @@ -0,0 +1,6 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt new file mode 100644 index 0000000..04c2089 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt @@ -0,0 +1,9 @@ +fail_json message: Error: interface eth0 not found +options: +{ + "address_family": "inet6", + "iface": "eth0", + "option": "address", + "state": "present", + "value": "fc00::42" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up new file mode 100644 index 0000000..bd4522e --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up @@ -0,0 +1,6 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary 
network interface +iface eth0 inet dhcp diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt new file mode 100644 index 0000000..48cb29b --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt @@ -0,0 +1,9 @@ +fail_json message: Error: interface eth0 not found +options: +{ + "address_family": "inet6", + "iface": "eth0", + "option": "post-up", + "state": "present", + "value": "XXXX_ipv6" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up new file mode 100644 index 0000000..bd4522e --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up @@ -0,0 +1,6 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp diff --git 
a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt new file mode 100644 index 0000000..fbfed6b --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt @@ -0,0 +1,9 @@ +fail_json message: Error: interface eth0 not found +options: +{ + "address_family": "inet6", + "iface": "eth0", + "option": "pre-up", + "state": "present", + "value": "XXXX_ipv6" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method new file mode 100644 index 0000000..bd4522e --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method @@ -0,0 +1,6 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.exceptions.txt 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.exceptions.txt new file mode 100644 index 0000000..050a983 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.exceptions.txt @@ -0,0 +1,8 @@ +fail_json message: Error: interface eth1 not found +options: +{ + "iface": "eth1", + "option": "method", + "state": "present", + "value": "dhcp" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert new file mode 100644 index 0000000..bd4522e --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert @@ -0,0 +1,6 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.json 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu new file mode 100644 index 0000000..7bbad22 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu @@ -0,0 +1,7 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp + mtu 1350 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt new file mode 100644 index 0000000..3f0da8b --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt @@ -0,0 +1,8 @@ +[0] fail_json message: Error: interface aggi not found +options: +{ + "iface": "aggi", + "option": "mtu", + "state": "present", + "value": "1350" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves new file mode 100644 index 0000000..bd4522e --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves @@ -0,0 +1,6 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt new file mode 100644 index 0000000..0af8775 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt @@ -0,0 +1,8 @@ +[0] fail_json message: Error: interface aggi not found +options: +{ + "iface": "aggi", + "option": "slaves", + "state": "present", + "value": "int1 int3" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.json new file mode 100644 index 0000000..bffc17a --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.json @@ -0,0 +1,18 @@ +{ + "eth0": { + "address_family": "inet", + "down": [], + "method": "dhcp", + 
"post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes new file mode 100644 index 0000000..c826bbe --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes.json 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] 
+ }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up new file mode 100644 index 0000000..e86b257 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up @@ -0,0 +1,62 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + 
bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 
10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up new file mode 100644 index 0000000..c826bbe --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + 
bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": 
"inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4 b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4 new file mode 100644 index 0000000..c826bbe --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4 @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 
+ mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.exceptions.txt new file mode 100644 index 0000000..a1600d9 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.exceptions.txt @@ -0,0 +1,9 @@ +fail_json message: Error: interface eth0 not found +options: +{ + "address_family": "inet", + "iface": "eth0", + "option": "address", + "state": "present", + "value": "192.168.0.42" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.json new file mode 100644 index 0000000..9e97da3 
--- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": 
[] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up new file mode 100644 index 0000000..c826bbe --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git 
a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt new file mode 100644 index 0000000..e1e0152 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt @@ -0,0 +1,9 @@ +fail_json message: Error: interface eth0 not found +options: +{ + "address_family": "inet", + "iface": "eth0", + "option": "post-up", + "state": "present", + "value": "XXXX_ipv4" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": 
"static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up new file mode 100644 index 0000000..c826bbe --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet 
static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt new file mode 100644 index 0000000..9e51065 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt @@ -0,0 +1,9 @@ +fail_json message: Error: interface eth0 not found +options: +{ + "address_family": "inet", + "iface": "eth0", + "option": "pre-up", + "state": "present", + "value": "XXXX_ipv4" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + 
"pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": 
"loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6 b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6 new file mode 100644 index 0000000..c826bbe --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6 @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.exceptions.txt new file mode 100644 index 0000000..04c2089 --- /dev/null +++ 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.exceptions.txt @@ -0,0 +1,9 @@ +fail_json message: Error: interface eth0 not found +options: +{ + "address_family": "inet6", + "iface": "eth0", + "option": "address", + "state": "present", + "value": "fc00::42" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev 
aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up new file mode 100644 index 0000000..c826bbe --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off 
tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt new file mode 100644 index 0000000..48cb29b --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt @@ -0,0 +1,9 @@ +fail_json message: Error: interface eth0 not found +options: +{ + "address_family": "inet6", + "iface": "eth0", + "option": "post-up", + "state": "present", + "value": "XXXX_ipv6" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", 
+ "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up new file mode 100644 index 
0000000..c826bbe --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt new file mode 100644 index 0000000..fbfed6b --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt @@ -0,0 +1,9 @@ +fail_json message: Error: interface eth0 not found +options: +{ + "address_family": "inet6", + "iface": "eth0", + "option": "pre-up", + "state": "present", + "value": 
"XXXX_ipv6" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": 
[], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method new file mode 100644 index 0000000..065bf0f --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + 
bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet dhcp + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.json new file mode 100644 index 0000000..8e9863b --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": 
"ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "dhcp", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert new file mode 100644 index 0000000..c826bbe --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 
+ bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.exceptions.txt new file mode 100644 index 0000000..fddf3b3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.exceptions.txt @@ -0,0 +1,8 @@ +fail_json message: Error: interface eth0 not found +options: +{ + "iface": "eth0", + "option": "mtu", + "state": "absent", + "value": "1350" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + 
"hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu 
b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu new file mode 100644 index 0000000..5218eed --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1350 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt new file mode 100644 index 0000000..764c9cb --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt @@ -0,0 +1,8 @@ +[1] fail_json message: 
Error: interface eth0 not found +options: +{ + "iface": "eth0", + "option": "mtu", + "state": "present", + "value": "1350" +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": "static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { 
+ "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves new file mode 100644 index 0000000..e2b78e9 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int3 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 
10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.json b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.json new file mode 100644 index 0000000..9e97da3 --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.json @@ -0,0 +1,109 @@ +{ + "agge": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "aggi": { + "address": "10.44.15.196", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "down": [], + "hwaddress": "ether 22:44:77:88:D5:96", + "method": "static", + "mtu": "1500", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K aggi tx off tso off" + ], + "pre-up": [], + "slaves": "int1 int2", + "up": [] + }, + "br0": { + "address": "188.44.133.76", + "address_family": "inet", + "bond_downdelay": "200", + "bond_lacp_rate": "slow", + "bond_miimon": "100", + "bond_mode": "4", + "bond_updelay": "200", + "bond_xmit_hash_policy": "layer3+4", + "bridge_ports": "agge", + "down": [], + "gateway": "188.44.133.75", + "hwaddress": "ether 22:44:77:88:D5:98", + "method": 
"static", + "netmask": "255.255.255.248", + "post-up": [ + "/sbin/ethtool -K agge tx off tso off" + ], + "pre-up": [], + "slaves": "ext1 ext2", + "up": [ + "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi", + "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi", + "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi" + ] + }, + "eth1": { + "address_family": "inet", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext1": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "ext2": { + "address_family": "inet", + "bond-master": "agge", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int1": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "int2": { + "address_family": "inet", + "bond-master": "aggi", + "down": [], + "method": "manual", + "post-up": [], + "pre-up": [], + "up": [] + }, + "lo": { + "address_family": "inet", + "down": [], + "method": "loopback", + "post-up": [], + "pre-up": [], + "up": [] + } +} \ No newline at end of file diff --git a/tests/unit/modules/system/interfaces_file/fixtures/input/address_family b/tests/unit/modules/system/interfaces_file/fixtures/input/address_family new file mode 100644 index 0000000..bc4ecea --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/input/address_family @@ -0,0 +1,12 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet static + address 192.168.0.1 + post-up echo configuring ipv4 + +iface eth0 inet6 static + address fc00::1 + post-up echo configuring ipv6 diff --git a/tests/unit/modules/system/interfaces_file/fixtures/input/default_dhcp b/tests/unit/modules/system/interfaces_file/fixtures/input/default_dhcp new file mode 100644 index 0000000..bd4522e --- 
/dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/input/default_dhcp @@ -0,0 +1,6 @@ +# The loopback network interface +auto lo eth0 +iface lo inet loopback + +# The primary network interface +iface eth0 inet dhcp diff --git a/tests/unit/modules/system/interfaces_file/fixtures/input/servers.com b/tests/unit/modules/system/interfaces_file/fixtures/input/servers.com new file mode 100644 index 0000000..c826bbe --- /dev/null +++ b/tests/unit/modules/system/interfaces_file/fixtures/input/servers.com @@ -0,0 +1,61 @@ + auto aggi + iface aggi inet static + hwaddress ether 22:44:77:88:D5:96 + address 10.44.15.196 + netmask 255.255.255.248 + mtu 1500 + slaves int1 int2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K aggi tx off tso off + + auto agge + iface agge inet manual + + auto br0 + iface br0 inet static + bridge_ports agge + hwaddress ether 22:44:77:88:D5:98 + address 188.44.133.76 + netmask 255.255.255.248 + gateway 188.44.133.75 + slaves ext1 ext2 + bond_mode 4 + bond_miimon 100 + bond_downdelay 200 + bond_updelay 200 + bond_lacp_rate slow + bond_xmit_hash_policy layer3+4 + post-up /sbin/ethtool -K agge tx off tso off + + up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi + up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi + up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi + + auto int1 + iface int1 inet manual + bond-master aggi + + auto int2 + iface int2 inet manual + bond-master aggi + + auto ext1 + iface ext1 inet manual + bond-master agge + + auto ext2 + iface ext2 inet manual + bond-master agge + + auto eth1 + iface eth1 inet manual + + auto lo + iface lo inet loopback + +source /etc/network/interfaces.d/*.cfg diff --git a/tests/unit/modules/system/interfaces_file/test_interfaces_file.py b/tests/unit/modules/system/interfaces_file/test_interfaces_file.py new file mode 100644 index 0000000..3f81e6b --- /dev/null +++ 
b/tests/unit/modules/system/interfaces_file/test_interfaces_file.py @@ -0,0 +1,327 @@ +# (c) 2017, Roman Belyakovsky +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible_collections.ansible.posix.tests.unit.compat import unittest +from ansible_collections.community.general.plugins.modules import interfaces_file +from shutil import copyfile, move +import difflib +import inspect +import io +import json +import os +import re +import shutil +import tempfile + + +class AnsibleFailJson(Exception): + pass + + +class ModuleMocked(): + def atomic_move(self, src, dst): + move(src, dst) + + def backup_local(self, path): + backupp = os.path.join("/tmp", os.path.basename(path) + ".bak") + copyfile(path, backupp) + return backupp + + def fail_json(self, msg): + raise AnsibleFailJson(msg) + + +module = ModuleMocked() +fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'input') +golden_output_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'golden_output') + + +class TestInterfacesFileModule(unittest.TestCase): + def getTestFiles(self, include_filter=None, exclude_filter=None): + flist = next(os.walk(fixture_path))[2] + if include_filter: + flist = filter(lambda x: re.match(include_filter, x), flist) + if exclude_filter: + flist = filter(lambda x: not re.match(exclude_filter, x), flist) + return flist + + def compareFileToBackup(self, path, backup): + with 
open(path) as f1: + with open(backup) as f2: + diffs = difflib.context_diff(f1.readlines(), + f2.readlines(), + fromfile=os.path.basename(path), + tofile=os.path.basename(backup)) + # Restore backup + move(backup, path) + deltas = [d for d in diffs] + self.assertTrue(len(deltas) == 0) + + def compareInterfacesLinesToFile(self, interfaces_lines, path, testname=None): + if not testname: + testname = "%s.%s" % (path, inspect.stack()[1][3]) + self.compareStringWithFile("".join([d['line'] for d in interfaces_lines if 'line' in d]), testname) + + def compareInterfacesToFile(self, ifaces, path, testname=None): + if not testname: + testname = "%s.%s.json" % (path, inspect.stack()[1][3]) + self.compareStringWithFile(json.dumps(ifaces, sort_keys=True, indent=4, separators=(',', ': ')), testname) + + def compareStringWithFile(self, string, path): + # self.assertEqual("","_",msg=path) + testfilepath = os.path.join(golden_output_path, path) + goldenstring = string + if not os.path.isfile(testfilepath): + f = io.open(testfilepath, 'wb') + f.write(string) + f.close() + else: + with open(testfilepath, 'r') as goldenfile: + goldenstring = goldenfile.read() + goldenfile.close() + self.assertEqual(string, goldenstring) + + def test_no_changes(self): + for testfile in self.getTestFiles(): + path = os.path.join(fixture_path, testfile) + lines, ifaces = interfaces_file.read_interfaces_file(module, path) + self.compareInterfacesLinesToFile(lines, testfile) + self.compareInterfacesToFile(ifaces, testfile) + + def test_add_up_aoption_to_aggi(self): + testcases = { + "add_aggi_up": [ + { + 'iface': 'aggi', + 'option': 'up', + 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi', + 'state': 'present', + } + ], + "add_and_delete_aggi_up": [ + { + 'iface': 'aggi', + 'option': 'up', + 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi', + 'state': 'present', + }, + { + 'iface': 'aggi', + 'option': 'up', + 'value': None, + 'state': 'absent', + }, + ], + "set_aggi_slaves": [ 
+ { + 'iface': 'aggi', + 'option': 'slaves', + 'value': 'int1 int3', + 'state': 'present', + }, + ], + "set_aggi_and_eth0_mtu": [ + { + 'iface': 'aggi', + 'option': 'mtu', + 'value': '1350', + 'state': 'present', + }, + { + 'iface': 'eth0', + 'option': 'mtu', + 'value': '1350', + 'state': 'present', + }, + ], + } + for testname, options_list in testcases.items(): + for testfile in self.getTestFiles(): + path = os.path.join(fixture_path, testfile) + lines, ifaces = interfaces_file.read_interfaces_file(module, path) + fail_json_iterations = [] + for i, options in enumerate(options_list): + try: + _, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'], options['option'], options['value'], options['state']) + except AnsibleFailJson as e: + fail_json_iterations.append("[%d] fail_json message: %s\noptions:\n%s" % + (i, str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')))) + self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname)) + + self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname)) + self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname)) + + def test_revert(self): + testcases = { + "revert": [ + { + 'iface': 'eth0', + 'option': 'mtu', + 'value': '1350', + } + ], + } + for testname, options_list in testcases.items(): + for testfile in self.getTestFiles(): + with tempfile.NamedTemporaryFile() as temp_file: + src_path = os.path.join(fixture_path, testfile) + path = temp_file.name + shutil.copy(src_path, path) + lines, ifaces = interfaces_file.read_interfaces_file(module, path) + backupp = module.backup_local(path) + options = options_list[0] + for state in ['present', 'absent']: + fail_json_iterations = [] + options['state'] = state + try: + _, lines = interfaces_file.setInterfaceOption(module, lines, + options['iface'], options['option'], options['value'], options['state']) + except AnsibleFailJson as e: + 
fail_json_iterations.append("fail_json message: %s\noptions:\n%s" % + (str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')))) + interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path) + + self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname)) + + self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname)) + self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname)) + self.compareFileToBackup(path, backupp) + + def test_change_method(self): + testcases = { + "change_method": [ + { + 'iface': 'eth1', + 'option': 'method', + 'value': 'dhcp', + 'state': 'present', + } + ], + } + for testname, options_list in testcases.items(): + for testfile in self.getTestFiles(): + with tempfile.NamedTemporaryFile() as temp_file: + src_path = os.path.join(fixture_path, testfile) + path = temp_file.name + shutil.copy(src_path, path) + lines, ifaces = interfaces_file.read_interfaces_file(module, path) + backupp = module.backup_local(path) + options = options_list[0] + fail_json_iterations = [] + try: + changed, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'], options['option'], + options['value'], options['state']) + # When a changed is made try running it again for proper idempotency + if changed: + changed_again, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'], + options['option'], options['value'], options['state']) + self.assertFalse(changed_again, + msg='Second request for change should return false for {0} running on {1}'.format(testname, + testfile)) + except AnsibleFailJson as e: + fail_json_iterations.append("fail_json message: %s\noptions:\n%s" % + (str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')))) + interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path) + + 
self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname)) + + self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname)) + self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname)) + # Restore backup + move(backupp, path) + + def test_inet_inet6(self): + testcases = { + "change_ipv4": [ + { + 'iface': 'eth0', + 'address_family': 'inet', + 'option': 'address', + 'value': '192.168.0.42', + 'state': 'present', + } + ], + "change_ipv6": [ + { + 'iface': 'eth0', + 'address_family': 'inet6', + 'option': 'address', + 'value': 'fc00::42', + 'state': 'present', + } + ], + "change_ipv4_pre_up": [ + { + 'iface': 'eth0', + 'address_family': 'inet', + 'option': 'pre-up', + 'value': 'XXXX_ipv4', + 'state': 'present', + } + ], + "change_ipv6_pre_up": [ + { + 'iface': 'eth0', + 'address_family': 'inet6', + 'option': 'pre-up', + 'value': 'XXXX_ipv6', + 'state': 'present', + } + ], + "change_ipv4_post_up": [ + { + 'iface': 'eth0', + 'address_family': 'inet', + 'option': 'post-up', + 'value': 'XXXX_ipv4', + 'state': 'present', + } + ], + "change_ipv6_post_up": [ + { + 'iface': 'eth0', + 'address_family': 'inet6', + 'option': 'post-up', + 'value': 'XXXX_ipv6', + 'state': 'present', + } + ], + } + for testname, options_list in testcases.items(): + for testfile in self.getTestFiles(): + with tempfile.NamedTemporaryFile() as temp_file: + src_path = os.path.join(fixture_path, testfile) + path = temp_file.name + shutil.copy(src_path, path) + lines, ifaces = interfaces_file.read_interfaces_file(module, path) + backupp = module.backup_local(path) + options = options_list[0] + fail_json_iterations = [] + try: + _, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'], options['option'], + options['value'], options['state'], options['address_family']) + except AnsibleFailJson as e: + fail_json_iterations.append("fail_json message: %s\noptions:\n%s" % + (str(e), 
json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')))) + interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path) + + self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname)) + + self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname)) + self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname)) + # Restore backup + move(backupp, path) diff --git a/tests/unit/modules/system/test_mount.py b/tests/unit/modules/system/test_mount.py new file mode 100644 index 0000000..f25e6e1 --- /dev/null +++ b/tests/unit/modules/system/test_mount.py @@ -0,0 +1,25 @@ +import os +import tempfile + +from ansible_collections.ansible.posix.tests.unit.compat import unittest +from ansible.module_utils._text import to_bytes + +from ansible_collections.ansible.posix.plugins.modules.mount import get_linux_mounts + + +class LinuxMountsTestCase(unittest.TestCase): + + def _create_file(self, content): + tmp_file = tempfile.NamedTemporaryFile(prefix='ansible-test-', delete=False) + tmp_file.write(to_bytes(content)) + tmp_file.close() + self.addCleanup(os.unlink, tmp_file.name) + return tmp_file.name + + def test_code_comment(self): + path = self._create_file( + '140 136 253:2 /rootfs / rw - ext4 /dev/sdb2 rw\n' + '141 140 253:2 /rootfs/tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw\n' + ) + mounts = get_linux_mounts(None, path) + self.assertEqual(mounts['/tmp/bbb']['src'], '/tmp/aaa') diff --git a/tests/unit/modules/utils.py b/tests/unit/modules/utils.py new file mode 100644 index 0000000..322a472 --- /dev/null +++ b/tests/unit/modules/utils.py @@ -0,0 +1,47 @@ +import json + +from ansible_collections.ansible.posix.tests.unit.compat import unittest +from ansible_collections.ansible.posix.tests.unit.compat.mock import patch +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes + + +def set_module_args(args): + if 
'_ansible_remote_tmp' not in args: + args['_ansible_remote_tmp'] = '/tmp' + if '_ansible_keep_remote_files' not in args: + args['_ansible_keep_remote_files'] = False + + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) + + +class AnsibleExitJson(Exception): + pass + + +class AnsibleFailJson(Exception): + pass + + +def exit_json(*args, **kwargs): + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class ModuleTestCase(unittest.TestCase): + + def setUp(self): + self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module.start() + self.mock_sleep = patch('time.sleep') + self.mock_sleep.start() + set_module_args({}) + self.addCleanup(self.mock_module.stop) + self.addCleanup(self.mock_sleep.stop) diff --git a/tests/unit/plugins/__init__.py b/tests/unit/plugins/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/plugins/action/__init__.py b/tests/unit/plugins/action/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/plugins/action/fixtures/__init__.py b/tests/unit/plugins/action/fixtures/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/plugins/action/fixtures/synchronize/__init__.py b/tests/unit/plugins/action/fixtures/synchronize/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic/meta.yaml b/tests/unit/plugins/action/fixtures/synchronize/basic/meta.yaml new file mode 100644 index 0000000..7608ebf --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic/meta.yaml @@ -0,0 +1,17 @@ +fixtures: + taskvars_in: taskvars_in.json + taskvars_out: taskvars_out.json +connection: + transport: 'ssh' +hostvars: + '127.0.0.1': {} + '::1': {} + 'localhost': {} +asserts: + - 
"hasattr(SAM._connection, 'ismock')" + - "SAM._connection.transport == 'local'" + - "self._play_context.shell == 'sh'" + - "self.execute_called" + - "self.final_module_args['_local_rsync_path'] == 'rsync'" + - "self.final_module_args['src'] == '/tmp/deleteme'" + - "self.final_module_args['dest'] == 'root@el6host:/tmp/deleteme'" diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic/task_args_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic/task_args_out.json new file mode 100644 index 0000000..93ae7bb --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic/task_args_out.json @@ -0,0 +1,5 @@ +{ + "dest": "root@el6host:/tmp/deleteme", + "src": "/tmp/deleteme", + "_local_rsync_path": "rsync" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic/taskvars_in.json b/tests/unit/plugins/action/fixtures/synchronize/basic/taskvars_in.json new file mode 100644 index 0000000..9eb5d50 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic/taskvars_in.json @@ -0,0 +1,151 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "ansible_connection": "smart", + "ansible_ssh_common_args": "", + "environment": [], + "inventory_hostname": "el6host", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "el6host", + "inventory_file": "inventory", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "play_hosts": [ + "el6host" + ], + "ansible_play_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "inventory_hostname": "el6host", + 
"inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__03600813b83569c710bf5cb2a040d6e01da927c6", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__03600813b83569c710bf5cb2a040d6e01da927c6", + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "ansible_ssh_host": "el6host", + "ansible_current_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__03600813b83569c710bf5cb2a040d6e01da927c6", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": "inventory", + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "ansible_host": "el6host", + 
"ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "el6host", + "omit": "__omit_place_holder__03600813b83569c710bf5cb2a040d6e01da927c6", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "role_names": [], + "play_hosts": [ + "el6host" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic/taskvars_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic/taskvars_out.json new file mode 100644 index 0000000..01ebee2 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic/taskvars_out.json @@ -0,0 +1,156 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "ansible_connection": "smart", + "ansible_ssh_common_args": "", + "environment": [], + "inventory_hostname": "el6host", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "el6host", + "inventory_file": "inventory", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "play_hosts": [ + "el6host" + ], + "ansible_play_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "::1" + ], + "all": [ + "el6host", + "::1" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__03600813b83569c710bf5cb2a040d6e01da927c6", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + 
"full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__03600813b83569c710bf5cb2a040d6e01da927c6", + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "ansible_ssh_host": "el6host", + "ansible_current_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "::1" + ], + "all": [ + "el6host", + "::1" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__03600813b83569c710bf5cb2a040d6e01da927c6", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": "inventory", + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "ansible_host": "el6host", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "el6host", + "omit": "__omit_place_holder__03600813b83569c710bf5cb2a040d6e01da927c6", + "ansible_python_interpreter": "/usr/bin/python", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "playbook_dir": 
"/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "role_names": [], + "play_hosts": [ + "el6host" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_become/meta.yaml b/tests/unit/plugins/action/fixtures/synchronize/basic_become/meta.yaml new file mode 100644 index 0000000..1ba3b4a --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_become/meta.yaml @@ -0,0 +1,38 @@ +task_args: + src: /tmp/deleteme + dest: /tmp/deleteme + #rsync_path: rsync +_task: + become: True + become_method: None +fixtures: + taskvars_in: task_vars_in.json + taskvars_out: task_vars_out.json +connection: + transport: 'ssh' +_play_context: + become: True + become_method: sudo + remote_addr: el6host + remote_user: root +hostvars: + '127.0.0.1': {} + '::1': {} + 'localhost': {} +asserts: + - "hasattr(SAM._connection, 'ismock')" + - "SAM._connection.transport == 'local'" + - "self.execute_called" + - "self.final_module_args['_local_rsync_path'] == 'rsync'" + # this is a crucial aspect of this scenario ... 
+ - "self.final_module_args['rsync_path'] == 'sudo rsync'" + - "self.final_module_args['src'] == '/tmp/deleteme'" + - "self.final_module_args['dest'] == 'root@el6host:/tmp/deleteme'" + - "self.task.become == True" + - "self.task.become_user == None" + - "self._play_context.shell == 'sh'" + - "self._play_context.remote_addr == 'el6host'" + - "self._play_context.remote_user == 'root'" + - "self._play_context.become == False" + - "self._play_context.become_user == 'root'" + - "self._play_context.password == None" diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_become/task_args_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic_become/task_args_out.json new file mode 100644 index 0000000..27f8e25 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_become/task_args_out.json @@ -0,0 +1,6 @@ +{ + "dest": "root@el6host:/tmp/deleteme", + "src": "/tmp/deleteme", + "rsync_path": "sudo rsync", + "_local_rsync_path": "rsync" +} diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_become/task_vars_in.json b/tests/unit/plugins/action/fixtures/synchronize/basic_become/task_vars_in.json new file mode 100644 index 0000000..0f1f978 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_become/task_vars_in.json @@ -0,0 +1,151 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "ansible_connection": "smart", + "ansible_ssh_common_args": "", + "environment": [], + "inventory_hostname": "el6host", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "el6host", + "inventory_file": "inventory", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "play_hosts": [ + "el6host" + ], + "ansible_play_hosts": [ + "el6host" + ], + 
"hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "ansible_ssh_host": "el6host", + "ansible_current_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": "inventory", + 
"ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "ansible_host": "el6host", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "el6host", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "role_names": [], + "play_hosts": [ + "el6host" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_become/task_vars_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic_become/task_vars_out.json new file mode 100644 index 0000000..75abced --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_become/task_vars_out.json @@ -0,0 +1,156 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "ansible_connection": "smart", + "ansible_ssh_common_args": "", + "environment": [], + "inventory_hostname": "el6host", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "el6host", + "inventory_file": "inventory", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "play_hosts": [ + "el6host" + ], + "ansible_play_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "::1" + ], + "all": [ + "el6host", + "::1" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": 
"/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "ansible_ssh_host": "el6host", + "ansible_current_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "::1" + ], + "all": [ + "el6host", + "::1" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": "inventory", + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "ansible_host": "el6host", + "ansible_shell_executable": "/bin/sh", + 
"inventory_hostname_short": "el6host", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "ansible_python_interpreter": "/usr/bin/python", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "role_names": [], + "play_hosts": [ + "el6host" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/meta.yaml b/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/meta.yaml new file mode 100644 index 0000000..5e55801 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/meta.yaml @@ -0,0 +1,38 @@ +task_args: + src: /tmp/deleteme + dest: /tmp/deleteme + #rsync_path: rsync +_task: + become: None + become_method: None +fixtures: + taskvars_in: task_vars_in.json + taskvars_out: task_vars_out.json +connection: + transport: 'ssh' +_play_context: + become: True + become_method: sudo + remote_addr: el6host + remote_user: root +hostvars: + '127.0.0.1': {} + '::1': {} + 'localhost': {} +asserts: + - "hasattr(SAM._connection, 'ismock')" + - "SAM._connection.transport == 'local'" + - "self.execute_called" + - "self.final_module_args['_local_rsync_path'] == 'rsync'" + # this is a crucial aspect of this scenario ... 
+ - "self.final_module_args['rsync_path'] == 'sudo rsync'" + - "self.final_module_args['src'] == '/tmp/deleteme'" + - "self.final_module_args['dest'] == 'root@el6host:/tmp/deleteme'" + - "self.task.become == None" + - "self.task.become_user == None" + - "self._play_context.shell == 'sh'" + - "self._play_context.remote_addr == 'el6host'" + - "self._play_context.remote_user == 'root'" + - "self._play_context.become == False" + - "self._play_context.become_user == 'root'" + - "self._play_context.password == None" diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/task_args_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/task_args_out.json new file mode 100644 index 0000000..27f8e25 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/task_args_out.json @@ -0,0 +1,6 @@ +{ + "dest": "root@el6host:/tmp/deleteme", + "src": "/tmp/deleteme", + "rsync_path": "sudo rsync", + "_local_rsync_path": "rsync" +} diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/task_vars_in.json b/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/task_vars_in.json new file mode 100644 index 0000000..0f1f978 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/task_vars_in.json @@ -0,0 +1,151 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "ansible_connection": "smart", + "ansible_ssh_common_args": "", + "environment": [], + "inventory_hostname": "el6host", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "el6host", + "inventory_file": "inventory", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "play_hosts": [ + "el6host" + ], + 
"ansible_play_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "ansible_ssh_host": "el6host", + "ansible_current_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": 
false, + "inventory_file": "inventory", + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "ansible_host": "el6host", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "el6host", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "role_names": [], + "play_hosts": [ + "el6host" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/task_vars_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/task_vars_out.json new file mode 100644 index 0000000..75abced --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_become_cli/task_vars_out.json @@ -0,0 +1,156 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "ansible_connection": "smart", + "ansible_ssh_common_args": "", + "environment": [], + "inventory_hostname": "el6host", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "el6host", + "inventory_file": "inventory", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "play_hosts": [ + "el6host" + ], + "ansible_play_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "::1" + ], + "all": [ + "el6host", + "::1" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + 
"playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "ansible_ssh_host": "el6host", + "ansible_current_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "::1" + ], + "all": [ + "el6host", + "::1" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": "inventory", + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "el6host" + ], + "all": [ + "el6host" + ] + }, + "ansible_host": "el6host", + 
"ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "el6host", + "omit": "__omit_place_holder__b3ac1e6ebeed06f4be0c1edca3dca34036cf7f57", + "ansible_python_interpreter": "/usr/bin/python", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "role_names": [], + "play_hosts": [ + "el6host" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/meta.yaml b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/meta.yaml new file mode 100644 index 0000000..7654cc6 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/meta.yaml @@ -0,0 +1,29 @@ +task_args: + src: /tmp/deleteme + dest: /tmp/deleteme +fixtures: + taskvars_in: task_vars_in.json + taskvars_out: task_vars_out.json +connection: + transport: 'ssh' +_play_context: + remote_addr: '127.0.0.1' + remote_user: vagrant +hostvars: + '127.0.0.1': {} + '::1': {} + 'localhost': {} +asserts: + - "hasattr(SAM._connection, 'ismock')" + - "SAM._connection.transport == 'local'" + - "self.execute_called" + - "self.final_module_args['_local_rsync_path'] == 'rsync'" + - "self.final_module_args['dest_port'] == 2202" + - "self.final_module_args['src'] == '/tmp/deleteme'" + - "self.final_module_args['dest'] == 'vagrant@127.0.0.1:/tmp/deleteme'" + - "self._play_context.shell == 'sh'" + - "self._play_context.remote_addr == '127.0.0.1'" + - "self._play_context.remote_user == 'vagrant'" + - "self._play_context.become == False" + - "self._play_context.become_user == 'root'" + - "self._play_context.password == None" diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/task_args_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/task_args_out.json new file mode 100644 index 0000000..48ea779 --- /dev/null +++ 
b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/task_args_out.json @@ -0,0 +1,7 @@ +{ + "dest": "/tmp/deleteme", + "src": "/tmp/deleteme", + "private_key": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "dest_port": 2202, + "_local_rsync_path": "rsync" +} diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/task_vars_in.json b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/task_vars_in.json new file mode 100644 index 0000000..a43a2db --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/task_vars_in.json @@ -0,0 +1,164 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "vagrant", + "ansible_play_hosts": [ + "default" + ], + "ansible_connection": "ssh", + "ansible_ssh_common_args": "", + "ansible_ssh_host": "127.0.0.1", + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "vars": { + "ansible_check_mode": false, + "environment": [], + "inventory_hostname": "default", + "inventory_file": null, + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "play_hosts": [ + "default" + ], + "ansible_play_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": 
"/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__06c4a1b11530cabdf4248804078c1ddacfb88b5e", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__06c4a1b11530cabdf4248804078c1ddacfb88b5e", + "ansible_ssh_port": 2202, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "vagrant" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "environment": [], + "ansible_current_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__06c4a1b11530cabdf4248804078c1ddacfb88b5e", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": 
[ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": null, + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "ansible_port": 2202, + "ansible_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "ansible_host": "127.0.0.1", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "default", + "omit": "__omit_place_holder__06c4a1b11530cabdf4248804078c1ddacfb88b5e", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "vagrant", + "role_names": [], + "play_hosts": [ + "default" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/task_vars_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/task_vars_out.json new file mode 100644 index 0000000..26ea5d4 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant/task_vars_out.json @@ -0,0 +1,169 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "vagrant", + "ansible_play_hosts": [ + "default" + ], + "ansible_connection": "ssh", + "ansible_ssh_common_args": "", + "ansible_ssh_host": "127.0.0.1", + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "vars": { + "ansible_check_mode": false, + "environment": [], + "inventory_hostname": "default", + "inventory_file": null, + "ansible_ssh_private_key_file": 
"/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "play_hosts": [ + "default" + ], + "ansible_play_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default", + "::1" + ], + "all": [ + "default", + "::1" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__06c4a1b11530cabdf4248804078c1ddacfb88b5e", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__06c4a1b11530cabdf4248804078c1ddacfb88b5e", + "ansible_ssh_port": 2202, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "vagrant" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "environment": [], + "ansible_current_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default", + "::1" + ], + "all": [ + 
"default", + "::1" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__06c4a1b11530cabdf4248804078c1ddacfb88b5e", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": null, + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "ansible_port": 2202, + "ansible_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "ansible_host": "127.0.0.1", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "default", + "omit": "__omit_place_holder__06c4a1b11530cabdf4248804078c1ddacfb88b5e", + "ansible_python_interpreter": "/usr/bin/python", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "vagrant", + "role_names": [], + "play_hosts": [ + "default" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/meta.yaml 
b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/meta.yaml new file mode 100644 index 0000000..242de92 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/meta.yaml @@ -0,0 +1,32 @@ +task: + #become: None +task_args: + src: /tmp/deleteme + dest: /tmp/deleteme +fixtures: + taskvars_in: task_vars_in.json + taskvars_out: task_vars_out.json +connection: + transport: 'ssh' +_play_context: + become: True + remote_addr: '127.0.0.1' + remote_user: vagrant +hostvars: + '127.0.0.1': {} + '::1': {} + 'localhost': {} +asserts: + - "hasattr(SAM._connection, 'ismock')" + - "SAM._connection.transport == 'local'" + - "self.execute_called" + - "self.final_module_args['_local_rsync_path'] == 'rsync'" + - "self.final_module_args['dest_port'] == 2202" + - "self.final_module_args['src'] == '/tmp/deleteme'" + - "self.final_module_args['dest'] == 'vagrant@127.0.0.1:/tmp/deleteme'" + - "self._play_context.shell == 'sh'" + - "self._play_context.remote_addr == '127.0.0.1'" + - "self._play_context.remote_user == 'vagrant'" + - "self._play_context.become == False" + - "self._play_context.become_user == 'root'" + - "self._play_context.password == None" diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/task_args_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/task_args_out.json new file mode 100644 index 0000000..9c77ccf --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/task_args_out.json @@ -0,0 +1,7 @@ +{ + "dest": "/tmp/deleteme", + "src": "/tmp/deleteme", + "private_key": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "dest_port": 2202, + "_local_rsync_path": "rsync" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/task_vars_in.json 
b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/task_vars_in.json new file mode 100644 index 0000000..1ecbda5 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/task_vars_in.json @@ -0,0 +1,164 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "vagrant", + "ansible_play_hosts": [ + "default" + ], + "ansible_connection": "ssh", + "ansible_ssh_common_args": "", + "ansible_ssh_host": "127.0.0.1", + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "vars": { + "ansible_check_mode": false, + "environment": [], + "inventory_hostname": "default", + "inventory_file": null, + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "play_hosts": [ + "default" + ], + "ansible_play_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + 
"group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "ansible_ssh_port": 2202, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "vagrant" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "environment": [], + "ansible_current_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": null, + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "ansible_port": 2202, + "ansible_private_key_file": 
"/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "ansible_host": "127.0.0.1", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "default", + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "vagrant", + "role_names": [], + "play_hosts": [ + "default" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/task_vars_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/task_vars_out.json new file mode 100644 index 0000000..a1cf5f2 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_become_cli/task_vars_out.json @@ -0,0 +1,169 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "vagrant", + "ansible_play_hosts": [ + "default" + ], + "ansible_connection": "ssh", + "ansible_ssh_common_args": "", + "ansible_ssh_host": "127.0.0.1", + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "vars": { + "ansible_check_mode": false, + "environment": [], + "inventory_hostname": "default", + "inventory_file": null, + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + 
"ansible_ssh_host": "127.0.0.1", + "play_hosts": [ + "default" + ], + "ansible_play_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default", + "::1" + ], + "all": [ + "default", + "::1" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "ansible_ssh_port": 2202, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "vagrant" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "environment": [], + "ansible_current_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default", + "::1" + ], + "all": [ + "default", + "::1" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + 
"omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": null, + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "ansible_port": 2202, + "ansible_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "ansible_host": "127.0.0.1", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "default", + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "ansible_python_interpreter": "/usr/bin/python", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "vagrant", + "role_names": [], + "play_hosts": [ + "default" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/meta.yaml b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/meta.yaml new file mode 100644 index 0000000..7654cc6 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/meta.yaml @@ -0,0 +1,29 @@ +task_args: + src: /tmp/deleteme + dest: /tmp/deleteme +fixtures: + taskvars_in: task_vars_in.json + taskvars_out: task_vars_out.json +connection: 
+ transport: 'ssh' +_play_context: + remote_addr: '127.0.0.1' + remote_user: vagrant +hostvars: + '127.0.0.1': {} + '::1': {} + 'localhost': {} +asserts: + - "hasattr(SAM._connection, 'ismock')" + - "SAM._connection.transport == 'local'" + - "self.execute_called" + - "self.final_module_args['_local_rsync_path'] == 'rsync'" + - "self.final_module_args['dest_port'] == 2202" + - "self.final_module_args['src'] == '/tmp/deleteme'" + - "self.final_module_args['dest'] == 'vagrant@127.0.0.1:/tmp/deleteme'" + - "self._play_context.shell == 'sh'" + - "self._play_context.remote_addr == '127.0.0.1'" + - "self._play_context.remote_user == 'vagrant'" + - "self._play_context.become == False" + - "self._play_context.become_user == 'root'" + - "self._play_context.password == None" diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/task_args_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/task_args_out.json new file mode 100644 index 0000000..9c77ccf --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/task_args_out.json @@ -0,0 +1,7 @@ +{ + "dest": "/tmp/deleteme", + "src": "/tmp/deleteme", + "private_key": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "dest_port": 2202, + "_local_rsync_path": "rsync" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/task_vars_in.json b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/task_vars_in.json new file mode 100644 index 0000000..1ecbda5 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/task_vars_in.json @@ -0,0 +1,164 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "vagrant", + "ansible_play_hosts": [ + "default" + ], + "ansible_connection": "ssh", + "ansible_ssh_common_args": "", + "ansible_ssh_host": "127.0.0.1", + 
"inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "vars": { + "ansible_check_mode": false, + "environment": [], + "inventory_hostname": "default", + "inventory_file": null, + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "play_hosts": [ + "default" + ], + "ansible_play_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "ansible_ssh_port": 2202, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "vagrant" + }, + 
"ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "environment": [], + "ansible_current_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": null, + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "ansible_port": 2202, + "ansible_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "ansible_host": "127.0.0.1", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "default", + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "vagrant", + "role_names": [], + 
"play_hosts": [ + "default" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/task_vars_out.json b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/task_vars_out.json new file mode 100644 index 0000000..a1cf5f2 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/basic_vagrant_sudo/task_vars_out.json @@ -0,0 +1,169 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "vagrant", + "ansible_play_hosts": [ + "default" + ], + "ansible_connection": "ssh", + "ansible_ssh_common_args": "", + "ansible_ssh_host": "127.0.0.1", + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "vars": { + "ansible_check_mode": false, + "environment": [], + "inventory_hostname": "default", + "inventory_file": null, + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "play_hosts": [ + "default" + ], + "ansible_play_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default", + "::1" + ], + "all": [ + "default", + "::1" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": 
"__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "ansible_ssh_port": 2202, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "vagrant" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "environment": [], + "ansible_current_hosts": [ + "default" + ], + "hostvars": { + "default": { + "inventory_file": null, + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "default", + "::1" + ], + "all": [ + "default", + "::1" + ] + }, + "ansible_ssh_port": 2202, + "inventory_hostname": "default", + "ansible_ssh_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "inventory_hostname_short": "default", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "ansible_ssh_host": "127.0.0.1", + "ansible_ssh_user": "vagrant", + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + 
"inventory_file": null, + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "ansible_port": 2202, + "ansible_private_key_file": "/home/jtanner/workspace/issues/AP-15905/.vagrant/machines/default/virtualbox/private_key", + "groups": { + "ungrouped": [ + "default" + ], + "all": [ + "default" + ] + }, + "ansible_ssh_port": 2202, + "ansible_host": "127.0.0.1", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "default", + "omit": "__omit_place_holder__c360b80aa60ddd99087425dcd3a2094cdd5b8474", + "ansible_python_interpreter": "/usr/bin/python", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905/.vagrant/provisioners/ansible/inventory", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "vagrant", + "role_names": [], + "play_hosts": [ + "default" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/meta.yaml b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/meta.yaml new file mode 100644 index 0000000..1c55b28 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/meta.yaml @@ -0,0 +1,26 @@ +fixtures: + taskvars_in: task_vars_in.json + taskvars_out: task_vars_out.json +task_args: + src: /tmp/deleteme + dest: /tmp/deleteme +_task: + delegate_to: u1404 +_play_context: + shell: None + remote_addr: u1404 + remote_user: root +connection: + transport: 'ssh' +hostvars: + '127.0.0.1': {} + '::1': {} + 'localhost': {} +asserts: + - "hasattr(SAM._connection, 'ismock')" + - "SAM._connection.transport == 'ssh'" + - "self._play_context.shell == None" + - "self.execute_called" + - "self.final_module_args['_local_rsync_path'] == 'rsync'" + - "self.final_module_args['src'] == '/tmp/deleteme'" + - "self.final_module_args['dest'] == 'root@el6host:/tmp/deleteme'" diff --git 
a/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/task_args_out.json b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/task_args_out.json new file mode 100644 index 0000000..7b2ac02 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/task_args_out.json @@ -0,0 +1,5 @@ +{ + "dest": "el6host:/tmp/deleteme", + "src": "/tmp/deleteme", + "_local_rsync_path": "rsync" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/task_vars_in.json b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/task_vars_in.json new file mode 100644 index 0000000..b8893ee --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/task_vars_in.json @@ -0,0 +1,379 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "ansible_connection": "smart", + "ansible_ssh_common_args": "", + "environment": [], + "inventory_hostname": "el6host", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "el6host", + "inventory_file": "inventory", + "ansible_delegated_vars": { + "u1404": { + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "vars": { + "inventory_file": "inventory", + "role_names": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "ansible_ssh_user": "root", + "group_names": [ + "ungrouped" + ], + "play_hosts": [ + "el6host" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", 
+ "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "group_names": [ + "ungrouped" + ], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "ansible_host": "u1404", + "environment": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "play_hosts": [ + "el6host" + ], + "role_names": [], + "ansible_port": null, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + } + }, + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "play_hosts": [ + "el6host" + ], + "ansible_play_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "u1404": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + 
"playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "ansible_ssh_host": "u1404", + "ansible_current_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "u1404": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": 
"/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": "inventory", + "ansible_delegated_vars": { + "u1404": { + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "ansible_ssh_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "group_names": [ + "ungrouped" + ], + "play_hosts": [ + "el6host" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "role_names": [] + }, + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "group_names": [ + "ungrouped" + ], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "ansible_host": "u1404", + "environment": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "play_hosts": [ + "el6host" + ], + "role_names": [], + "ansible_port": null, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + 
"ansible_ssh_user": "root" + } + }, + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_host": "u1404", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "el6host", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "role_names": [], + "play_hosts": [ + "el6host" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/task_vars_out.json b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/task_vars_out.json new file mode 100644 index 0000000..0b551f0 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote/task_vars_out.json @@ -0,0 +1,387 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "ansible_connection": "smart", + "ansible_ssh_common_args": "", + "environment": [], + "inventory_hostname": "el6host", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "el6host", + "inventory_file": "inventory", + "ansible_delegated_vars": { + "u1404": { + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "vars": { + "inventory_file": "inventory", + "role_names": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": 
"__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "ansible_ssh_user": "root", + "group_names": [ + "ungrouped" + ], + "play_hosts": [ + "el6host" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "group_names": [ + "ungrouped" + ], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "ansible_host": "u1404", + "environment": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "play_hosts": [ + "el6host" + ], + "role_names": [], + "ansible_port": null, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + } + }, + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "play_hosts": [ + "el6host" + ], + "ansible_play_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404", + "::1" + ], + "all": [ + "el6host", + "u1404", + "::1" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + 
"string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "u1404": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404", + "::1" + ], + "all": [ + "el6host", + "u1404", + "::1" + ] + }, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "ansible_ssh_host": "u1404", + "ansible_current_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404", + "::1" + ], + "all": [ + "el6host", + "u1404", + "::1" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "u1404": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + 
"groups": { + "ungrouped": [ + "el6host", + "u1404", + "::1" + ], + "all": [ + "el6host", + "u1404", + "::1" + ] + }, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": "inventory", + "ansible_delegated_vars": { + "u1404": { + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "ansible_ssh_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "group_names": [ + "ungrouped" + ], + "play_hosts": [ + "el6host" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "role_names": [] + }, + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "group_names": [ + "ungrouped" + ], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "ansible_host": "u1404", + "environment": [], + "ansible_play_hosts": [ + 
"el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "play_hosts": [ + "el6host" + ], + "role_names": [], + "ansible_port": null, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + } + }, + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_host": "u1404", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "el6host", + "omit": "__omit_place_holder__2433ce0463ffd13b68850ce9cdd98a1cde088e22", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "role_names": [], + "play_hosts": [ + "el6host" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/meta.yaml b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/meta.yaml new file mode 100644 index 0000000..28e35c9 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/meta.yaml @@ -0,0 +1,33 @@ +fixtures: + taskvars_in: task_vars_in.json + taskvars_out: task_vars_out.json +task_args: + src: /tmp/deleteme + dest: /tmp/deleteme +_task: + delegate_to: u1404 +_play_context: + become: True + become_user: None #if ! 
None|root, different testcase + become_method: su + shell: None + remote_addr: u1404 + remote_user: root +connection: + transport: 'ssh' +hostvars: + '127.0.0.1': {} + '::1': {} + 'localhost': {} +asserts: + - "hasattr(SAM._connection, 'ismock')" + - "SAM._connection.transport == 'ssh'" + - "self._play_context.shell == None" + - "self._play_context.remote_addr == 'u1404'" + - "self._play_context.remote_user == 'root'" + - "not self._play_context.become" + - "self._play_context.become_method == 'su'" + - "self.execute_called" + - "self.final_module_args['_local_rsync_path'] == 'rsync'" + - "self.final_module_args['src'] == '/tmp/deleteme'" + - "self.final_module_args['dest'] == 'root@el6host:/tmp/deleteme'" diff --git a/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/task_args_out.json b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/task_args_out.json new file mode 100644 index 0000000..57257db --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/task_args_out.json @@ -0,0 +1,6 @@ +{ + "dest": "el6host:/tmp/deleteme", + "src": "/tmp/deleteme", + "rsync_path": "sudo rsync", + "_local_rsync_path": "rsync" +} diff --git a/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/task_vars_in.json b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/task_vars_in.json new file mode 100644 index 0000000..634bcb3 --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/task_vars_in.json @@ -0,0 +1,379 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "ansible_connection": "smart", + "ansible_ssh_common_args": "", + "environment": [], + "inventory_hostname": "el6host", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "el6host", + "inventory_file": "inventory", + "ansible_delegated_vars": { + "u1404": { + 
"inventory_hostname": "u1404", + "inventory_file": "inventory", + "vars": { + "inventory_file": "inventory", + "role_names": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "ansible_ssh_user": "root", + "group_names": [ + "ungrouped" + ], + "play_hosts": [ + "el6host" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "group_names": [ + "ungrouped" + ], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "ansible_host": "u1404", + "environment": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "play_hosts": [ + "el6host" + ], + "role_names": [], + "ansible_port": null, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + } + }, + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "play_hosts": [ + "el6host" + ], + "ansible_play_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + 
"el6host", + "u1404" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "u1404": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "ansible_ssh_host": "u1404", + "ansible_current_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": 
"__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "u1404": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": "inventory", + "ansible_delegated_vars": { + "u1404": { + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "ansible_ssh_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "group_names": [ + "ungrouped" + ], + "play_hosts": [ + "el6host" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + 
"role_names": [] + }, + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "group_names": [ + "ungrouped" + ], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "ansible_host": "u1404", + "environment": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "play_hosts": [ + "el6host" + ], + "role_names": [], + "ansible_port": null, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + } + }, + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_host": "u1404", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "el6host", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "role_names": [], + "play_hosts": [ + "el6host" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/task_vars_out.json b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/task_vars_out.json new file mode 100644 index 0000000..54707ce --- /dev/null +++ b/tests/unit/plugins/action/fixtures/synchronize/delegate_remote_su/task_vars_out.json @@ -0,0 +1,387 @@ +{ + "ansible_pipelining": false, + "ansible_docker_extra_args": "", + "ansible_scp_extra_args": "", + "ansible_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + "ansible_connection": "smart", + "ansible_ssh_common_args": 
"", + "environment": [], + "inventory_hostname": "el6host", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "el6host", + "inventory_file": "inventory", + "ansible_delegated_vars": { + "u1404": { + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "vars": { + "inventory_file": "inventory", + "role_names": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "ansible_ssh_user": "root", + "group_names": [ + "ungrouped" + ], + "play_hosts": [ + "el6host" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "group_names": [ + "ungrouped" + ], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "ansible_host": "u1404", + "environment": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "play_hosts": [ + "el6host" + ], + "role_names": [], + "ansible_port": null, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + } + }, + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "role_names": [], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "play_hosts": [ + "el6host" + ], 
+ "ansible_play_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404", + "::1" + ], + "all": [ + "el6host", + "u1404", + "::1" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "u1404": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404", + "::1" + ], + "all": [ + "el6host", + "u1404", + "::1" + ] + }, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "group_names": [ + "ungrouped" + ], + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + }, + "ansible_accelerate_port": 5099, + "roledir": null, + "ansible_ssh_extra_args": "", + "ansible_ssh_host": "u1404", + "ansible_current_hosts": [ + "el6host" + ], + "hostvars": { + "el6host": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + 
"groups": { + "ungrouped": [ + "el6host", + "u1404", + "::1" + ], + "all": [ + "el6host", + "u1404", + "::1" + ] + }, + "inventory_hostname": "el6host", + "inventory_hostname_short": "el6host", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + }, + "u1404": { + "inventory_file": "inventory", + "group_names": [ + "ungrouped" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404", + "::1" + ], + "all": [ + "el6host", + "u1404", + "::1" + ] + }, + "inventory_hostname": "u1404", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "ansible_check_mode": false, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + } + } + }, + "group_names": [ + "ungrouped" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_pipelining": false, + "inventory_file": "inventory", + "ansible_delegated_vars": { + "u1404": { + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "vars": { + "ansible_check_mode": false, + "inventory_hostname": "u1404", + "inventory_file": "inventory", + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "environment": [], + "ansible_ssh_user": "root", + "ansible_play_hosts": [ + "el6host" + ], + 
"groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "group_names": [ + "ungrouped" + ], + "play_hosts": [ + "el6host" + ], + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "role_names": [] + }, + "inventory_hostname_short": "u1404", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "group_names": [ + "ungrouped" + ], + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "ansible_host": "u1404", + "environment": [], + "ansible_play_hosts": [ + "el6host" + ], + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_check_mode": false, + "play_hosts": [ + "el6host" + ], + "role_names": [], + "ansible_port": null, + "ansible_version": { + "major": 2, + "full": "2.2.0", + "string": "2.2.0", + "minor": 2, + "revision": 0 + }, + "ansible_ssh_user": "root" + } + }, + "ansible_module_compression": "ZIP_DEFLATED", + "ansible_failed_hosts": [], + "ansible_check_mode": false, + "groups": { + "ungrouped": [ + "el6host", + "u1404" + ], + "all": [ + "el6host", + "u1404" + ] + }, + "ansible_host": "u1404", + "ansible_shell_executable": "/bin/sh", + "inventory_hostname_short": "el6host", + "omit": "__omit_place_holder__32a8706cee222390e0d92197fb49cc967bfafb57", + "inventory_dir": "/home/jtanner/workspace/issues/AP-15905", + "playbook_dir": "/home/jtanner/workspace/issues/AP-15905", + "ansible_ssh_user": "root", + "role_names": [], + "play_hosts": [ + "el6host" + ], + "ansible_sftp_extra_args": "" +} \ No newline at end of file diff --git a/tests/unit/plugins/action/test_synchronize.py b/tests/unit/plugins/action/test_synchronize.py new file mode 100644 index 0000000..4f78928 --- /dev/null +++ b/tests/unit/plugins/action/test_synchronize.py @@ -0,0 +1,264 @@ +''' +(Epdb) pprint(DeepDiff(self.final_task_vars, 
out_task_vars), indent=2) +{ 'dic_item_added': set([u"root['ansible_python_interpreter']"]), + 'dic_item_removed': set([ u"root['hostvars']['127.0.0.1']", + u"root['hostvars']['::1']", + u"root['hostvars']['localhost']"]), + 'iterable_item_added': { u"root['hostvars']['el6host']['groups']['all'][1]": u'::1', + u"root['hostvars']['el6host']['groups']['ungrouped'][1]": u'::1', + u"root['vars']['hostvars']['el6host']['groups']['all'][1]": u'::1', + u"root['vars']['hostvars']['el6host']['groups']['ungrouped'][1]": u'::1'}} +''' + +import json +import os +import unittest +import yaml + +import ansible.plugins +from ansible_collections.ansible.posix.tests.unit.compat.mock import patch, MagicMock +from ansible_collections.ansible.posix.plugins.action.synchronize import ActionModule + + +# Getting the incoming and outgoing task vars from the plugin's run method + +''' +import copy +safe_vars = {} +for k,v in task_vars.items(): + if k not in ['vars', 'hostvars']: + safe_vars[k] = copy.deepcopy(v) + else: + sdata = str(v) + newv = eval(sdata) + safe_vars[k] = newv + +import json +with open('task_vars.json', 'wb') as f: + f.write(json.dumps(safe_vars, indent=2)) +''' + + +class BreakPoint(Exception): + pass + + +class TaskMock(object): + args = {'src': u'/tmp/deleteme', + 'dest': '/tmp/deleteme', + 'rsync_path': 'rsync'} + async_val = None + become = None + become_user = None + become_method = None + + +class StdinMock(object): + shell = None + + +class ConnectionMock(object): + ismock = True + _play_context = None + # transport = 'ssh' + transport = None + _new_stdin = StdinMock() + + get_option = MagicMock(return_value='root') + + # my shell + _shell = MagicMock() + _shell.mkdtemp.return_value = 'mkdir command' + _shell.join_path.side_effect = os.path.join + _shell.get_option = MagicMock(return_value=['root', 'toor']) + + +class PlayContextMock(object): + shell = None + private_key_file = None + become = False + become_user = 'root' + become_method = None + check_mode = 
False + no_log = None + diff = None + remote_addr = None + remote_user = None + password = None + + +class ModuleLoaderMock(object): + def find_plugin(self, module_name, mod_type): + pass + + +class SharedLoaderMock(object): + module_loader = ModuleLoaderMock() + + +class SynchronizeTester(object): + + ''' A wrapper for mocking out synchronize environments ''' + + task = TaskMock() + connection = ConnectionMock() + _play_context = PlayContextMock() + loader = None + templar = None + shared_loader_obj = SharedLoaderMock() + + final_task_vars = None + execute_called = False + + def _execute_module(self, module_name, module_args=None, task_vars=None): + self.execute_called = True + self.final_module_args = module_args + self.final_task_vars = task_vars + return {} + + def runtest(self, fixturepath='fixtures/synchronize/basic'): + + metapath = os.path.join(fixturepath, 'meta.yaml') + with open(metapath, 'rb') as f: + fdata = f.read() + test_meta = yaml.load(fdata) + + # load initial play context vars + if '_play_context' in test_meta: + if test_meta['_play_context']: + self.task.args = {} + for (k, v) in test_meta['_play_context'].items(): + if v == 'None': + v = None + setattr(self._play_context, k, v) + + # load initial task context vars + if '_task' in test_meta: + if test_meta['_task']: + self.task.args = {} + for (k, v) in test_meta['_task'].items(): + # import epdb; epdb.st() + if v == 'None': + v = None + setattr(self.task, k, v) + + # load initial task vars + if 'task_args' in test_meta: + if test_meta['task_args']: + self.task.args = {} + for (k, v) in test_meta['task_args'].items(): + self.task.args[k] = v + + # load initial task vars + invarspath = os.path.join(fixturepath, test_meta.get('fixtures', {}).get('taskvars_in', 'taskvars_in.json')) + with open(invarspath, 'rb') as f: + fdata = f.read() + fdata = fdata.decode("utf-8") + in_task_vars = json.loads(fdata) + + # load expected final task vars + outvarspath = os.path.join(fixturepath, 
test_meta.get('fixtures', {}).get('taskvars_out', 'taskvars_out.json')) + with open(outvarspath, 'rb') as f: + fdata = f.read() + fdata = fdata.decode("utf-8") + out_task_vars = json.loads(fdata) + + # fixup the connection + for (k, v) in test_meta['connection'].items(): + setattr(self.connection, k, v) + + # fixup the hostvars + if test_meta['hostvars']: + for (k, v) in test_meta['hostvars'].items(): + in_task_vars['hostvars'][k] = v + + # initialize and run the module + SAM = ActionModule(self.task, self.connection, self._play_context, + self.loader, self.templar, self.shared_loader_obj) + SAM._execute_module = self._execute_module + result = SAM.run(task_vars=in_task_vars) + + # run assertions + for check in test_meta['asserts']: + value = eval(check) + # if not value: + # print(check, value) + # import epdb; epdb.st() + assert value, check + + +class FakePluginLoader(object): + mocked = True + + @staticmethod + def get(transport, play_context, new_stdin): + conn = ConnectionMock() + conn.transport = transport + conn._play_context = play_context + conn._new_stdin = new_stdin + return conn + + +class TestSynchronizeAction(unittest.TestCase): + + fixturedir = os.path.dirname(__file__) + fixturedir = os.path.join(fixturedir, 'fixtures', 'synchronize') + # print(basedir) + + @patch('ansible_collections.ansible.posix.plugins.action.synchronize.connection_loader', FakePluginLoader) + def test_basic(self): + x = SynchronizeTester() + x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic')) + + @patch('ansible_collections.ansible.posix.plugins.action.synchronize.connection_loader', FakePluginLoader) + def test_basic_become(self): + x = SynchronizeTester() + x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_become')) + + @patch('ansible_collections.ansible.posix.plugins.action.synchronize.connection_loader', FakePluginLoader) + def test_basic_become_cli(self): + # --become on the cli sets _play_context.become + x = SynchronizeTester() + 
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_become_cli')) + + @patch('ansible_collections.ansible.posix.plugins.action.synchronize.connection_loader', FakePluginLoader) + def test_basic_vagrant(self): + # simple vagrant example + x = SynchronizeTester() + x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_vagrant')) + + @patch('ansible_collections.ansible.posix.plugins.action.synchronize.connection_loader', FakePluginLoader) + def test_basic_vagrant_sudo(self): + # vagrant plus sudo + x = SynchronizeTester() + x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_vagrant_sudo')) + + @patch('ansible_collections.ansible.posix.plugins.action.synchronize.connection_loader', FakePluginLoader) + def test_basic_vagrant_become_cli(self): + # vagrant plus sudo + x = SynchronizeTester() + x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_vagrant_become_cli')) + + @patch('ansible_collections.ansible.posix.plugins.action.synchronize.connection_loader', FakePluginLoader) + def test_delegate_remote(self): + # delegate to other remote host + x = SynchronizeTester() + x.runtest(fixturepath=os.path.join(self.fixturedir, 'delegate_remote')) + + @patch('ansible_collections.ansible.posix.plugins.action.synchronize.connection_loader', FakePluginLoader) + def test_delegate_remote_su(self): + # delegate to other remote host with su enabled + x = SynchronizeTester() + x.runtest(fixturepath=os.path.join(self.fixturedir, 'delegate_remote_su')) + + @patch.object(ActionModule, '_low_level_execute_command', side_effect=BreakPoint) + @patch.object(ActionModule, '_remote_expand_user', side_effect=ActionModule._remote_expand_user, autospec=True) + def test_remote_user_not_in_local_tmpdir(self, spy_remote_expand_user, ll_ec): + x = SynchronizeTester() + SAM = ActionModule(x.task, x.connection, x._play_context, + x.loader, x.templar, x.shared_loader_obj) + try: + SAM.run(task_vars={'hostvars': {'foo': {}, 'localhost': {}}, 'inventory_hostname': 'foo'}) + except 
BreakPoint: + pass + self.assertEqual(spy_remote_expand_user.call_count, 0) diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt new file mode 100644 index 0000000..a9772be --- /dev/null +++ b/tests/unit/requirements.txt @@ -0,0 +1,42 @@ +boto3 +placebo +pycrypto +passlib +pypsrp +python-memcached +pytz +pyvmomi +redis +requests +setuptools > 0.6 # pytest-xdist installed via requirements does not work with very old setuptools (sanity_ok) +unittest2 ; python_version < '2.7' +importlib ; python_version < '2.7' +netaddr +ipaddress +netapp-lib +solidfire-sdk-python + +# requirements for F5 specific modules +f5-sdk ; python_version >= '2.7' +f5-icontrol-rest ; python_version >= '2.7' +deepdiff + +# requirement for Fortinet specific modules +pyFMG + +# requirement for aci_rest module +xmljson + +# requirement for winrm connection plugin tests +pexpect + +# requirement for the linode module +linode-python # APIv3 +linode_api4 ; python_version > '2.6' # APIv4 + +# requirement for the gitlab module +python-gitlab +httmock + +# requirment for kubevirt modules +openshift ; python_version >= '2.7'