Initial commit

Jon Ervine 2021-03-03 11:36:53 +08:00
parent a5dc995324
commit 4cdbe9a6d5
137 changed files with 12374 additions and 2 deletions

.dockerignore Normal file (1 line)

@@ -0,0 +1 @@
.tox

.pre-commit-config.yaml Normal file (6 lines)

@@ -0,0 +1,6 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
  rev: v2.0.0
  hooks:
  - id: flake8

.yamllint Normal file (14 lines)

@@ -0,0 +1,14 @@
---
extends: default

ignore: |
  .tox

rules:
  braces:
    max-spaces-inside: 1
    level: error
  brackets:
    max-spaces-inside: 1
    level: error
  line-length: disable

.zuul.d/jobs.yaml Normal file (117 lines)

@@ -0,0 +1,117 @@
---
- job:
    name: ansible-runner-build-container-image-base
    parent: ansible-build-container-image
    abstract: true
    description: Build ansible-runner container image
    required-projects:
      - name: github.com/ansible/ansible-runner
    timeout: 2700
    vars:
      zuul_work_dir: "{{ zuul.projects['github.com/ansible/ansible-runner'].src_dir }}"

- job:
    name: ansible-runner-upload-container-image-base
    parent: ansible-upload-container-image
    abstract: true
    description: Build ansible-runner container image and upload to quay.io
    required-projects:
      - name: github.com/ansible/ansible-runner
    timeout: 2700
    vars:
      zuul_work_dir: "{{ zuul.projects['github.com/ansible/ansible-runner'].src_dir }}"

- job:
    name: ansible-runner-build-container-image
    parent: ansible-runner-build-container-image-base
    provides: ansible-runner-container-image
    requires: ansible-core-container-image
    vars: &ansible_runner_image_vars
      container_images: &container_images
        - context: .
          registry: quay.io
          repository: quay.io/ansible/ansible-runner
          tags:
            # If zuul.tag is defined: [ '3', '3.19', '3.19.0' ]. Only works for 3-component tags.
            # Otherwise: ['devel']
            "{{ zuul.tag is defined | ternary([zuul.get('tag', '').split('.')[0], '.'.join(zuul.get('tag', '').split('.')[:2]), zuul.get('tag', '')], ['devel']) }}"
      docker_images: *container_images

- job:
    name: ansible-runner-upload-container-image
    parent: ansible-runner-upload-container-image-base
    provides: ansible-runner-container-image
    requires: ansible-core-container-image
    vars: *ansible_runner_image_vars

- job:
    name: ansible-runner-build-container-image-stable-2.10
    parent: ansible-runner-build-container-image-base
    provides: ansible-runner-stable-2.10-container-image
    requires:
      - python-base-container-image
      - python-builder-container-image
    required-projects:
      - name: github.com/ansible/ansible
        override-checkout: stable-2.10
    vars: &ansible_runner_image_vars_stable_2_10
      container_images: &container_images_stable_2_10
        - context: .
          build_args:
            - ANSIBLE_BRANCH=stable-2.10
            # NOTE(pabelanger): There is no ansible-core 2.10, so we have to build ansible-base 2.10 ourselves.
            - ANSIBLE_CORE_IMAGE=quay.io/ansible/python-base:latest
          registry: quay.io
          repository: quay.io/ansible/ansible-runner
          siblings:
            - github.com/ansible/ansible
          tags: ['stable-2.10-devel']
      docker_images: *container_images_stable_2_10

- job:
    name: ansible-runner-upload-container-image-stable-2.10
    parent: ansible-runner-upload-container-image-base
    provides: ansible-runner-stable-2.10-container-image
    requires:
      - python-base-container-image
      - python-builder-container-image
    required-projects:
      - name: github.com/ansible/ansible
        override-checkout: stable-2.10
    vars: *ansible_runner_image_vars_stable_2_10

- job:
    name: ansible-runner-build-container-image-stable-2.9
    parent: ansible-runner-build-container-image-base
    provides: ansible-runner-stable-2.9-container-image
    requires:
      - python-base-container-image
      - python-builder-container-image
    required-projects:
      - name: github.com/ansible/ansible
        override-checkout: stable-2.9
    vars: &ansible_runner_image_vars_stable_2_9
      container_images: &container_images_stable_2_9
        - context: .
          build_args:
            - ANSIBLE_BRANCH=stable-2.9
            # NOTE(pabelanger): There is no ansible-core 2.9, so we have to build ansible 2.9 ourselves.
            - ANSIBLE_CORE_IMAGE=quay.io/ansible/python-base:latest
          registry: quay.io
          repository: quay.io/ansible/ansible-runner
          siblings:
            - github.com/ansible/ansible
          tags: ['stable-2.9-devel']
      docker_images: *container_images_stable_2_9

- job:
    name: ansible-runner-upload-container-image-stable-2.9
    parent: ansible-runner-upload-container-image-base
    provides: ansible-runner-stable-2.9-container-image
    requires:
      - python-base-container-image
      - python-builder-container-image
    required-projects:
      - name: github.com/ansible/ansible
        override-checkout: stable-2.9
    vars: *ansible_runner_image_vars_stable_2_9

.zuul.d/project.yaml Normal file (34 lines)

@@ -0,0 +1,34 @@
---
- project:
    check:
      jobs:
        - ansible-runner-build-container-image
        - ansible-runner-build-container-image-stable-2.9
        - ansible-runner-build-container-image-stable-2.10
    gate:
      jobs:
        - ansible-runner-build-container-image
        - ansible-runner-build-container-image-stable-2.9
        - ansible-runner-build-container-image-stable-2.10
    post:
      jobs:
        - ansible-runner-upload-container-image:
            vars:
              upload_container_image_promote: false
        - ansible-runner-upload-container-image-stable-2.9:
            vars:
              upload_container_image_promote: false
        - ansible-runner-upload-container-image-stable-2.10:
            vars:
              upload_container_image_promote: false
    periodic:
      jobs:
        - ansible-runner-upload-container-image:
            vars:
              upload_container_image_promote: false
        - ansible-runner-upload-container-image-stable-2.9:
            vars:
              upload_container_image_promote: false
        - ansible-runner-upload-container-image-stable-2.10:
            vars:
              upload_container_image_promote: false

CHANGES.rst Normal file (194 lines)

@@ -0,0 +1,194 @@
.. :changelog:

Changelog
---------

1.4.6 (2020-03-26)
++++++++++++++++++

- Fixed a bug that broke Ansible playbook execution prior to version 2.8 of
  Ansible.

1.4.5 (2020-03-19)
++++++++++++++++++

- Fix an issue with --process_isolation_* paths parsing cli args
- Switch default docker images to centos:8
- Switch extravar format so we can support more than just string types
- Make sure job events folder is created earlier to prevent errors when
  using immediately after starting a runner job
- Annotate all runner_on events with start/end/duration times

1.4.4 (2019-10-25)
++++++++++++++++++

- Fix some unicode issues when using command line override on python 2.7
- Fix an issue with file handles not closing on the pexpect pty
- Fix missing ssh_key parameter from module interface
- Fix a bug where the ssh agent process would hang around after process
  isolation exit, causing a failure to remove temporary directories

1.4.2 (2019-10-04)
++++++++++++++++++

- Reverted ansible-runner --version to print a semantic version number

1.4.1 (2019-10-03)
++++++++++++++++++

- Fixed a bug that prevented ANSIBLE_HOST_KEY_CHECKING from being respected

1.4.0 (2019-09-20)
++++++++++++++++++

- Added changed count to stats data
- Added initial support for gathering performance statistics using
  the system's cgroup interface
- Fix command line args override missing from module run kwargs
- Omit inventory argument entirely if no inventory content is supplied;
  this allows ansible to pick up inventory from implicit locations and
  ansible.cfg
- Fix an issue where Runner wouldn't properly clean up process isolation
  temporary directories
- Fix error generated if unsafe parameter is used on vars prompt tasks
- Fix an issue where additional callback plugins weren't being used when
  defined in the environment
- Fix an issue where Runner would stop returning events after the playbook
  finished when using run_async
- Fix an issue where unicode in task data would cause Runner to fail
- Fix issues using vaulted data that would cause Runner to fail
- Fix an issue where artifact-dir was only allowed in ad-hoc mode

1.3.4 (2019-04-25)
++++++++++++++++++

- Removed an explicit version pin of the six library (which is unavailable in
  certain Linux distributions).
- Fixed an event handling bug in the callback plugin in Ansible 2.9+

1.3.3 (2019-04-22)
++++++++++++++++++

- Fix various issues involving unicode input and output
- Fix an issue where cancelling execution could cause an error rather
  than assigning the proper return code and exiting cleanly
- Fix various errors that would cause Runner to silently exit if some
  dependencies weren't met or some commands weren't available
- Fix an issue where the job_events directory wasn't created and would result
  in no output for non-ansible commands

1.3.2 (2019-04-10)
++++++++++++++++++

- Add direct support for forks and environment variables in parameterization
- Fix a bug where unicode in playbooks would cause a crash
- Fix a bug where unicode in environment variables would cause a crash
- Capture command and cwd as part of the artifacts delivered for the job
- Automatically remove process isolation temp directories
- Fail more gracefully if ansible and/or bubblewrap isn't available at startup
- Fix an issue where `verbose` events would be delayed until the end of execution

1.3.1 (2019-03-27)
++++++++++++++++++

- Fixes to make default file permissions much more secure (0600)
- Adding git to the reference container image to support galaxy requests

1.3.0 (2019-03-20)
++++++++++++++++++

- Add support for directory isolation
- Add Debian packaging support
- Add fact caching support
- Add process isolation configuration in the settings file
- Fix event and display issues related to alternative Ansible strategies
- Add Runner config reference to status handler callback
- Add some more direct access to various ansible command line arguments
- Adding playbook stats for "ignored" and "rescued"
- Fix loading of some ansible resources from outside of the private data
  directory (such as projects/playbooks)
- Fix handling of artifact dir when specified outside of the private data
  directory
- Fix an issue where the stdout handle wasn't closed and not all data
  would be flushed
- Fixed extravar loading behavior
- Added support for resolving parent events by associating their event uuid
  as parent_uuid
- Allow PYTHONPATH to be overridden
- Expand support for executing non-ansible tools

1.2.0 (2018-12-19)
++++++++++++++++++

- Add support for runner_on_start from Ansible 2.8
- Fix thread race condition issues in event gathering
- Add Code of Conduct
- Fix an issue where the "running" status wouldn't be emitted to the
  status callback
- Add process isolation support via bubblewrap
- Fix an issue with orphaned file descriptors
- Add ability to suppress ansible output from the module interface

1.1.2 (2018-10-18)
++++++++++++++++++

- Fix an issue where the ssh sock path could be too long
- Fix an issue passing extra vars as dictionaries via the interface
- Fix an issue where stdout was delayed on buffering, which also caused
  stdout not to be available if the task was canceled or failed
- Fix role-path parameter not being honored when given on the command line;
  also fixed up unit tests to actually surface this error if it comes back
- Fully onboard Zuul-CI for unit and integration testing

1.1.1 (2018-09-13)
++++++++++++++++++

- Fix an issue when attaching the PYTHONPATH environment variable
- Allow selecting a different ansible binary with the RUNNER_BINARY
  environment variable
- Fix --inventory command line arguments
- Fix some issues related to terminating ansible
- Add runner ident to the event processing callback
- Adding integration tests and improving unit tests

1.1.0 (2018-08-16)
++++++++++++++++++

- Added a feature that supports sending ansible status and events to external systems via a plugin
  interface
- Added support for Runner module users to receive runtime status changes in the form of a callback
  that can be supplied to the run() methods (or passing it directly on Runner initialization)
- Fix an issue where timeout settings were far too short
- Add a new status and return code to indicate a Runner timeout occurred
- Add support for running ad-hoc commands (direct module invocation, a la ansible vs ansible-playbook)
- Fix an issue that caused missing data in events sent to the event handler(s)
- Adding support for supplying role_path in module interface
- Fix an issue where messages would still be emitted when --quiet was used
- Fix a bug where ansible processes could be orphaned after canceling a job
- Fix a bug where calling the Runner stats method would fail on python 3
- Fix a bug where direct execution of roles couldn't be daemonized
- Fix a bug where relative paths couldn't be used when calling start vs run

1.0.5 (2018-07-23)
++++++++++++++++++

- Fix a bug that could cause a hang if unicode environment variables are used
- Allow select() to be used instead of poll() when invoking pexpect
- Check for the presence of Ansible before executing
- Fix an issue where a missing project directory would cause Runner to fail silently
- Add support for automatic cleanup/rotation of artifact directories
- Adding support for Runner module users to receive events in the form of a callback
  that can be supplied to the run() methods (or passing it directly on Runner initialization)
- Adding support for Runner module users to provide a callback that will be invoked when the
  Runner Ansible process has finished. This can be supplied to the run() methods (or passing it
  directly on Runner initialization).

1.0.4 (2018-06-29)
++++++++++++++++++

- Adding support for pexpect 4.6 for performance and efficiency improvements
- Adding support for launching roles directly
- Adding support for changing the output mode to json instead of vanilla Ansible (-j)
- Adding arguments to increase ansible verbosity (-v[vvv]) and quiet mode (-q)
- Adding support for overriding the artifact directory location
- Adding the ability to pass arbitrary arguments to the invocation of Ansible
- Improving debug and verbose output
- Various fixes for broken python 2/3 compatibility, including the event generator in the python module
- Fixing a bug when providing an ssh key via the private directory interface
- Fixing bugs that prevented Runner from working on MacOS
- Fixing a bug that caused issues when providing extra vars via the private dir interface

CONTRIBUTING.md Normal file (38 lines)

@@ -0,0 +1,38 @@
# Ansible Runner Contributing Guidelines

Hi there! We're excited to have you as a contributor.

If you have questions about this document or anything not covered here, come chat with us in `#ansible-awx` on irc.freenode.net.

## Things to know prior to submitting code

- All code and doc submissions are done through pull requests against the `devel` branch.
- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)

## Setting up your development environment

Ansible Runner development is powered by [Poetry](https://python-poetry.org/). Make sure you have it [installed](https://python-poetry.org/docs/#installation), and then run:

```bash
(host)$ poetry install
```

This will automatically set up the development environment under a virtualenv, which you can then switch to with:

```bash
(host)$ poetry shell
```

## Linting and Unit Tests

`tox` is used to run linters (`flake8` and `yamllint`) and unit tests on both Python 2 and 3. It uses Poetry to bootstrap these two environments.

## A note about setup.py

In this repository you will find a [`setup.py` file](https://docs.python.org/3/installing/index.html#installing-index).
For the time being, this needs to be kept in sync by hand until [Poetry](https://python-poetry.org/)
has the ability to generate a `setup.py` in a compatible way *or* the built-in
Poetry tooling allows for the `packaging/poetry-gen-setup.py` to be sufficient
for auto-generation. This is necessary because it allows the Ansible Runner codebase
to be compatible with build and release systems that do not yet support Poetry.

Dockerfile Normal file (57 lines)

@@ -0,0 +1,57 @@
ARG ANSIBLE_CORE_IMAGE=quay.io/ansible/ansible-core:latest
ARG PYTHON_BUILDER_IMAGE=quay.io/ansible/python-builder:latest

FROM $PYTHON_BUILDER_IMAGE as builder
# =============================================================================
ARG ANSIBLE_BRANCH=""
ARG ZUUL_SIBLINGS=""

COPY . /tmp/src
RUN if [ "$ANSIBLE_BRANCH" != "" ] ; then \
      echo "Installing requirements.txt / upper-constraints.txt for Ansible $ANSIBLE_BRANCH" ; \
      cp /tmp/src/tools/bindep-$ANSIBLE_BRANCH.txt /tmp/src/bindep.txt ; \
      cp /tmp/src/tools/requirements-$ANSIBLE_BRANCH.txt /tmp/src/requirements.txt ; \
      cp /tmp/src/tools/upper-constraints-$ANSIBLE_BRANCH.txt /tmp/src/upper-constraints.txt ; \
    fi
RUN assemble

FROM $ANSIBLE_CORE_IMAGE as ansible-core
# =============================================================================
COPY --from=builder /output/ /output
RUN /output/install-from-bindep \
    && rm -rf /output

# Prepare the /runner folder, seed the folder with demo data
ADD demo /runner

# In OpenShift, container will run as a random uid number and gid 0. Make sure things
# are writeable by the root group.
RUN for dir in \
      /home/runner \
      /home/runner/.ansible \
      /home/runner/.ansible/tmp \
      /runner \
      /home/runner \
      /runner/env \
      /runner/inventory \
      /runner/project \
      /runner/artifacts ; \
    do mkdir -m 0775 -p $dir ; chmod -R g+rwx $dir ; chgrp -R root $dir ; done && \
    for file in \
      /home/runner/.ansible/galaxy_token \
      /etc/passwd \
      /etc/group ; \
    do touch $file ; chmod g+rw $file ; chgrp root $file ; done

VOLUME /runner
WORKDIR /runner
ENV HOME=/home/runner

ADD utils/entrypoint.sh /bin/entrypoint
RUN chmod +x /bin/entrypoint

ENTRYPOINT ["entrypoint"]
CMD ["ansible-runner", "run", "/runner"]

Jenkinsfile vendored Normal file (38 lines)

@@ -0,0 +1,38 @@
podTemplate(yaml: """
kind: Pod
spec:
  containers:
  - name: kaniko
    image: gcr.io/kaniko-project/executor:debug-539ddefcae3fd6b411a95982a830d987f4214251
    imagePullPolicy: Always
    command:
    - /busybox/cat
    tty: true
    volumeMounts:
    - name: jenkins-docker-cfg
      mountPath: /kaniko/.docker
  volumes:
  - name: jenkins-docker-cfg
    projected:
      sources:
      - secret:
          name: regcred
          items:
          - key: .dockerconfigjson
            path: config.json
"""
) {
    node(POD_LABEL) {
        stage('Build with Kaniko') {
            git url: 'ssh://git@git.ervine.org/jonny/ansible-runner.git', credentialsId: 'jenkins-to-git'
            container('kaniko') {
                sh '/kaniko/executor -f `pwd`/Dockerfile -c `pwd` --cache=true --destination=harbor.ervine.dev/public/x86_64/ansible-runner:15.0.0'
            }
        }
        stage('Notify gchat') {
            hangoutsNotify message: "Ansible Runner container has built", token: "A2ET831pVslqXTqAx6ycu573r", threadByJob: false
        }
    }
}

LICENSE.md Normal file (168 lines)

@@ -0,0 +1,168 @@
Apache License
==============
_Version 2.0, January 2004_
_&lt;<http://www.apache.org/licenses/>&gt;_
### Terms and Conditions for use, reproduction, and distribution
#### 1. Definitions
“License” shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
“Licensor” shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
“Legal Entity” shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, “control” means **(i)** the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
outstanding shares, or **(iii)** beneficial ownership of such entity.
“You” (or “Your”) shall mean an individual or Legal Entity exercising
permissions granted by this License.
“Source” form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
“Object” form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
“Work” shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
“Derivative Works” shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
“Contribution” shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
“submitted” means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as “Not a Contribution.”
“Contributor” shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
#### 2. Grant of Copyright License
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
#### 3. Grant of Patent License
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
#### 4. Redistribution
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
* **(a)** You must give any other recipients of the Work or Derivative Works a copy of
this License; and
* **(b)** You must cause any modified files to carry prominent notices stating that You
changed the files; and
* **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
#### 5. Submission of Contributions
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
#### 6. Trademarks
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
#### 7. Disclaimer of Warranty
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
#### 8. Limitation of Liability
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
#### 9. Accepting Warranty or Additional Liability
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.

MANIFEST.in Normal file (4 lines)

@@ -0,0 +1,4 @@
include README.md
include LICENSE.md
include Makefile

Makefile Normal file (198 lines)

@@ -0,0 +1,198 @@
PYTHON ?= python
ifeq ($(origin VIRTUAL_ENV), undefined)
    DIST_PYTHON ?= poetry run $(PYTHON)
else
    DIST_PYTHON ?= $(PYTHON)
endif

CONTAINER_ENGINE ?= docker
BASE_IMAGE ?= docker.io/fedora:32

NAME = ansible-runner
IMAGE_NAME ?= quay.io/ansible/ansible-runner
IMAGE_NAME_STRIPPED := $(word 1,$(subst :, ,$(IMAGE_NAME)))
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
ANSIBLE_BRANCH ?= devel
ANSIBLE_VERSIONS ?= stable-2.9 stable-2.10 devel
PIP_NAME = ansible_runner
LONG_VERSION := $(shell poetry version)
VERSION := $(filter-out $(NAME), $(LONG_VERSION))

ifeq ($(OFFICIAL),yes)
    RELEASE ?= 1
else
    ifeq ($(origin RELEASE), undefined)
        RELEASE := 0.git$(shell date -u +%Y%m%d%H).$(shell git rev-parse --short HEAD)
    endif
endif

# RPM build variables
MOCK_BIN ?= mock
MOCK_CONFIG ?= epel-7-x86_64

RPM_NVR = $(NAME)-$(VERSION)-$(RELEASE)$(RPM_DIST)
RPM_DIST ?= $(shell rpm --eval '%{?dist}' 2>/dev/null)
RPM_ARCH ?= $(shell rpm --eval '%{_arch}' 2>/dev/null)

# Provide a fallback value for RPM_ARCH
ifeq ($(RPM_ARCH),)
    RPM_ARCH = $(shell uname -m)
endif

# Debian Packaging
DEBUILD_BIN ?= debuild
DEBUILD_OPTS ?=
DPUT_BIN ?= dput
DPUT_OPTS ?=
GPG_KEY_ID ?=

ifeq ($(origin GPG_SIGNING_KEY), undefined)
    GPG_SIGNING_KEY = /dev/null
endif

ifeq ($(OFFICIAL),yes)
    # Sign official builds
    DEBUILD_OPTS += -k$(GPG_KEY_ID)
else
    # Do not sign unofficial builds
    DEBUILD_OPTS += -uc -us
endif

DEBUILD = $(DEBUILD_BIN) $(DEBUILD_OPTS)
DEB_PPA ?= mini_dinstall
DEB_ARCH ?= amd64
DEB_NVR = $(NAME)_$(VERSION)-$(RELEASE)~$(DEB_DIST)
DEB_NVRA = $(DEB_NVR)_$(DEB_ARCH)
DEB_NVRS = $(DEB_NVR)_source
DEB_TAR_NAME=$(NAME)-$(VERSION)
DEB_TAR_FILE=$(NAME)_$(VERSION).orig.tar.gz
DEB_DATE := $(shell LC_TIME=C date +"%a, %d %b %Y %T %z")

.PHONY: test clean dist sdist dev shell image devimage rpm srpm docs deb debian deb-src

clean:
	rm -rf dist
	rm -rf build
	rm -rf ansible-runner.egg-info
	rm -rf rpm-build
	rm -rf deb-build
	find . -type f -regex ".*\py[co]$$" -delete
	rm -rf $(shell find test/ -type d -name "artifacts")

dist:
	poetry build

sdist: dist/$(NAME)-$(VERSION).tar.gz

# Generate setup.py transiently for the sdist so we don't have to deal with
# packaging poetry as a RPM for rpm build time dependencies.
dist/$(NAME)-$(VERSION).tar.gz:
	$(DIST_PYTHON) setup.py sdist

dev:
	poetry install

shell:
	poetry shell

test:
	tox

docs:
	cd docs && make html

image: sdist
	$(CONTAINER_ENGINE) pull $(BASE_IMAGE)
	$(CONTAINER_ENGINE) build --rm=true \
		--build-arg BASE_IMAGE=$(BASE_IMAGE) \
		--build-arg RUNNER_VERSION=$(VERSION) \
		--build-arg ANSIBLE_BRANCH=$(ANSIBLE_BRANCH) \
		-t $(IMAGE_NAME) -f Dockerfile .
	$(CONTAINER_ENGINE) tag $(IMAGE_NAME) $(IMAGE_NAME_STRIPPED):$(GIT_BRANCH)

image_matrix:
	for version in $(ANSIBLE_VERSIONS) ; do \
		ANSIBLE_BRANCH=$$version GIT_BRANCH=$$version.$(GIT_BRANCH) make image ; \
	done
	$(CONTAINER_ENGINE) tag $(IMAGE_NAME) $(IMAGE_NAME_STRIPPED):$(GIT_BRANCH)

image_matrix_publish:
	for version in $(ANSIBLE_VERSIONS) ; do \
		$(CONTAINER_ENGINE) push $(IMAGE_NAME_STRIPPED):$$version.$(GIT_BRANCH) ; \
	done
	$(CONTAINER_ENGINE) push $(IMAGE_NAME_STRIPPED):$(GIT_BRANCH)

rpm:
	MOCK_CONFIG=$(MOCK_CONFIG) docker-compose -f packaging/rpm/docker-compose.yml build
	MOCK_CONFIG=$(MOCK_CONFIG) docker-compose -f packaging/rpm/docker-compose.yml \
		run --rm -e RELEASE=$(RELEASE) rpm-builder "make mock-rpm"

srpm:
	MOCK_CONFIG=$(MOCK_CONFIG) docker-compose -f packaging/rpm/docker-compose.yml build
	MOCK_CONFIG=$(MOCK_CONFIG) docker-compose -f packaging/rpm/docker-compose.yml \
		run --rm -e RELEASE=$(RELEASE) rpm-builder "make mock-srpm"

mock-rpm: rpm-build/$(RPM_NVR).$(RPM_ARCH).rpm

rpm-build/$(RPM_NVR).$(RPM_ARCH).rpm: rpm-build/$(RPM_NVR).src.rpm
	$(MOCK_BIN) -r $(MOCK_CONFIG) --arch=noarch \
		--resultdir=rpm-build \
		--rebuild rpm-build/$(RPM_NVR).src.rpm

mock-srpm: rpm-build/$(RPM_NVR).src.rpm

rpm-build/$(RPM_NVR).src.rpm: dist/$(NAME)-$(VERSION).tar.gz rpm-build rpm-build/$(NAME).spec
	$(MOCK_BIN) -r $(MOCK_CONFIG) --arch=noarch \
		--resultdir=rpm-build \
		--spec=rpm-build/$(NAME).spec \
		--sources=rpm-build \
		--buildsrpm

rpm-build/$(NAME).spec:
	ansible -c local -i localhost, all \
		-m template \
		-a "src=packaging/rpm/$(NAME).spec.j2 dest=rpm-build/$(NAME).spec" \
		-e version=$(VERSION) \
		-e release=$(RELEASE)

rpm-build: sdist
	mkdir -p $@
	cp dist/$(NAME)-$(VERSION).tar.gz rpm-build/$(NAME)-$(VERSION)-$(RELEASE).tar.gz

deb:
	docker-compose -f packaging/debian/docker/docker-compose.yml \
		run --rm \
		-e OFFICIAL=$(OFFICIAL) -e RELEASE=$(RELEASE) \
		-e GPG_KEY_ID=$(GPG_KEY_ID) -e GPG_SIGNING_KEY=$(GPG_SIGNING_KEY) \
		deb-builder "make debian"

ifeq ($(OFFICIAL),yes)
debian: gpg-import deb-build/$(DEB_NVRA).deb

gpg-import:
	gpg --import /signing_key.asc
else
debian: deb-build/$(DEB_NVRA).deb
endif

deb-src: deb-build/$(DEB_NVR).dsc

deb-build/$(DEB_NVRA).deb: deb-build/$(DEB_NVR).dsc
	cd deb-build/$(NAME)-$(VERSION) && $(DEBUILD) -b

deb-build/$(DEB_NVR).dsc: deb-build/$(NAME)-$(VERSION)
	cd deb-build/$(NAME)-$(VERSION) && $(DEBUILD) -S

deb-build/$(NAME)-$(VERSION): dist/$(NAME)-$(VERSION).tar.gz
	mkdir -p $(dir $@)
	@if [ "$(OFFICIAL)" != "yes" ] ; then \
		tar -C deb-build/ -xvf dist/$(NAME)-$(VERSION).tar.gz ; \
		cd deb-build && tar czf $(DEB_TAR_FILE) $(NAME)-$(VERSION) ; \
	else \
		cp -a dist/$(NAME)-$(VERSION).tar.gz deb-build/$(DEB_TAR_FILE) ; \
	fi
	cd deb-build && tar -xf $(DEB_TAR_FILE)
	cp -a packaging/debian deb-build/$(NAME)-$(VERSION)/
	sed -ie "s|%VERSION%|$(VERSION)|g;s|%RELEASE%|$(RELEASE)|g;s|%DATE%|$(DEB_DATE)|g" $@/debian/changelog

print-%:
	@echo $($*)

README.md

@@ -1,3 +1,22 @@
# ansible-runner
Ansible Runner
==============
Customised Ansible runner container
[![PyPi](https://img.shields.io/pypi/v/ansible-runner.svg?logo=Python)](https://pypi.org/project/ansible-runner/)
[![Documentation](https://readthedocs.org/projects/ansible-runner/badge/?version=stable)](https://ansible-runner.readthedocs.io/en/latest/)
[![Code of Conduct](https://img.shields.io/badge/Code%20of%20Conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
[![Ansible Mailing lists](https://img.shields.io/badge/Mailing%20lists-Ansible-orange.svg)](https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information)
Ansible Runner is a tool and Python library that helps when interfacing with Ansible directly or as part of another system, whether that be through a container image interface, as a standalone tool, or as a Python module that can be imported. The goal is to provide a stable and consistent interface abstraction to Ansible.
For the latest documentation see: [https://ansible-runner.readthedocs.io](https://ansible-runner.readthedocs.io/en/latest/)
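
A minimal sketch of the importable Python interface mentioned above (the directory path and playbook name here are illustrative placeholders, not files shipped in this repository):

```python
import ansible_runner

# Run a playbook from a prepared private data directory; the path and
# playbook name are placeholders for your own content.
r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='test.yml')
print('{}: {}'.format(r.status, r.rc))  # e.g. "successful: 0"
for event in r.events:                  # parsed job events, one dict each
    print(event['event'])
print(r.stats)                          # final playbook stats
```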
Get Involved
============
* We use [GitHub issues](https://github.com/ansible/ansible-runner/issues) to track bug reports and feature ideas...
* ... and [GitHub Milestones](https://github.com/ansible/ansible-runner/milestones) to track what's due for the next release
* Want to contribute? Check out our [guide](CONTRIBUTING.md)
* Join us in the `#ansible-runner` channel on Freenode IRC
* Join the discussion in [awx-project](https://groups.google.com/forum/#!forum/awx-project)
* For the full list of Ansible email lists and IRC channels, see the [Ansible Mailing lists](https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information)

ansible_runner/__init__.py Normal file (13 lines)

@@ -0,0 +1,13 @@
import pkg_resources

from .interface import run, run_async  # noqa
from .exceptions import AnsibleRunnerException, ConfigurationError, CallbackError  # noqa
from .runner_config import RunnerConfig  # noqa
from .runner import Runner  # noqa

plugins = {
    entry_point.name: entry_point.load()
    for entry_point
    in pkg_resources.iter_entry_points('ansible_runner.plugins')
}
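
# Example (illustrative): a third-party package can hook into the dict above by
# registering an entry point in the 'ansible_runner.plugins' group. A minimal
# setup.py sketch for a hypothetical plugin distribution:
#
#     from setuptools import setup
#
#     setup(
#         name='my-runner-plugin',            # hypothetical package name
#         version='0.1.0',
#         py_modules=['my_runner_plugin'],
#         entry_points={
#             'ansible_runner.plugins': [
#                 # loaded here as plugins['my_plugin'] at import time
#                 'my_plugin = my_runner_plugin',
#             ],
#         },
#     )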

ansible_runner/__main__.py Normal file (959 lines)

@@ -0,0 +1,959 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
import pkg_resources
import threading
import traceback
import argparse
import logging
import signal
import sys
import errno
import json
import stat
import os
import shutil
import textwrap
import tempfile
import atexit
from contextlib import contextmanager
from uuid import uuid4
from yaml import safe_load
from ansible_runner import run
from ansible_runner import output
from ansible_runner.utils import dump_artifact, Bunch
from ansible_runner.runner import Runner
from ansible_runner.exceptions import AnsibleRunnerException
VERSION = pkg_resources.require("ansible_runner")[0].version
DEFAULT_ROLES_PATH = os.getenv('ANSIBLE_ROLES_PATH', None)
DEFAULT_RUNNER_BINARY = os.getenv('RUNNER_BINARY', None)
DEFAULT_RUNNER_PLAYBOOK = os.getenv('RUNNER_PLAYBOOK', None)
DEFAULT_RUNNER_ROLE = os.getenv('RUNNER_ROLE', None)
DEFAULT_RUNNER_MODULE = os.getenv('RUNNER_MODULE', None)
DEFAULT_UUID = uuid4()
DEFAULT_CLI_ARGS = {
    "positional_args": (
        (
            ('private_data_dir',),
            dict(
                help="base directory containing the ansible-runner metadata "
                     "(project, inventory, env, etc)"
            ),
        ),
    ),
    "generic_args": (
        (
            ('--version',),
            dict(
                action='version',
                version=VERSION
            ),
        ),
        (
            ("--debug",),
            dict(
                action="store_true",
                help="enable ansible-runner debug output logging (default=False)"
            ),
        ),
        (
            ("--logfile",),
            dict(
                help="log output messages to a file (default=None)"
            ),
        ),
    ),
    "mutually_exclusive_group": (
        (
            ("-p", "--playbook",),
            dict(
                default=DEFAULT_RUNNER_PLAYBOOK,
                help="invoke an Ansible playbook from the ansible-runner project "
                     "(See Ansible Playbook Options below)"
            ),
        ),
        (
            ("-m", "--module",),
            dict(
                default=DEFAULT_RUNNER_MODULE,
                help="invoke an Ansible module directly without a playbook "
                     "(See Ansible Module Options below)"
            ),
        ),
        (
            ("-r", "--role",),
            dict(
                default=DEFAULT_RUNNER_ROLE,
                help="invoke an Ansible role directly without a playbook "
                     "(See Ansible Role Options below)"
            ),
        ),
    ),
    "ansible_group": (
        (
            ("--limit",),
            dict(
                help="matches Ansible's ```--limit``` parameter to further constrain "
                     "the inventory to be used (default=None)"
            ),
        ),
        (
            ("--cmdline",),
            dict(
                help="command line options to pass to ansible-playbook at "
                     "execution time (default=None)"
            ),
        ),
        (
            ("--hosts",),
            dict(
                help="define the set of hosts to execute against (default=None) "
                     "Note: this parameter only works with -m or -r"
            ),
        ),
        (
            ("--forks",),
            dict(
                help="matches Ansible's ```--forks``` parameter to set the number "
                     "of concurrent processes (default=None)"
            ),
        ),
    ),
    "runner_group": (
        # ansible-runner options
        (
            ("-b", "--binary",),
            dict(
                default=DEFAULT_RUNNER_BINARY,
                help="specifies the full path pointing to the Ansible binaries "
                     "(default={})".format(DEFAULT_RUNNER_BINARY)
            ),
        ),
        (
            ("-i", "--ident",),
            dict(
                default=DEFAULT_UUID,
                help="an identifier that will be used when generating the artifacts "
                     "directory and can be used to uniquely identify a playbook run "
                     "(default={})".format(DEFAULT_UUID)
            ),
        ),
        (
            ("--rotate-artifacts",),
            dict(
                default=0,
                type=int,
                help="automatically clean up old artifact directories after a given "
                     "number have been created (default=0, disabled)"
            ),
        ),
        (
            ("--artifact-dir",),
            dict(
                help="optional path for the artifact root directory "
                     "(default=<private_data_dir>/artifacts)"
            ),
        ),
        (
            ("--project-dir",),
            dict(
                help="optional path for the location of the playbook content directory "
                     "(default=<private_data_dir>/project)"
            ),
        ),
        (
            ("--inventory",),
            dict(
                help="optional path for the location of the inventory content directory "
                     "(default=<private_data_dir>/inventory)"
            ),
        ),
        (
            ("-j", "--json",),
            dict(
                action="store_true",
                help="output the JSON event structure to stdout instead of "
                     "Ansible output (default=False)"
            ),
        ),
        (
            ("--omit-event-data",),
            dict(
                action="store_true",
                help="Omits including extra event data in the callback payloads "
                     "or the Runner payload data files "
                     "(status and stdout still included)"
            ),
        ),
        (
            ("--only-failed-event-data",),
            dict(
                action="store_true",
                help="Only adds extra event data for failed tasks in the callback "
                     "payloads or the Runner payload data files "
                     "(status and stdout still included for other events)"
            ),
        ),
        (
            ("-q", "--quiet",),
            dict(
                action="store_true",
                help="disable all messages sent to stdout/stderr (default=False)"
            ),
        ),
        (
            ("-v",),
            dict(
                action="count",
                help="increase the verbosity with multiple v's (up to 5) of the "
                     "ansible-playbook output (default=None)"
            ),
        ),
    ),
    "roles_group": (
        (
            ("--roles-path",),
            dict(
                default=DEFAULT_ROLES_PATH,
                help="path used to locate the role to be executed (default=None)"
            ),
        ),
        (
            ("--role-vars",),
            dict(
                help="set of variables to be passed to the role at run time in the "
                     "form of 'key1=value1 key2=value2 keyN=valueN' (default=None)"
            ),
        ),
        (
            ("--role-skip-facts",),
            dict(
                action="store_true",
                default=False,
                help="disable fact collection when the role is executed (default=False)"
            ),
        )
    ),
    "playbook_group": (
        (
            ("--process-isolation",),
            dict(
                dest="process_isolation",
                action="store_true",
                help="Isolate execution. Two methods are supported: (1) using a container engine (e.g. podman or docker) "
                     "to execute **Ansible**. (2) using a sandbox (e.g. bwrap) which will by default restrict access to /tmp "
                     "(default=False)"
            ),
        ),
        (
            ("--process-isolation-executable",),
            dict(
                dest="process_isolation_executable",
                default="podman",
                help="Process isolation executable or container engine used to isolate execution. (default=podman)"
            )
        ),
        (
            ("--process-isolation-path",),
            dict(
                dest="process_isolation_path",
                default="/tmp",
                help="path that an isolated playbook run will use for staging. "
                     "(default=/tmp)"
            )
        ),
        (
            ("--process-isolation-hide-paths",),
            dict(
                dest="process_isolation_hide_paths",
                nargs='*',
                help="list of paths on the system that should be hidden from the "
                     "playbook run (default=None)"
            )
        ),
        (
            ("--process-isolation-show-paths",),
            dict(
                dest="process_isolation_show_paths",
                nargs='*',
                help="list of paths on the system that should be exposed to the "
                     "playbook run (default=None)"
            )
        ),
        (
            ("--process-isolation-ro-paths",),
            dict(
                dest="process_isolation_ro_paths",
                nargs='*',
                help="list of paths on the system that should be exposed to the "
                     "playbook run as read-only (default=None)"
            )
        ),
        (
            ("--directory-isolation-base-path",),
            dict(
                dest="directory_isolation_base_path",
                help="copies the project directory to a location in this directory "
                     "to prevent multiple simultaneous executions from conflicting "
                     "(default=None)"
            )
        ),
        (
            ("--resource-profiling",),
            dict(
                dest='resource_profiling',
                action="store_true",
                help="Records resource utilization during playbook execution"
            )
        ),
        (
            ("--resource-profiling-base-cgroup",),
            dict(
                dest='resource_profiling_base_cgroup',
                default="ansible-runner",
                help="Top-level cgroup used to collect information on resource utilization. Defaults to ansible-runner"
            )
        ),
        (
            ("--resource-profiling-cpu-poll-interval",),
            dict(
                dest='resource_profiling_cpu_poll_interval',
                default=0.25,
                help="Interval (in seconds) between CPU polling for determining CPU usage. Defaults to 0.25"
            )
        ),
        (
            ("--resource-profiling-memory-poll-interval",),
            dict(
                dest='resource_profiling_memory_poll_interval',
                default=0.25,
                help="Interval (in seconds) between memory polling for determining memory usage. Defaults to 0.25"
            )
        ),
        (
            ("--resource-profiling-pid-poll-interval",),
            dict(
                dest='resource_profiling_pid_poll_interval',
                default=0.25,
                help="Interval (in seconds) between polling PID count for determining number of processes used. Defaults to 0.25"
            )
        ),
        (
            ("--resource-profiling-results-dir",),
            dict(
                dest='resource_profiling_results_dir',
                help="Directory where profiling data files should be saved. "
                     "Defaults to None (profiling_data folder under private data dir is used in this case)."
            )
        )
    ),
    "modules_group": (
        (
            ("-a", "--args",),
            dict(
                dest='module_args',
                help="set of arguments to be passed to the module at run time in the "
                     "form of 'key1=value1 key2=value2 keyN=valueN' (default=None)"
            )
        ),
    ),
    "container_group": (
        (
            ("--container-image",),
            dict(
                dest="container_image",
                default="quay.io/ansible/ansible-runner:devel",
                help="Container image to use when running an ansible task"
            )
        ),
        (
            ("--container-volume-mount",),
            dict(
                dest="container_volume_mounts",
                action='append',
                help="Bind mounts (in the form 'host_dir:/container_dir'). "
                     "Can be used more than once to create multiple bind mounts."
            )
        ),
        (
            ("--container-option",),
            dict(
                dest="container_options",
                action='append',
                help="Container options to pass to execution engine. "
                     "Can be used more than once to send multiple options."
            )
        ),
    ),
    "execenv_cli_group": (
        (
            ('--container-runtime',),
            dict(
                dest='container_runtime',
                default='podman',
                help="OCI Compliant container runtime to use. Examples: podman, docker"
            ),
        ),
        (
            ('--keep-files',),
            dict(
                dest='keep_files',
                action='store_true',
                default=False,
                help="Keep temporary files persistent on disk instead of cleaning them automatically. "
                     "(Useful for debugging)"
            ),
        ),
    ),
}

logger = logging.getLogger('ansible-runner')

@contextmanager
def role_manager(vargs):
    if vargs.get('role'):
        role = {'name': vargs.get('role')}
        if vargs.get('role_vars'):
            role_vars = {}
            for item in vargs['role_vars'].split():
                key, value = item.split('=')
                try:
                    role_vars[key] = ast.literal_eval(value)
                except Exception:
                    role_vars[key] = value
            role['vars'] = role_vars

        kwargs = Bunch(**vargs)
        kwargs.update(private_data_dir=vargs.get('private_data_dir'),
                      json_mode=vargs.get('json'),
                      ignore_logging=False,
                      project_dir=vargs.get('project_dir'),
                      rotate_artifacts=vargs.get('rotate_artifacts'))

        if vargs.get('artifact_dir'):
            kwargs.artifact_dir = vargs.get('artifact_dir')

        if vargs.get('project_dir'):
            project_path = kwargs.project_dir = vargs.get('project_dir')
        else:
            project_path = os.path.join(vargs.get('private_data_dir'), 'project')

        project_exists = os.path.exists(project_path)

        env_path = os.path.join(vargs.get('private_data_dir'), 'env')
        env_exists = os.path.exists(env_path)

        envvars_path = os.path.join(vargs.get('private_data_dir'), 'env/envvars')
        envvars_exists = os.path.exists(envvars_path)

        if vargs.get('cmdline'):
            kwargs.cmdline = vargs.get('cmdline')

        playbook = None
        tmpvars = None

        play = [{'hosts': vargs.get('hosts') if vargs.get('hosts') is not None else "all",
                 'gather_facts': not vargs.get('role_skip_facts'),
                 'roles': [role]}]

        filename = str(uuid4().hex)

        playbook = dump_artifact(json.dumps(play), project_path, filename)
        kwargs.playbook = playbook
        output.debug('using playbook file %s' % playbook)

        if vargs.get('inventory'):
            inventory_file = os.path.join(vargs.get('private_data_dir'), 'inventory', vargs.get('inventory'))
            if not os.path.exists(inventory_file):
                raise AnsibleRunnerException('location specified by --inventory does not exist')
            kwargs.inventory = inventory_file
            output.debug('using inventory file %s' % inventory_file)

        roles_path = vargs.get('roles_path') or os.path.join(vargs.get('private_data_dir'), 'roles')
        roles_path = os.path.abspath(roles_path)
        output.debug('setting ANSIBLE_ROLES_PATH to %s' % roles_path)

        envvars = {}
        if envvars_exists:
            with open(envvars_path, 'rb') as f:
                tmpvars = f.read()
                new_envvars = safe_load(tmpvars)
                if new_envvars:
                    envvars = new_envvars

        envvars['ANSIBLE_ROLES_PATH'] = roles_path
        kwargs.envvars = envvars
    else:
        kwargs = vargs

    yield kwargs

    if vargs.get('role'):
        if not project_exists and os.path.exists(project_path):
            logger.debug('removing dynamically generated project folder')
            shutil.rmtree(project_path)
        elif playbook and os.path.isfile(playbook):
            logger.debug('removing dynamically generated playbook')
            os.remove(playbook)

        # if a previous envvars existed in the private_data_dir,
        # restore the original file contents
        if tmpvars:
            with open(envvars_path, 'wb') as f:
                f.write(tmpvars)
        elif not envvars_exists and os.path.exists(envvars_path):
            logger.debug('removing dynamically generated envvars folder')
            os.remove(envvars_path)

        # since ansible-runner created the env folder, remove it
        if not env_exists and os.path.exists(env_path):
            logger.debug('removing dynamically generated env folder')
            shutil.rmtree(env_path)
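
# Usage sketch (illustrative): role_manager is a context manager, so a caller
# enters it to get fully populated run kwargs and relies on the exit path above
# to clean up any dynamically generated playbook/env files:
#
#     with role_manager(vargs) as kwargs:
#         res = run(**kwargs)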

def print_common_usage():
    print(textwrap.dedent("""
        These are common Ansible Runner commands:

            execute a playbook contained in an ansible-runner directory:

                ansible-runner run /tmp/private -p playbook.yml
                ansible-runner start /tmp/private -p playbook.yml
                ansible-runner stop /tmp/private
                ansible-runner is-alive /tmp/private

            directly execute ansible primitives:

                ansible-runner run . -r role_name --hosts myhost
                ansible-runner run . -m command -a "ls -l" --hosts myhost

            run ansible execution environments:

                ansible-runner adhoc myhosts -m ping
                ansible-runner playbook my_playbook.yml

        `ansible-runner --help` list of optional command line arguments
    """))

def add_args_to_parser(parser, args):
    """
    Traverse a tuple of arguments to add to a parser

    :param parser: Instance of a parser, subparser, or argument group
    :type parser: argparse.ArgumentParser

    :param args: Tuple of tuples, format ((arg1, arg2), {'kwarg1':'val1'},)
    :type args: tuple

    :returns: None
    """
    for arg in args:
        parser.add_argument(*arg[0], **arg[1])
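
# Example (illustrative): how the argument tuples in DEFAULT_CLI_ARGS expand
# into parser.add_argument() calls, mirroring what main() does below:
#
#     demo_parser = argparse.ArgumentParser(prog='ansible-runner')
#     add_args_to_parser(demo_parser, DEFAULT_CLI_ARGS['generic_args'])
#     opts = demo_parser.parse_args(['--debug', '--logfile', '/tmp/runner.log'])
#     assert opts.debug is True and opts.logfile == '/tmp/runner.log'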
def main(sys_args=None):
"""Main entry point for ansible-runner executable
When the ```ansible-runner``` command is executed, this function
is the main entry point that is called and executed.
:param sys_args: List of arguments to be parsed by the parser
:type sys_args: list
:returns: an instance of SystemExit
:rtype: SystemExit
"""
parser = argparse.ArgumentParser(
prog='ansible-runner',
description="Use 'ansible-runner' (with no arguments) to see basic usage"
)
subparser = parser.add_subparsers(
help="Command to invoke",
dest='command',
description="COMMAND PRIVATE_DATA_DIR [ARGS]"
)
add_args_to_parser(parser, DEFAULT_CLI_ARGS['generic_args'])
subparser.required = True
# positional options
run_subparser = subparser.add_parser(
'run',
help="Run ansible-runner in the foreground"
)
add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['positional_args'])
start_subparser = subparser.add_parser(
'start',
help="Start an ansible-runner process in the background"
)
add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['positional_args'])
stop_subparser = subparser.add_parser(
'stop',
help="Stop an ansible-runner process that's running in the background"
)
add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['positional_args'])
isalive_subparser = subparser.add_parser(
'is-alive',
help="Check if a an ansible-runner process in the background is still running."
)
add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['positional_args'])
# streaming commands
transmit_subparser = subparser.add_parser(
'transmit',
help="Send a job to a remote ansible-runner process"
)
add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['positional_args'])
worker_subparser = subparser.add_parser(
'worker',
help="Execute work streamed from a controlling instance"
)
worker_subparser.add_argument(
"--private-data-dir",
help="base directory containing the ansible-runner metadata "
"(project, inventory, env, etc)",
)
process_subparser = subparser.add_parser(
'process',
help="Receive the output of remote ansible-runner work and distribute the results"
)
add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['positional_args'])
# adhoc command exec
adhoc_subparser = subparser.add_parser(
'adhoc',
help="Run ansible adhoc commands in an Execution Environment"
)
adhoc_subparser.add_argument(
"--private-data-dir",
help="base directory containing the ansible-runner metadata "
"(project, inventory, env, etc)",
)
add_args_to_parser(adhoc_subparser, DEFAULT_CLI_ARGS['execenv_cli_group'])
# playbook command exec
playbook_subparser = subparser.add_parser(
'playbook',
help="Run ansible-playbook commands in an Execution Environment"
)
playbook_subparser.add_argument(
"--private-data-dir",
help="base directory containing the ansible-runner metadata "
"(project, inventory, env, etc)",
)
add_args_to_parser(playbook_subparser, DEFAULT_CLI_ARGS['execenv_cli_group'])
# generic args for all subparsers
add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(adhoc_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(playbook_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(worker_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['generic_args'])
# runner group
ansible_runner_group_options = (
"Ansible Runner Options",
"configuration options for controlling the ansible-runner "
"runtime environment.",
)
base_runner_group = parser.add_argument_group(*ansible_runner_group_options)
run_runner_group = run_subparser.add_argument_group(*ansible_runner_group_options)
start_runner_group = start_subparser.add_argument_group(*ansible_runner_group_options)
stop_runner_group = stop_subparser.add_argument_group(*ansible_runner_group_options)
isalive_runner_group = isalive_subparser.add_argument_group(*ansible_runner_group_options)
transmit_runner_group = transmit_subparser.add_argument_group(*ansible_runner_group_options)
add_args_to_parser(base_runner_group, DEFAULT_CLI_ARGS['runner_group'])
add_args_to_parser(run_runner_group, DEFAULT_CLI_ARGS['runner_group'])
add_args_to_parser(start_runner_group, DEFAULT_CLI_ARGS['runner_group'])
add_args_to_parser(stop_runner_group, DEFAULT_CLI_ARGS['runner_group'])
add_args_to_parser(isalive_runner_group, DEFAULT_CLI_ARGS['runner_group'])
add_args_to_parser(transmit_runner_group, DEFAULT_CLI_ARGS['runner_group'])
# mutually exclusive group
run_mutually_exclusive_group = run_subparser.add_mutually_exclusive_group()
start_mutually_exclusive_group = start_subparser.add_mutually_exclusive_group()
stop_mutually_exclusive_group = stop_subparser.add_mutually_exclusive_group()
isalive_mutually_exclusive_group = isalive_subparser.add_mutually_exclusive_group()
transmit_mutually_exclusive_group = transmit_subparser.add_mutually_exclusive_group()
add_args_to_parser(run_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])
add_args_to_parser(start_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])
add_args_to_parser(stop_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])
add_args_to_parser(isalive_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])
add_args_to_parser(transmit_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])
# ansible options
ansible_options = (
"Ansible Options",
"control the ansible[-playbook] execution environment",
)
run_ansible_group = run_subparser.add_argument_group(*ansible_options)
start_ansible_group = start_subparser.add_argument_group(*ansible_options)
stop_ansible_group = stop_subparser.add_argument_group(*ansible_options)
isalive_ansible_group = isalive_subparser.add_argument_group(*ansible_options)
transmit_ansible_group = transmit_subparser.add_argument_group(*ansible_options)
add_args_to_parser(run_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
add_args_to_parser(start_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
add_args_to_parser(stop_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
add_args_to_parser(isalive_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
add_args_to_parser(transmit_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
# roles group
roles_group_options = (
"Ansible Role Options",
"configuration options for directly executing Ansible roles",
)
run_roles_group = run_subparser.add_argument_group(*roles_group_options)
start_roles_group = start_subparser.add_argument_group(*roles_group_options)
stop_roles_group = stop_subparser.add_argument_group(*roles_group_options)
isalive_roles_group = isalive_subparser.add_argument_group(*roles_group_options)
transmit_roles_group = transmit_subparser.add_argument_group(*roles_group_options)
add_args_to_parser(run_roles_group, DEFAULT_CLI_ARGS['roles_group'])
add_args_to_parser(start_roles_group, DEFAULT_CLI_ARGS['roles_group'])
add_args_to_parser(stop_roles_group, DEFAULT_CLI_ARGS['roles_group'])
add_args_to_parser(isalive_roles_group, DEFAULT_CLI_ARGS['roles_group'])
add_args_to_parser(transmit_roles_group, DEFAULT_CLI_ARGS['roles_group'])
# modules groups
modules_group_options = (
"Ansible Module Options",
"configuration options for directly executing Ansible modules",
)
run_modules_group = run_subparser.add_argument_group(*modules_group_options)
start_modules_group = start_subparser.add_argument_group(*modules_group_options)
stop_modules_group = stop_subparser.add_argument_group(*modules_group_options)
isalive_modules_group = isalive_subparser.add_argument_group(*modules_group_options)
transmit_modules_group = transmit_subparser.add_argument_group(*modules_group_options)
add_args_to_parser(run_modules_group, DEFAULT_CLI_ARGS['modules_group'])
add_args_to_parser(start_modules_group, DEFAULT_CLI_ARGS['modules_group'])
add_args_to_parser(stop_modules_group, DEFAULT_CLI_ARGS['modules_group'])
add_args_to_parser(isalive_modules_group, DEFAULT_CLI_ARGS['modules_group'])
add_args_to_parser(transmit_modules_group, DEFAULT_CLI_ARGS['modules_group'])
# playbook options
playbook_group_options = (
"Ansible Playbook Options",
"configuation options for executing Ansible playbooks",
)
run_playbook_group = run_subparser.add_argument_group(*playbook_group_options)
start_playbook_group = start_subparser.add_argument_group(*playbook_group_options)
stop_playbook_group = stop_subparser.add_argument_group(*playbook_group_options)
isalive_playbook_group = isalive_subparser.add_argument_group(*playbook_group_options)
transmit_playbook_group = transmit_subparser.add_argument_group(*playbook_group_options)
add_args_to_parser(run_playbook_group, DEFAULT_CLI_ARGS['playbook_group'])
add_args_to_parser(start_playbook_group, DEFAULT_CLI_ARGS['playbook_group'])
add_args_to_parser(stop_playbook_group, DEFAULT_CLI_ARGS['playbook_group'])
add_args_to_parser(isalive_playbook_group, DEFAULT_CLI_ARGS['playbook_group'])
add_args_to_parser(transmit_playbook_group, DEFAULT_CLI_ARGS['playbook_group'])
# container group
container_group_options = (
"Ansible Container Options",
"configuation options for executing Ansible playbooks",
)
run_container_group = run_subparser.add_argument_group(*container_group_options)
start_container_group = start_subparser.add_argument_group(*container_group_options)
stop_container_group = stop_subparser.add_argument_group(*container_group_options)
isalive_container_group = isalive_subparser.add_argument_group(*container_group_options)
transmit_container_group = transmit_subparser.add_argument_group(*container_group_options)
adhoc_container_group = adhoc_subparser.add_argument_group(*container_group_options)
playbook_container_group = playbook_subparser.add_argument_group(*container_group_options)
add_args_to_parser(run_container_group, DEFAULT_CLI_ARGS['container_group'])
add_args_to_parser(start_container_group, DEFAULT_CLI_ARGS['container_group'])
add_args_to_parser(stop_container_group, DEFAULT_CLI_ARGS['container_group'])
add_args_to_parser(isalive_container_group, DEFAULT_CLI_ARGS['container_group'])
add_args_to_parser(transmit_container_group, DEFAULT_CLI_ARGS['container_group'])
add_args_to_parser(adhoc_container_group, DEFAULT_CLI_ARGS['container_group'])
add_args_to_parser(playbook_container_group, DEFAULT_CLI_ARGS['container_group'])
if len(sys.argv) == 1:
parser.print_usage()
print_common_usage()
parser.exit(status=0)
if ('playbook' in sys.argv) or ('adhoc' in sys.argv):
args, leftover_args = parser.parse_known_args(sys_args)
else:
args = parser.parse_args(sys_args)
vargs = vars(args)
# FIXME - Probably a more elegant way to handle this.
# set some state about CLI Exec Env
cli_execenv_cmd = ""
if vargs.get('command') in ('adhoc', 'playbook'):
cli_execenv_cmd = vargs.get('command')
if not leftover_args:
parser.exit(
status=1,
message="The {} subcommand requires arguments to pass to Ansible inside the container.\n".format(
vargs.get('command')
)
)
if vargs.get('command') in ('worker', 'process', 'adhoc', 'playbook'):
if not vargs.get('private_data_dir'):
temp_private_dir = tempfile.mkdtemp()
vargs['private_data_dir'] = temp_private_dir
if vargs.get('keep_files', False):
print("ANSIBLE-RUNNER: keeping temporary data directory: {}".format(temp_private_dir))
else:
@atexit.register
def conditionally_clean_cli_execenv_tempdir():
shutil.rmtree(temp_private_dir)
if vargs.get('command') in ('start', 'run', 'transmit'):
if vargs.get('hosts') and not (vargs.get('module') or vargs.get('role')):
parser.exit(status=1, message="The --hosts option can only be used with -m or -r\n")
if not (vargs.get('module') or vargs.get('role')) and not vargs.get('playbook'):
parser.exit(status=1, message="The -p option must be specified when not using -m or -r\n")
output.configure()
# enable or disable debug mode
output.set_debug('enable' if vargs.get('debug') else 'disable')
# set the output logfile
if ('logfile' in args) and vargs.get('logfile'):
output.set_logfile(vargs.get('logfile'))
output.debug('starting debug logging')
# get the absolute path for start since it is a daemon
vargs['private_data_dir'] = os.path.abspath(vargs.get('private_data_dir'))
pidfile = os.path.join(vargs.get('private_data_dir'), 'pid')
try:
os.makedirs(vargs.get('private_data_dir'), mode=0o700)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(vargs.get('private_data_dir')):
pass
else:
raise
stderr_path = None
context = None
if vargs.get('command') not in ('run', 'transmit', 'worker', 'adhoc', 'playbook'):
stderr_path = os.path.join(vargs.get('private_data_dir'), 'daemon.log')
if not os.path.exists(stderr_path):
os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
if vargs.get('command') in ('start', 'run', 'transmit', 'worker', 'process', 'adhoc', 'playbook'):
if vargs.get('command') == 'start':
import daemon
from daemon.pidfile import TimeoutPIDLockFile
context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
else:
context = threading.Lock()
streamer = None
if vargs.get('command') in ('transmit', 'worker', 'process'):
streamer = vargs.get('command')
with context:
with role_manager(vargs) as vargs:
run_options = dict(private_data_dir=vargs.get('private_data_dir'),
ident=vargs.get('ident'),
binary=vargs.get('binary'),
playbook=vargs.get('playbook'),
module=vargs.get('module'),
module_args=vargs.get('module_args'),
host_pattern=vargs.get('hosts'),
verbosity=vargs.get('v'),
quiet=vargs.get('quiet'),
rotate_artifacts=vargs.get('rotate_artifacts'),
ignore_logging=False,
json_mode=vargs.get('json'),
omit_event_data=vargs.get('omit_event_data'),
only_failed_event_data=vargs.get('only_failed_event_data'),
inventory=vargs.get('inventory'),
forks=vargs.get('forks'),
project_dir=vargs.get('project_dir'),
artifact_dir=vargs.get('artifact_dir'),
roles_path=[vargs.get('roles_path')] if vargs.get('roles_path') else None,
process_isolation=vargs.get('process_isolation'),
process_isolation_executable=vargs.get('process_isolation_executable'),
process_isolation_path=vargs.get('process_isolation_path'),
process_isolation_hide_paths=vargs.get('process_isolation_hide_paths'),
process_isolation_show_paths=vargs.get('process_isolation_show_paths'),
process_isolation_ro_paths=vargs.get('process_isolation_ro_paths'),
container_image=vargs.get('container_image'),
container_volume_mounts=vargs.get('container_volume_mounts'),
container_options=vargs.get('container_options'),
directory_isolation_base_path=vargs.get('directory_isolation_base_path'),
resource_profiling=vargs.get('resource_profiling'),
resource_profiling_base_cgroup=vargs.get('resource_profiling_base_cgroup'),
resource_profiling_cpu_poll_interval=vargs.get('resource_profiling_cpu_poll_interval'),
resource_profiling_memory_poll_interval=vargs.get('resource_profiling_memory_poll_interval'),
resource_profiling_pid_poll_interval=vargs.get('resource_profiling_pid_poll_interval'),
resource_profiling_results_dir=vargs.get('resource_profiling_results_dir'),
limit=vargs.get('limit'),
streamer=streamer,
cli_execenv_cmd=cli_execenv_cmd
)
if vargs.get('command') in ('adhoc', 'playbook'):
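# pass everything from the first unrecognized argument onward straight
# through to ansible/ansible-playbook inside the execution environment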
run_options['cmdline'] = sys.argv[sys.argv.index(leftover_args[0]):]
run_options['process_isolation'] = True
run_options['process_isolation_executable'] = vargs.get('container_runtime')
try:
res = run(**run_options)
except Exception:
exc = traceback.format_exc()
if stderr_path:
with open(stderr_path, 'w+') as f:
f.write(exc)
else:
sys.stderr.write(exc)
return 1
return res.rc
try:
with open(pidfile, 'r') as f:
pid = int(f.readline())
except IOError:
return 1
if vargs.get('command') == 'stop':
Runner.handle_termination(pid, pidfile=pidfile)
return 0
elif vargs.get('command') == 'is-alive':
try:
os.kill(pid, signal.SIG_DFL)
return 0
except OSError:
return 1
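# Illustrative usage sketch (comments only; paths and playbook names below
# are hypothetical):
#   ansible-runner run /tmp/demo -p test.yml     # run a playbook in the foreground
#   ansible-runner start /tmp/demo -p test.yml   # daemonize and write a pid file
#   ansible-runner is-alive /tmp/demo            # exit 0 if the daemon is still running
#   ansible-runner stop /tmp/demo                # terminate the daemonized run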

View File

View File

@ -0,0 +1,50 @@
# Copyright (c) 2017 Ansible by Red Hat
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
DOCUMENTATION = '''
callback: awx_display
short_description: Playbook event dispatcher for ansible-runner
version_added: "2.0"
description:
- This callback is necessary for ansible-runner to work
type: stdout
extends_documentation_fragment:
- default_callback
requirements:
- Set as stdout in config
'''
# Python
import os # noqa
import sys # noqa
# Add awx/lib to sys.path.
awx_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if awx_lib_path not in sys.path:
sys.path.insert(0, awx_lib_path)
# Tower Display Callback
from display_callback import AWXDefaultCallbackModule # noqa
# In order to be recognized correctly, self.__class__.__name__ needs to
# match "CallbackModule"
class CallbackModule(AWXDefaultCallbackModule):
pass
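# Illustrative note (not part of the original file): to load this plugin,
# Ansible would typically be pointed at it via ansible.cfg, e.g. (the plugin
# path below is hypothetical):
#   [defaults]
#   stdout_callback = awx_display
#   callback_plugins = /path/to/ansible_runner/callbacks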

View File

@ -0,0 +1,50 @@
# Copyright (c) 2017 Ansible by Red Hat
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
DOCUMENTATION = '''
callback: minimal
short_description: Ad hoc event dispatcher for ansible-runner
version_added: "2.0"
description:
- This callback is necessary for ansible-runner to work
type: stdout
extends_documentation_fragment:
- default_callback
requirements:
- Set as stdout in config
'''
# Python
import os # noqa
import sys # noqa
# Add awx/lib to sys.path.
awx_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if awx_lib_path not in sys.path:
sys.path.insert(0, awx_lib_path)
# Tower Display Callback
from display_callback import AWXMinimalCallbackModule # noqa
# In order to be recognized correctly, self.__class__.__name__ needs to
# match "CallbackModule"
class CallbackModule(AWXMinimalCallbackModule):
pass

View File

@ -0,0 +1,2 @@
default_process_isolation_executable = 'podman'
default_container_image = 'quay.io/ansible/ansible-runner:devel'

View File

@ -0,0 +1,24 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
# AWX Display Callback
from . import display # noqa (wraps ansible.display.Display methods)
from .module import AWXDefaultCallbackModule, AWXMinimalCallbackModule
__all__ = ['AWXDefaultCallbackModule', 'AWXMinimalCallbackModule']

View File

@ -0,0 +1,98 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
# Python
import functools
import sys
import uuid
# Ansible
from ansible.utils.display import Display
# Tower Display Callback
from .events import event_context
__all__ = []
def with_context(**context):
global event_context
def wrap(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with event_context.set_local(**context):
return f(*args, **kwargs)
return wrapper
return wrap
for attr in dir(Display):
if attr.startswith('_') or 'cow' in attr or 'prompt' in attr:
continue
if attr in ('display', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv', 'verbose'):
continue
if not callable(getattr(Display, attr)):
continue
setattr(Display, attr, with_context(**{attr: True})(getattr(Display, attr)))
def with_verbosity(f):
global event_context
@functools.wraps(f)
def wrapper(*args, **kwargs):
host = args[2] if len(args) >= 3 else kwargs.get('host', None)
caplevel = args[3] if len(args) >= 4 else kwargs.get('caplevel', 2)
context = dict(verbose=True, verbosity=(caplevel + 1))
if host is not None:
context['remote_addr'] = host
with event_context.set_local(**context):
return f(*args, **kwargs)
return wrapper
Display.verbose = with_verbosity(Display.verbose)
def display_with_context(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
log_only = args[5] if len(args) >= 6 else kwargs.get('log_only', False)
stderr = args[3] if len(args) >= 4 else kwargs.get('stderr', False)
event_uuid = event_context.get().get('uuid', None)
with event_context.display_lock:
# If writing only to a log file or there is already an event UUID
# set (from a callback module method), skip dumping the event data.
if log_only or event_uuid:
return f(*args, **kwargs)
try:
fileobj = sys.stderr if stderr else sys.stdout
event_context.add_local(uuid=str(uuid.uuid4()))
event_context.dump_begin(fileobj)
return f(*args, **kwargs)
finally:
event_context.dump_end(fileobj)
event_context.remove_local(uuid=None)
return wrapper
Display.display = display_with_context(Display.display)
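# Illustrative note (not part of the original file): the net effect of the
# patching above is that a call such as Display().warning("msg") now runs
# inside event_context.set_local(warning=True), and Display.display() frames
# its output between dump_begin()/dump_end(), tagging each write with an
# event UUID that the callback event machinery can correlate.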

View File

@ -0,0 +1,203 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
# Python
import base64
import contextlib
import datetime
import json
import multiprocessing
import os
import stat
import threading
import uuid
__all__ = ['event_context']
# use a custom JSON serializer so we can properly handle !unsafe and !vault
# objects that may exist in events emitted by the callback plugin
# see: https://github.com/ansible/ansible/pull/38759
class AnsibleJSONEncoderLocal(json.JSONEncoder):
'''
The AnsibleJSONEncoder class exists in Ansible core for this purpose;
this class performs a mostly identical function via duck typing
'''
def default(self, o):
if getattr(o, 'yaml_tag', None) == '!vault':
encrypted_form = o._ciphertext
if isinstance(encrypted_form, bytes):
encrypted_form = encrypted_form.decode('utf-8')
return {'__ansible_vault': encrypted_form}
elif isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
return super(AnsibleJSONEncoderLocal, self).default(o)
class IsolatedFileWrite:
'''
Class that will write partial event data to a file
'''
def __init__(self):
self.private_data_dir = os.getenv('AWX_ISOLATED_DATA_DIR')
def set(self, key, value):
# Strip off the leading key identifying characters :1:ev-
event_uuid = key[len(':1:ev-'):]
# Write data in a staging area and then atomic move to pickup directory
filename = '{}-partial.json'.format(event_uuid)
if not os.path.exists(os.path.join(self.private_data_dir, 'job_events')):
os.mkdir(os.path.join(self.private_data_dir, 'job_events'), 0o700)
dropoff_location = os.path.join(self.private_data_dir, 'job_events', filename)
write_location = '.'.join([dropoff_location, 'tmp'])
partial_data = json.dumps(value, cls=AnsibleJSONEncoderLocal)
with os.fdopen(os.open(write_location, os.O_WRONLY | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR), 'w') as f:
f.write(partial_data)
os.rename(write_location, dropoff_location)
class EventContext(object):
'''
Store global and local (per thread/process) data associated with callback
events and other display output methods.
'''
def __init__(self):
self.display_lock = multiprocessing.RLock()
self._local = threading.local()
if os.getenv('AWX_ISOLATED_DATA_DIR', False):
self.cache = IsolatedFileWrite()
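# NOTE: self.cache is only created when AWX_ISOLATED_DATA_DIR is set;
# dump_begin() below assumes that variable (and therefore the cache) is
# present, so the runner is expected to export it before the plugin loads.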
def add_local(self, **kwargs):
tls = vars(self._local)
ctx = tls.setdefault('_ctx', {})
ctx.update(kwargs)
def remove_local(self, **kwargs):
for key in kwargs.keys():
self._local._ctx.pop(key, None)
@contextlib.contextmanager
def set_local(self, **kwargs):
try:
self.add_local(**kwargs)
yield
finally:
self.remove_local(**kwargs)
def get_local(self):
return getattr(getattr(self, '_local', None), '_ctx', {})
def add_global(self, **kwargs):
if not hasattr(self, '_global_ctx'):
self._global_ctx = {}
self._global_ctx.update(kwargs)
def remove_global(self, **kwargs):
if hasattr(self, '_global_ctx'):
for key in kwargs.keys():
self._global_ctx.pop(key, None)
@contextlib.contextmanager
def set_global(self, **kwargs):
try:
self.add_global(**kwargs)
yield
finally:
self.remove_global(**kwargs)
def get_global(self):
return getattr(self, '_global_ctx', {})
def get(self):
ctx = {}
ctx.update(self.get_global())
ctx.update(self.get_local())
return ctx
def get_begin_dict(self):
omit_event_data = os.getenv("RUNNER_OMIT_EVENTS", "False").lower() == "true"
include_only_failed_event_data = os.getenv("RUNNER_ONLY_FAILED_EVENTS", "False").lower() == "true"
event_data = self.get()
event = event_data.pop('event', None)
if not event:
event = 'verbose'
for key in ('debug', 'verbose', 'deprecated', 'warning', 'system_warning', 'error'):
if event_data.get(key, False):
event = key
break
event_dict = dict(event=event)
should_process_event_data = (include_only_failed_event_data and event in ('runner_on_failed', 'runner_on_async_failed', 'runner_on_item_failed')) \
or not include_only_failed_event_data
if os.getenv('JOB_ID', ''):
event_dict['job_id'] = int(os.getenv('JOB_ID', '0'))
if os.getenv('AD_HOC_COMMAND_ID', ''):
event_dict['ad_hoc_command_id'] = int(os.getenv('AD_HOC_COMMAND_ID', '0'))
if os.getenv('PROJECT_UPDATE_ID', ''):
event_dict['project_update_id'] = int(os.getenv('PROJECT_UPDATE_ID', '0'))
event_dict['pid'] = event_data.get('pid', os.getpid())
event_dict['uuid'] = event_data.get('uuid', str(uuid.uuid4()))
event_dict['created'] = event_data.get('created', datetime.datetime.utcnow().isoformat())
if not event_data.get('parent_uuid', None):
for key in ('task_uuid', 'play_uuid', 'playbook_uuid'):
parent_uuid = event_data.get(key, None)
if parent_uuid and parent_uuid != event_data.get('uuid', None):
event_dict['parent_uuid'] = parent_uuid
break
else:
event_dict['parent_uuid'] = event_data.get('parent_uuid', None)
if "verbosity" in event_data.keys():
event_dict["verbosity"] = event_data.pop("verbosity")
if not omit_event_data and should_process_event_data:
max_res = int(os.getenv("MAX_EVENT_RES", 700000))
if event not in ('playbook_on_stats',) and "res" in event_data and len(str(event_data['res'])) > max_res:
event_data['res'] = {}
else:
event_data = dict()
event_dict['event_data'] = event_data
return event_dict
def get_end_dict(self):
return {}
def dump(self, fileobj, data, max_width=78, flush=False):
b64data = base64.b64encode(json.dumps(data).encode('utf-8')).decode()
with self.display_lock:
# pattern corresponding to OutputEventFilter expectation
fileobj.write(u'\x1b[K')
for offset in range(0, len(b64data), max_width):
chunk = b64data[offset:offset + max_width]
escaped_chunk = u'{}\x1b[{}D'.format(chunk, len(chunk))
fileobj.write(escaped_chunk)
fileobj.write(u'\x1b[K')
if flush:
fileobj.flush()
def dump_begin(self, fileobj):
begin_dict = self.get_begin_dict()
self.cache.set(":1:ev-{}".format(begin_dict['uuid']), begin_dict)
self.dump(fileobj, {'uuid': begin_dict['uuid']})
def dump_end(self, fileobj):
self.dump(fileobj, self.get_end_dict(), flush=True)
event_context = EventContext()
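# Illustrative sketch (not part of the original file): decoding the frames
# that dump() emits. Each event is written as
#   \x1b[K <b64 chunk>\x1b[<len>D ... \x1b[K
# so a consumer can collect the chunks, join them, and base64/JSON-decode
# the payload. The helper below is hypothetical:
#
#   import base64, json, re
#   def decode_event(raw):
#       chunks = re.findall(r'([A-Za-z0-9+/=]+)\x1b\[\d+D', raw)
#       return json.loads(base64.b64decode(''.join(chunks)))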

View File

@ -0,0 +1,29 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
# Python
import os
# Ansible
import ansible
# Because of the way Ansible loads plugins, it's not possible to import
# ansible.plugins.callback.minimal when being loaded as the minimal plugin. Ugh.
minimal_plugin = os.path.join(os.path.dirname(ansible.__file__), 'plugins', 'callback', 'minimal.py')
exec(compile(open(minimal_plugin, "rb").read(), minimal_plugin, 'exec'))

View File

@ -0,0 +1,548 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
# Python
import collections
import contextlib
import datetime
import sys
import uuid
from copy import copy
# Ansible
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.plugins.callback.default import CallbackModule as DefaultCallbackModule
# AWX Display Callback
from .events import event_context
from .minimal import CallbackModule as MinimalCallbackModule
CENSORED = "the output has been hidden due to the fact that 'no_log: true' was specified for this result" # noqa
def current_time():
return datetime.datetime.utcnow()
class BaseCallbackModule(CallbackBase):
'''
Callback module for logging ansible/ansible-playbook events.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
# These events should never have an associated play.
EVENTS_WITHOUT_PLAY = [
'playbook_on_start',
'playbook_on_stats',
]
# These events should never have an associated task.
EVENTS_WITHOUT_TASK = EVENTS_WITHOUT_PLAY + [
'playbook_on_setup',
'playbook_on_notify',
'playbook_on_import_for_host',
'playbook_on_not_import_for_host',
'playbook_on_no_hosts_matched',
'playbook_on_no_hosts_remaining',
]
def __init__(self):
super(BaseCallbackModule, self).__init__()
self._host_start = {}
self.task_uuids = set()
self.duplicate_task_counts = collections.defaultdict(lambda: 1)
self.play_uuids = set()
self.duplicate_play_counts = collections.defaultdict(lambda: 1)
@contextlib.contextmanager
def capture_event_data(self, event, **event_data):
event_data.setdefault('uuid', str(uuid.uuid4()))
if event not in self.EVENTS_WITHOUT_TASK:
task = event_data.pop('task', None)
else:
task = None
if event_data.get('res'):
if event_data['res'].get('_ansible_no_log', False):
event_data['res'] = {'censored': CENSORED}
if event_data['res'].get('results', []):
event_data['res']['results'] = copy(event_data['res']['results'])
for i, item in enumerate(event_data['res'].get('results', [])):
if isinstance(item, dict) and item.get('_ansible_no_log', False):
event_data['res']['results'][i] = {'censored': CENSORED}
with event_context.display_lock:
try:
event_context.add_local(event=event, **event_data)
if task:
self.set_task(task, local=True)
event_context.dump_begin(sys.stdout)
yield
finally:
event_context.dump_end(sys.stdout)
if task:
self.clear_task(local=True)
event_context.remove_local(event=None, **event_data)
def set_playbook(self, playbook):
# NOTE: Ansible doesn't generate a UUID for playbook_on_start so do it for them.
self.playbook_uuid = str(uuid.uuid4())
file_name = getattr(playbook, '_file_name', '???')
event_context.add_global(playbook=file_name, playbook_uuid=self.playbook_uuid)
self.clear_play()
def set_play(self, play):
if hasattr(play, 'hosts'):
if isinstance(play.hosts, list):
pattern = ','.join(play.hosts)
else:
pattern = play.hosts
else:
pattern = ''
name = play.get_name().strip() or pattern
event_context.add_global(play=name, play_uuid=str(play._uuid), play_pattern=pattern)
self.clear_task()
def clear_play(self):
event_context.remove_global(play=None, play_uuid=None, play_pattern=None)
self.clear_task()
def set_task(self, task, local=False):
self.clear_task(local)
# FIXME: Task is "global" unless using free strategy!
task_ctx = dict(
task=(task.name or task.action),
task_uuid=str(task._uuid),
task_action=task.action,
task_args='',
)
try:
task_ctx['task_path'] = task.get_path()
except AttributeError:
pass
if C.DISPLAY_ARGS_TO_STDOUT:
if task.no_log:
task_ctx['task_args'] = "the output has been hidden due to the fact that 'no_log: true' was specified for this result"
else:
task_args = ', '.join(('%s=%s' % a for a in task.args.items()))
task_ctx['task_args'] = task_args
if getattr(task, '_role', None):
task_role = task._role._role_name
else:
task_role = getattr(task, 'role_name', '')
if task_role:
task_ctx['role'] = task_role
if local:
event_context.add_local(**task_ctx)
else:
event_context.add_global(**task_ctx)
def clear_task(self, local=False):
task_ctx = dict(task=None, task_path=None, task_uuid=None, task_action=None, task_args=None, role=None)
if local:
event_context.remove_local(**task_ctx)
else:
event_context.remove_global(**task_ctx)
def v2_playbook_on_start(self, playbook):
self.set_playbook(playbook)
event_data = dict(
uuid=self.playbook_uuid,
)
with self.capture_event_data('playbook_on_start', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_start(playbook)
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
encrypt=None, confirm=False, salt_size=None,
salt=None, default=None, unsafe=None):
event_data = dict(
varname=varname,
private=private,
prompt=prompt,
encrypt=encrypt,
confirm=confirm,
salt_size=salt_size,
salt=salt,
default=default,
unsafe=unsafe,
)
with self.capture_event_data('playbook_on_vars_prompt', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_vars_prompt(
varname, private, prompt, encrypt, confirm, salt_size, salt,
default,
)
def v2_playbook_on_include(self, included_file):
event_data = dict(
included_file=included_file._filename if included_file is not None else None,
)
with self.capture_event_data('playbook_on_include', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_include(included_file)
def v2_playbook_on_play_start(self, play):
play_uuid = str(play._uuid)
if play_uuid in self.play_uuids:
# When this play UUID repeats, it means the play is using the
# free strategy (or serial:1) so different hosts may be running
# different tasks within a play (where duplicate UUIDs are common).
#
# When this is the case, modify the UUID slightly to append
# a counter so we can still _track_ duplicate events, but also
# avoid breaking the display in these scenarios.
self.duplicate_play_counts[play_uuid] += 1
play_uuid = '_'.join([
play_uuid,
str(self.duplicate_play_counts[play_uuid])
])
self.play_uuids.add(play_uuid)
play._uuid = play_uuid
self.set_play(play)
if hasattr(play, 'hosts'):
if isinstance(play.hosts, list):
pattern = ','.join(play.hosts)
else:
pattern = play.hosts
else:
pattern = ''
name = play.get_name().strip() or pattern
event_data = dict(
name=name,
pattern=pattern,
uuid=str(play._uuid),
)
with self.capture_event_data('playbook_on_play_start', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_play_start(play)
def v2_playbook_on_import_for_host(self, result, imported_file):
# NOTE: Not used by Ansible 2.x.
with self.capture_event_data('playbook_on_import_for_host'):
super(BaseCallbackModule, self).v2_playbook_on_import_for_host(result, imported_file)
def v2_playbook_on_not_import_for_host(self, result, missing_file):
# NOTE: Not used by Ansible 2.x.
with self.capture_event_data('playbook_on_not_import_for_host'):
super(BaseCallbackModule, self).v2_playbook_on_not_import_for_host(result, missing_file)
def v2_playbook_on_setup(self):
# NOTE: Not used by Ansible 2.x.
with self.capture_event_data('playbook_on_setup'):
super(BaseCallbackModule, self).v2_playbook_on_setup()
def v2_playbook_on_task_start(self, task, is_conditional):
# FIXME: Flag task path output as vv.
task_uuid = str(task._uuid)
if task_uuid in self.task_uuids:
# When this task UUID repeats, it means the play is using the
# free strategy (or serial:1) so different hosts may be running
# different tasks within a play (where duplicate UUIDs are common).
#
# When this is the case, modify the UUID slightly to append
# a counter so we can still _track_ duplicate events, but also
# avoid breaking the display in these scenarios.
self.duplicate_task_counts[task_uuid] += 1
task_uuid = '_'.join([
task_uuid,
str(self.duplicate_task_counts[task_uuid])
])
self.task_uuids.add(task_uuid)
self.set_task(task)
event_data = dict(
task=task,
name=task.get_name(),
is_conditional=is_conditional,
uuid=task_uuid,
)
with self.capture_event_data('playbook_on_task_start', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_task_start(task, is_conditional)
def v2_playbook_on_cleanup_task_start(self, task):
# NOTE: Not used by Ansible 2.x.
self.set_task(task)
event_data = dict(
task=task,
name=task.get_name(),
uuid=str(task._uuid),
is_conditional=True,
)
with self.capture_event_data('playbook_on_task_start', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_cleanup_task_start(task)
def v2_playbook_on_handler_task_start(self, task):
# NOTE: Re-using playbook_on_task_start event for this v2-specific
# event, but setting is_conditional=True, which is how v1 identified a
# task run as a handler.
self.set_task(task)
event_data = dict(
task=task,
name=task.get_name(),
uuid=str(task._uuid),
is_conditional=True,
)
with self.capture_event_data('playbook_on_task_start', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_handler_task_start(task)
def v2_playbook_on_no_hosts_matched(self):
with self.capture_event_data('playbook_on_no_hosts_matched'):
super(BaseCallbackModule, self).v2_playbook_on_no_hosts_matched()
def v2_playbook_on_no_hosts_remaining(self):
with self.capture_event_data('playbook_on_no_hosts_remaining'):
super(BaseCallbackModule, self).v2_playbook_on_no_hosts_remaining()
def v2_playbook_on_notify(self, handler, host):
# NOTE: Not used by Ansible < 2.5.
event_data = dict(
host=host.get_name(),
handler=handler.get_name(),
)
with self.capture_event_data('playbook_on_notify', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_notify(handler, host)
'''
ansible_stats was retroactively added in 2.2
'''
def v2_playbook_on_stats(self, stats):
self.clear_play()
# FIXME: Add count of plays/tasks.
event_data = dict(
changed=stats.changed,
dark=stats.dark,
failures=stats.failures,
ignored=getattr(stats, 'ignored', 0),
ok=stats.ok,
processed=stats.processed,
rescued=getattr(stats, 'rescued', 0),
skipped=stats.skipped,
artifact_data=stats.custom.get('_run', {}) if hasattr(stats, 'custom') else {}
)
with self.capture_event_data('playbook_on_stats', **event_data):
super(BaseCallbackModule, self).v2_playbook_on_stats(stats)
@staticmethod
def _get_event_loop(task):
if hasattr(task, 'loop_with'): # Ansible >=2.5
return task.loop_with
elif hasattr(task, 'loop'): # Ansible <2.4
return task.loop
return None
def _get_result_timing_data(self, result):
host_start = self._host_start.get(result._host.get_name())
if host_start:
end_time = current_time()
return host_start, end_time, (end_time - host_start).total_seconds()
return None, None, None
def v2_runner_on_ok(self, result):
# FIXME: Display detailed results or not based on verbosity.
# strip environment vars from the job event; it already exists on the
# job and sensitive values are filtered there
if result._task.action in ('setup', 'gather_facts'):
result._result.get('ansible_facts', {}).pop('ansible_env', None)
host_start, end_time, duration = self._get_result_timing_data(result)
event_data = dict(
host=result._host.get_name(),
remote_addr=result._host.address,
task=result._task,
res=result._result,
start=host_start,
end=end_time,
duration=duration,
event_loop=self._get_event_loop(result._task),
)
with self.capture_event_data('runner_on_ok', **event_data):
super(BaseCallbackModule, self).v2_runner_on_ok(result)
def v2_runner_on_failed(self, result, ignore_errors=False):
# FIXME: Add verbosity for exception/results output.
host_start, end_time, duration = self._get_result_timing_data(result)
event_data = dict(
host=result._host.get_name(),
remote_addr=result._host.address,
res=result._result,
task=result._task,
start=host_start,
end=end_time,
duration=duration,
ignore_errors=ignore_errors,
event_loop=self._get_event_loop(result._task),
)
with self.capture_event_data('runner_on_failed', **event_data):
super(BaseCallbackModule, self).v2_runner_on_failed(result, ignore_errors)
def v2_runner_on_skipped(self, result):
host_start, end_time, duration = self._get_result_timing_data(result)
event_data = dict(
host=result._host.get_name(),
remote_addr=result._host.address,
task=result._task,
start=host_start,
end=end_time,
duration=duration,
event_loop=self._get_event_loop(result._task),
)
with self.capture_event_data('runner_on_skipped', **event_data):
super(BaseCallbackModule, self).v2_runner_on_skipped(result)
def v2_runner_on_unreachable(self, result):
host_start, end_time, duration = self._get_result_timing_data(result)
event_data = dict(
host=result._host.get_name(),
remote_addr=result._host.address,
task=result._task,
start=host_start,
end=end_time,
duration=duration,
res=result._result,
)
with self.capture_event_data('runner_on_unreachable', **event_data):
super(BaseCallbackModule, self).v2_runner_on_unreachable(result)
def v2_runner_on_no_hosts(self, task):
# NOTE: Not used by Ansible 2.x.
event_data = dict(
task=task,
)
with self.capture_event_data('runner_on_no_hosts', **event_data):
super(BaseCallbackModule, self).v2_runner_on_no_hosts(task)
def v2_runner_on_async_poll(self, result):
# NOTE: Not used by Ansible 2.x.
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
jid=result._result.get('ansible_job_id'),
)
with self.capture_event_data('runner_on_async_poll', **event_data):
super(BaseCallbackModule, self).v2_runner_on_async_poll(result)
def v2_runner_on_async_ok(self, result):
# NOTE: Not used by Ansible 2.x.
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
jid=result._result.get('ansible_job_id'),
)
with self.capture_event_data('runner_on_async_ok', **event_data):
super(BaseCallbackModule, self).v2_runner_on_async_ok(result)
def v2_runner_on_async_failed(self, result):
# NOTE: Not used by Ansible 2.x.
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
jid=result._result.get('ansible_job_id'),
)
with self.capture_event_data('runner_on_async_failed', **event_data):
super(BaseCallbackModule, self).v2_runner_on_async_failed(result)
def v2_runner_on_file_diff(self, result, diff):
# NOTE: Not used by Ansible 2.x.
event_data = dict(
host=result._host.get_name(),
task=result._task,
diff=diff,
)
with self.capture_event_data('runner_on_file_diff', **event_data):
super(BaseCallbackModule, self).v2_runner_on_file_diff(result, diff)
def v2_on_file_diff(self, result):
# NOTE: Logged as runner_on_file_diff.
event_data = dict(
host=result._host.get_name(),
task=result._task,
diff=result._result.get('diff'),
)
with self.capture_event_data('runner_on_file_diff', **event_data):
super(BaseCallbackModule, self).v2_on_file_diff(result)
def v2_runner_item_on_ok(self, result):
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
)
with self.capture_event_data('runner_item_on_ok', **event_data):
super(BaseCallbackModule, self).v2_runner_item_on_ok(result)
def v2_runner_item_on_failed(self, result):
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
)
with self.capture_event_data('runner_item_on_failed', **event_data):
super(BaseCallbackModule, self).v2_runner_item_on_failed(result)
def v2_runner_item_on_skipped(self, result):
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
)
with self.capture_event_data('runner_item_on_skipped', **event_data):
super(BaseCallbackModule, self).v2_runner_item_on_skipped(result)
def v2_runner_retry(self, result):
event_data = dict(
host=result._host.get_name(),
task=result._task,
res=result._result,
)
with self.capture_event_data('runner_retry', **event_data):
super(BaseCallbackModule, self).v2_runner_retry(result)
def v2_runner_on_start(self, host, task):
event_data = dict(
host=host.get_name(),
task=task
)
self._host_start[host.get_name()] = current_time()
with self.capture_event_data('runner_on_start', **event_data):
super(BaseCallbackModule, self).v2_runner_on_start(host, task)
class AWXDefaultCallbackModule(BaseCallbackModule, DefaultCallbackModule):
CALLBACK_NAME = 'awx_display'
class AWXMinimalCallbackModule(BaseCallbackModule, MinimalCallbackModule):
CALLBACK_NAME = 'minimal'
def v2_playbook_on_play_start(self, play):
pass
def v2_playbook_on_task_start(self, task, is_conditional):
self.set_task(task)
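# Illustrative note (not part of the original file): because BaseCallbackModule
# comes first in each MRO, its event-capturing v2_* overrides run first and
# then defer, via super(), to the stock default/minimal display logic.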

View File

@ -0,0 +1,11 @@
class AnsibleRunnerException(Exception):
""" Generic Runner Error """
class ConfigurationError(AnsibleRunnerException):
""" Misconfiguration of Runner """
class CallbackError(AnsibleRunnerException):
""" Exception occurred in Callback """

258
ansible_runner/interface.py Normal file
View File

@ -0,0 +1,258 @@
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import sys
import threading
import logging
from ansible_runner import output
from ansible_runner.runner_config import RunnerConfig
from ansible_runner.runner import Runner
from ansible_runner.streaming import Transmitter, Worker, Processor
from ansible_runner.utils import (
dump_artifacts,
check_isolation_executable_installed,
)
logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
def init_runner(**kwargs):
'''
Initialize the Runner() instance
This function will properly initialize both run() and run_async()
functions in the same way and return a valid instance of Runner.
See parameters given to :py:func:`ansible_runner.interface.run`
'''
# If running via the transmit-worker-process method, we must only extract things as read-only
# inside of one of these commands. That could be either transmit or worker.
if not kwargs.get('cli_execenv_cmd') and (kwargs.get('streamer') not in ('worker', 'process')):
dump_artifacts(kwargs)
if kwargs.get('streamer'):
# undo any full paths that were dumped by dump_artifacts above in the streamer case
private_data_dir = kwargs['private_data_dir']
project_dir = os.path.join(private_data_dir, 'project')
playbook_path = kwargs.get('playbook') or ''
if os.path.isabs(playbook_path) and playbook_path.startswith(project_dir):
kwargs['playbook'] = os.path.relpath(playbook_path, project_dir)
inventory_path = kwargs.get('inventory') or ''
if os.path.isabs(inventory_path) and inventory_path.startswith(private_data_dir):
kwargs['inventory'] = os.path.relpath(inventory_path, private_data_dir)
roles_path = kwargs.get('envvars', {}).get('ANSIBLE_ROLES_PATH') or ''
if os.path.isabs(roles_path) and roles_path.startswith(private_data_dir):
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = os.path.relpath(roles_path, private_data_dir)
debug = kwargs.pop('debug', None)
logfile = kwargs.pop('logfile', None)
if not kwargs.pop("ignore_logging", True):
output.configure()
if debug in (True, False):
output.set_debug('enable' if debug is True else 'disable')
if logfile:
output.set_logfile(logfile)
if kwargs.get("process_isolation", False):
pi_executable = kwargs.get("process_isolation_executable", "podman")
if not check_isolation_executable_installed(pi_executable):
print(f'Unable to find process isolation executable: {pi_executable}')
sys.exit(1)
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
streamer = kwargs.pop('streamer', None)
if streamer:
if streamer == 'transmit':
stream_transmitter = Transmitter(**kwargs)
return stream_transmitter
if streamer == 'worker':
stream_worker = Worker(**kwargs)
return stream_worker
if streamer == 'process':
stream_processor = Processor(event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback,
**kwargs)
return stream_processor
kwargs.pop('_input', None)
kwargs.pop('_output', None)
rc = RunnerConfig(**kwargs)
rc.prepare()
return Runner(rc,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
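# Illustrative sketch (not part of the original file): the three streamer
# stages are meant to be chained over pipes; the paths below are hypothetical:
#   ansible-runner transmit /tmp/demo -p test.yml | \
#       ansible-runner worker | \
#       ansible-runner process /tmp/demo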
def run(**kwargs):
'''
Run an Ansible Runner task in the foreground and return a Runner object when complete.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param playbook: The playbook (either supplied here as a list or string... or as a path relative to
``private_data_dir/project``) that will be invoked by runner when executing Ansible.
:param module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
:param module_args: The module arguments that will be supplied to ad-hoc mode.
:param host_pattern: The host pattern to match when running in ad-hoc mode.
:param inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
a specific host or list of hosts. This can take the form of
- Path to the inventory file in the ``private_data_dir``
- Native python dict supporting the YAML/json inventory structure
- A text INI formatted string
- A list of inventory sources, or an empty list to disable passing inventory
:param roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
read from ``env/extravars`` in ``private_data_dir``.
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
:param limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
:param forks: Control Ansible parallel concurrency
:param verbosity: Control how verbose the output of ansible-playbook is
:param quiet: Disable all output
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param streamer: Optionally invoke ansible-runner as one of the steps in the streaming pipeline
:param _input: An optional file or file-like object for use as input in a streaming pipeline
:param _output: An optional file or file-like object for use as output in a streaming pipeline
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param process_isolation: Enable process isolation, using either a container engine (e.g. podman) or a sandbox (e.g. bwrap).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
:param process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
:param process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
:param process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir'. (default: None)
:param container_options: List of container options to pass to execution engine.
:param resource_profiling: Enable collection of resource utilization data during playbook execution.
:param resource_profiling_base_cgroup: Name of existing cgroup which will be sub-grouped in order to measure resource utilization (default: ansible-runner)
:param resource_profiling_cpu_poll_interval: Interval (in seconds) between CPU polling for determining CPU usage (default: 0.25)
:param resource_profiling_memory_poll_interval: Interval (in seconds) between memory polling for determining memory usage (default: 0.25)
:param resource_profiling_pid_poll_interval: Interval (in seconds) between polling PID count for determining number of processes used (default: 0.25)
:param resource_profiling_results_dir: Directory where profiling data files should be saved (defaults to profiling_data folder inside private data dir)
:param directory_isolation_base_path: An optional path that will be used as the base path to create a temp directory; the project contents will be
copied to this location, which will then be used as the working directory during playbook execution.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param omit_event_data: Omits extra ansible event data from event payload (stdout and event still included)
:param only_failed_event_data: Omits extra ansible event data unless it's a failed event (stdout and event still included)
:param cli_execenv_cmd: Tells Ansible Runner to emulate the CLI of Ansible by prepping an Execution Environment and then passing the user provided cmdline
:type private_data_dir: str
:type ident: str
:type json_mode: bool
:type playbook: str or filename or list
:type inventory: str or dict or list
:type envvars: dict
:type extravars: dict
:type passwords: dict
:type settings: dict
:type ssh_key: str
:type artifact_dir: str
:type project_dir: str
:type rotate_artifacts: int
:type cmdline: str
:type limit: str
:type forks: int
:type quiet: bool
:type verbosity: int
:type streamer: str
:type _input: file
:type _output: file
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:type process_isolation: bool
:type process_isolation_executable: str
:type process_isolation_path: str
:type process_isolation_hide_paths: str or list
:type process_isolation_show_paths: str or list
:type process_isolation_ro_paths: str or list
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type resource_profiling: bool
:type resource_profiling_base_cgroup: str
:type resource_profiling_cpu_poll_interval: float
:type resource_profiling_memory_poll_interval: float
:type resource_profiling_pid_poll_interval: float
:type resource_profiling_results_dir: str
:type directory_isolation_base_path: str
:type fact_cache: str
:type fact_cache_type: str
:type omit_event_data: bool
:type only_failed_event_data: bool
:type cli_execenv_cmd: str
:returns: A :py:class:`ansible_runner.runner.Runner` object, or a simple object containing `rc` if run remotely
'''
r = init_runner(**kwargs)
r.run()
return r
def run_async(**kwargs):
'''
Runs an Ansible Runner task in the background which will start immediately. Returns the thread object and a Runner object.
This uses the same parameters as :py:func:`ansible_runner.interface.run`
:returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
'''
r = init_runner(**kwargs)
runner_thread = threading.Thread(target=r.run)
runner_thread.start()
return runner_thread, r
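# Illustrative usage sketch (not part of the original file); the private data
# directory and playbook name are hypothetical:
#
#   import ansible_runner
#   r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='test.yml')
#   print(r.status, r.rc)   # e.g. 'successful', 0
#
#   thread, r = ansible_runner.run_async(private_data_dir='/tmp/demo',
#                                        playbook='test.yml')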

185
ansible_runner/loader.py Normal file
View File

@ -0,0 +1,185 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import json
import codecs
from yaml import safe_load, YAMLError
from six import string_types
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.output import debug
class ArtifactLoader(object):
'''
Handles loading and caching file contents from disk
This class will load the file contents and attempt to deserialize the
contents as either JSON or YAML. If the file contents cannot be
deserialized, the contents will be returned to the caller as a string.
The deserialized file contents are stored as a cached object in the
instance to avoid any additional reads from disk for subsequent calls
to load the same file.
'''
def __init__(self, base_path):
self._cache = {}
self.base_path = base_path
def _load_json(self, contents):
'''
Attempts to deserialize the contents of a JSON object
Args:
contents (string): The contents to deserialize
Returns:
dict: If the contents are JSON serialized
None: If the contents are not JSON serialized
'''
try:
return json.loads(contents)
except ValueError:
pass
def _load_yaml(self, contents):
'''
Attempts to deserialize the contents of a YAML object
Args:
contents (string): The contents to deserialize
Returns:
dict: If the contents are YAML serialized
None: If the contents are not YAML serialized
'''
try:
return safe_load(contents)
except YAMLError:
pass
def get_contents(self, path):
'''
Loads the contents of the file specified by path
Args:
path (string): The relative or absolute path to the file to
be loaded. If the path is relative, then it is combined
with the base_path to generate a full path string
Returns:
string: The contents of the file as a string
Raises:
ConfigurationError: If the file cannot be loaded
'''
try:
if not os.path.exists(path):
raise ConfigurationError('specified path does not exist %s' % path)
with codecs.open(path, encoding='utf-8') as f:
data = f.read()
return data
except (IOError, OSError) as exc:
raise ConfigurationError('error trying to load file contents: %s' % exc)
def abspath(self, path):
'''
Transform the path to an absolute path
Args:
path (string): The path to transform to an absolute path
Returns:
string: The absolute path to the file
'''
if not path.startswith(os.path.sep) or path.startswith('~'):
path = os.path.expanduser(os.path.join(self.base_path, path))
return path
def isfile(self, path):
'''
Check if the path is a file
:param path: The path to the file to check. If the path is relative
it will be expanded to an absolute path
:returns: boolean
'''
return os.path.isfile(self.abspath(path))
def load_file(self, path, objtype=None, encoding='utf-8'):
'''
Load the file specified by path
This method will first try to load the file contents from cache and
if there is a cache miss, it will load the contents from disk
Args:
path (string): The full or relative path to the file to be loaded
encoding (string): The file contents text encoding
objtype (object): The object type of the file contents. This
is used to type check the deserialized content against the
contents loaded from disk.
Deserialization is skipped if objtype is string_types
Returns:
object: The deserialized file contents which could be either a
string object or a dict object
Raises:
ConfigurationError: If the file contents cannot be loaded or do not deserialize to the requested objtype
'''
path = self.abspath(path)
debug('file path is %s' % path)
if path in self._cache:
return self._cache[path]
try:
debug('cache miss, attempting to load file from disk: %s' % path)
contents = parsed_data = self.get_contents(path)
if encoding:
parsed_data = contents.encode(encoding)
except ConfigurationError as exc:
debug(exc)
raise
except UnicodeEncodeError:
raise ConfigurationError('unable to encode file contents')
if objtype is not string_types:
for deserializer in (self._load_json, self._load_yaml):
parsed_data = deserializer(contents)
if parsed_data:
break
if objtype and not isinstance(parsed_data, objtype):
debug('specified file %s is not of type %s' % (path, objtype))
raise ConfigurationError('invalid file serialization type for contents')
self._cache[path] = parsed_data
return parsed_data
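An illustrative sketch of how this loader is typically driven (the base path and file names here are assumptions):
from six import string_types

from ansible_runner.loader import ArtifactLoader

loader = ArtifactLoader('/tmp/demo')  # hypothetical base path
# Deserializes JSON or YAML and type-checks the result against dict
settings = loader.load_file('env/settings', dict)
# Passing string_types skips deserialization and returns the raw text
raw_args = loader.load_file('args', string_types, encoding=None)
# Subsequent calls for the same path are served from the instance cache
print(loader.abspath('env/settings'), loader.isfile('env/settings'))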

91
ansible_runner/output.py Normal file
View File

@ -0,0 +1,91 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import logging
DEBUG_ENABLED = False
TRACEBACK_ENABLED = True
_display_logger = logging.getLogger('ansible-runner.display')
_debug_logger = logging.getLogger('ansible-runner.debug')
def display(msg, log_only=False):
if not log_only:
_display_logger.log(70, msg)
_debug_logger.log(10, msg)
def debug(msg):
if DEBUG_ENABLED:
if isinstance(msg, Exception):
if TRACEBACK_ENABLED:
_debug_logger.exception(msg)
display(msg)
def set_logfile(filename):
handlers = [h.get_name() for h in _debug_logger.handlers]
if 'logfile' not in handlers:
logfile_handler = logging.FileHandler(filename)
logfile_handler.set_name('logfile')
formatter = logging.Formatter('%(asctime)s: %(message)s')
logfile_handler.setFormatter(formatter)
_debug_logger.addHandler(logfile_handler)
def set_debug(value):
global DEBUG_ENABLED
if value.lower() not in ('enable', 'disable'):
raise ValueError('value must be one of `enable` or `disable`, got %s' % value)
DEBUG_ENABLED = value.lower() == 'enable'
def set_traceback(value):
global TRACEBACK_ENABLED
if value.lower() not in ('enable', 'disable'):
raise ValueError('value must be one of `enable` or `disable`, got %s' % value)
TRACEBACK_ENABLED = value.lower() == 'enable'
def configure():
'''
Configures the logging facility
This function will set up an initial logging facility for handling display
and debug outputs. The default facility will send display messages to
stdout and the default debug facility will do nothing.
:returns: None
'''
root_logger = logging.getLogger()
root_logger.addHandler(logging.NullHandler())
root_logger.setLevel(99)
_display_logger.setLevel(70)
_debug_logger.setLevel(10)
display_handlers = [h.get_name() for h in _display_logger.handlers]
if 'stdout' not in display_handlers:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.set_name('stdout')
formatter = logging.Formatter('%(message)s')
stdout_handler.setFormatter(formatter)
_display_logger.addHandler(stdout_handler)
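A short sketch of wiring these helpers together (the log file path is an assumption):
from ansible_runner import output

output.configure()            # installs the stdout handler for display()
output.set_debug('enable')    # debug() messages are dropped until enabled
output.set_logfile('/tmp/ansible-runner-debug.log')
output.display('shown on stdout and written to the log')
output.debug('only emitted because debug was enabled above')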


472
ansible_runner/runner.py Normal file
View File

@ -0,0 +1,472 @@
import os
import stat
import time
import json
import errno
import signal
from subprocess import Popen, PIPE
import shutil
import codecs
import collections
import datetime
import logging
import six
import pexpect
import ansible_runner.plugins
from .utils import OutputEventFilter, cleanup_artifact_dir, ensure_str, collect_new_events
from .exceptions import CallbackError, AnsibleRunnerException
from ansible_runner.output import debug
logger = logging.getLogger('ansible-runner')
class Runner(object):
def __init__(self, config, cancel_callback=None, remove_partials=True, event_handler=None,
artifacts_handler=None, finished_callback=None, status_handler=None):
self.config = config
self.cancel_callback = cancel_callback
self.event_handler = event_handler
self.artifacts_handler = artifacts_handler
self.finished_callback = finished_callback
self.status_handler = status_handler
self.canceled = False
self.timed_out = False
self.errored = False
self.status = "unstarted"
self.rc = None
self.remove_partials = remove_partials
def event_callback(self, event_data):
'''
Invoked for every Ansible event to collect stdout with the event data and store it for
later use
'''
self.last_stdout_update = time.time()
if 'uuid' in event_data:
filename = '{}-partial.json'.format(event_data['uuid'])
partial_filename = os.path.join(self.config.artifact_dir,
'job_events',
filename)
full_filename = os.path.join(self.config.artifact_dir,
'job_events',
'{}-{}.json'.format(event_data['counter'],
event_data['uuid']))
try:
event_data.update(dict(runner_ident=str(self.config.ident)))
try:
with codecs.open(partial_filename, 'r', encoding='utf-8') as read_file:
partial_event_data = json.load(read_file)
event_data.update(partial_event_data)
if self.remove_partials:
os.remove(partial_filename)
except IOError:
debug("Failed to open ansible stdout callback plugin partial data file {}".format(partial_filename))
# prefer 'created' from partial data, but verbose events set time here
if 'created' not in event_data:
event_data['created'] = datetime.datetime.utcnow().isoformat()
if self.event_handler is not None:
should_write = self.event_handler(event_data)
else:
should_write = True
for plugin in ansible_runner.plugins:
ansible_runner.plugins[plugin].event_handler(self.config, event_data)
if should_write:
with codecs.open(full_filename, 'w', encoding='utf-8') as write_file:
os.chmod(full_filename, stat.S_IRUSR | stat.S_IWUSR)
json.dump(event_data, write_file)
except IOError as e:
debug("Failed writing event data: {}".format(e))
def status_callback(self, status):
self.status = status
status_data = {'status': status, 'runner_ident': str(self.config.ident)}
if status == 'starting':
status_data.update({'command': self.config.command, 'env': self.config.env, 'cwd': self.config.cwd})
for plugin in ansible_runner.plugins:
ansible_runner.plugins[plugin].status_handler(self.config, status_data)
if self.status_handler is not None:
self.status_handler(status_data, runner_config=self.config)
def run(self):
'''
Launch the Ansible task configured in self.config (A RunnerConfig object), returns once the
invocation is complete
'''
self.status_callback('starting')
stdout_filename = os.path.join(self.config.artifact_dir, 'stdout')
command_filename = os.path.join(self.config.artifact_dir, 'command')
try:
os.makedirs(self.config.artifact_dir, mode=0o700)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(self.config.artifact_dir):
pass
else:
raise
os.close(os.open(stdout_filename, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
job_events_path = os.path.join(self.config.artifact_dir, 'job_events')
if not os.path.exists(job_events_path):
os.mkdir(job_events_path, 0o700)
command = self.config.command
with codecs.open(command_filename, 'w', encoding='utf-8') as f:
os.chmod(command_filename, stat.S_IRUSR | stat.S_IWUSR)
json.dump(
{'command': command,
'cwd': self.config.cwd,
'env': self.config.env}, f, ensure_ascii=False
)
if self.config.ident is not None:
cleanup_artifact_dir(os.path.join(self.config.artifact_dir, ".."), self.config.rotate_artifacts)
stdout_handle = codecs.open(stdout_filename, 'w', encoding='utf-8')
stdout_handle = OutputEventFilter(stdout_handle, self.event_callback, self.config.suppress_ansible_output, output_json=self.config.json_mode)
expect_passwords = self.config.expect_passwords
if not isinstance(expect_passwords, collections.OrderedDict):
# We iterate over `expect_passwords.keys()` and
# `expect_passwords.values()` separately to map matched inputs to
# patterns and choose the proper string to send to the subprocess;
# enforce usage of an OrderedDict so that the ordering of elements in
# `keys()` matches `values()`.
expect_passwords = collections.OrderedDict(expect_passwords)
password_patterns = list(expect_passwords.keys())
password_values = list(expect_passwords.values())
# pexpect needs all env vars to be utf-8 encoded bytes
# https://github.com/pexpect/pexpect/issues/512
# Use a copy so as not to cause problems when serializing the job_env.
if self.config.containerized:
# We call the actual docker or podman executable right where we are
cwd = os.getcwd()
# If this is containerized, the shell environment calling podman has little
# to do with the actual job environment, but still needs PATH, auth, etc.
pexpect_env = os.environ.copy()
# But we still rely on env vars to pass secrets
pexpect_env.update(self.config.env)
# Write the keys to pass into container to expected file in artifacts dir
# option expecting should have already been written in ansible_runner.runner_config
env_file_host = os.path.join(self.config.artifact_dir, 'env.list')
with open(env_file_host, 'w') as f:
f.write('\n'.join(list(self.config.env.keys())))
else:
cwd = self.config.cwd
pexpect_env = self.config.env
env = {
ensure_str(k): ensure_str(v) if k != 'PATH' and isinstance(v, six.text_type) else v
for k, v in pexpect_env.items()
}
# Prepare to collect performance data
if self.config.resource_profiling:
cgroup_path = '{0}/{1}'.format(self.config.resource_profiling_base_cgroup, self.config.ident)
import getpass
import grp
user = getpass.getuser()
group = grp.getgrgid(os.getgid()).gr_name
cmd = 'cgcreate -a {user}:{group} -t {user}:{group} -g cpuacct,memory,pids:{}'.format(cgroup_path, user=user, group=group)
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
_, stderr = proc.communicate()
if proc.returncode:
# Unable to create cgroup
logger.error('Unable to create cgroup: {}'.format(stderr))
raise RuntimeError('Unable to create cgroup: {}'.format(stderr))
else:
logger.info("Created cgroup '{}'".format(cgroup_path))
self.status_callback('running')
self.last_stdout_update = time.time()
try:
child = pexpect.spawn(
command[0],
command[1:],
cwd=cwd,
env=env,
ignore_sighup=True,
encoding='utf-8',
codec_errors='replace',
echo=False,
use_poll=self.config.pexpect_use_poll,
)
child.logfile_read = stdout_handle
except pexpect.exceptions.ExceptionPexpect as e:
child = collections.namedtuple(
'MissingProcess', 'exitstatus isalive close'
)(
exitstatus=127,
isalive=lambda: False,
close=lambda: None,
)
def _decode(x):
return x.decode('utf-8') if six.PY2 else x
# create the events directory (the callback plugin won't run, so it
# won't get created)
events_directory = os.path.join(self.config.artifact_dir, 'job_events')
if not os.path.exists(events_directory):
os.mkdir(events_directory, 0o700)
stdout_handle.write(_decode(str(e)))
stdout_handle.write(_decode('\n'))
job_start = time.time()
while child.isalive():
result_id = child.expect(password_patterns,
timeout=self.config.pexpect_timeout,
searchwindowsize=100)
password = password_values[result_id]
if password is not None:
child.sendline(password)
self.last_stdout_update = time.time()
if self.cancel_callback:
try:
self.canceled = self.cancel_callback()
except Exception as e:
# TODO: logger.exception('Could not check cancel callback - cancelling immediately')
#if isinstance(extra_update_fields, dict):
# extra_update_fields['job_explanation'] = "System error during job execution, check system logs"
raise CallbackError("Exception in Cancel Callback: {}".format(e))
if self.config.job_timeout and not self.canceled and (time.time() - job_start) > self.config.job_timeout:
self.timed_out = True
# if isinstance(extra_update_fields, dict):
# extra_update_fields['job_explanation'] = "Job terminated due to timeout"
if self.canceled or self.timed_out or self.errored:
self.kill_container()
Runner.handle_termination(child.pid, is_cancel=self.canceled)
if self.config.idle_timeout and (time.time() - self.last_stdout_update) > self.config.idle_timeout:
self.kill_container()
Runner.handle_termination(child.pid, is_cancel=False)
self.timed_out = True
stdout_handle.flush()
stdout_handle.close()
child.close()
if self.canceled:
self.status_callback('canceled')
elif child.exitstatus == 0 and not self.timed_out:
self.status_callback('successful')
elif self.timed_out:
self.status_callback('timeout')
else:
self.status_callback('failed')
self.rc = child.exitstatus if not (self.timed_out or self.canceled) else 254
for filename, data in [
('status', self.status),
('rc', self.rc),
]:
artifact_path = os.path.join(self.config.artifact_dir, filename)
if not os.path.exists(artifact_path):
os.close(os.open(artifact_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
with open(artifact_path, 'w') as f:
f.write(str(data))
if self.config.directory_isolation_path and self.config.directory_isolation_cleanup:
shutil.rmtree(self.config.directory_isolation_path)
if self.config.process_isolation and self.config.process_isolation_path_actual:
def _delete(retries=15):
try:
shutil.rmtree(self.config.process_isolation_path_actual)
except OSError as e:
res = False
if e.errno == 16 and retries > 0:
time.sleep(1)
res = _delete(retries=retries - 1)
if not res:
raise
return True
_delete()
if self.config.resource_profiling:
cmd = 'cgdelete -g cpuacct,memory,pids:{}'.format(cgroup_path)
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
_, stderr = proc.communicate()
if proc.returncode:
logger.error('Failed to delete cgroup: {}'.format(stderr))
raise RuntimeError('Failed to delete cgroup: {}'.format(stderr))
if self.artifacts_handler is not None:
try:
self.artifacts_handler(self.config.artifact_dir)
except Exception as e:
raise CallbackError("Exception in Artifact Callback: {}".format(e))
if self.finished_callback is not None:
try:
self.finished_callback(self)
except Exception as e:
raise CallbackError("Exception in Finished Callback: {}".format(e))
return self.status, self.rc
@property
def stdout(self):
'''
Returns an open file handle to the stdout representing the Ansible run
'''
stdout_path = os.path.join(self.config.artifact_dir, 'stdout')
if not os.path.exists(stdout_path):
raise AnsibleRunnerException("stdout missing")
return open(stdout_path, 'r')
@property
def events(self):
'''
A generator that will return all ansible job events in the order that they were emitted from Ansible
Example:
{
"event":"runner_on_ok",
"uuid":"00a50d9c-161a-4b74-b978-9f60becaf209",
"stdout":"ok: [localhost] => {\\r\\n \\" msg\\":\\"Test!\\"\\r\\n}",
"counter":6,
"pid":740,
"created":"2018-04-05T18:24:36.096725",
"end_line":10,
"start_line":7,
"event_data":{
"play_pattern":"all",
"play":"all",
"task":"debug",
"task_args":"msg=Test!",
"remote_addr":"localhost",
"res":{
"msg":"Test!",
"changed":false,
"_ansible_verbose_always":true,
"_ansible_no_log":false
},
"pid":740,
"play_uuid":"0242ac11-0002-443b-cdb1-000000000006",
"task_uuid":"0242ac11-0002-443b-cdb1-000000000008",
"event_loop":null,
"playbook_uuid":"634edeee-3228-4c17-a1b4-f010fdd42eb2",
"playbook":"test.yml",
"task_action":"debug",
"host":"localhost",
"task_path":"/tmp/demo/project/test.yml:3"
}
}
'''
# collection of all the events that were yielded
old_events = {}
event_path = os.path.join(self.config.artifact_dir, 'job_events')
# Wait for events dir to be created
now = datetime.datetime.now()
while not os.path.exists(event_path):
time.sleep(0.05)
wait_time = datetime.datetime.now() - now
if wait_time.total_seconds() > 60:
raise AnsibleRunnerException("events directory is missing: %s" % event_path)
while self.status == "running":
for event, old_evnts in collect_new_events(event_path, old_events):
old_events = old_evnts
yield event
# collect new events that were written after the playbook has finished
for event, old_evnts in collect_new_events(event_path, old_events):
old_events = old_evnts
yield event
@property
def stats(self):
'''
Returns the final high level stats from the Ansible run
Example:
{'dark': {}, 'failures': {}, 'skipped': {}, 'ok': {u'localhost': 2}, 'processed': {u'localhost': 1}}
'''
last_event = list(filter(lambda x: 'event' in x and x['event'] == 'playbook_on_stats',
self.events))
if not last_event:
return None
last_event = last_event[0]['event_data']
return dict(skipped=last_event.get('skipped', {}),
ok=last_event.get('ok', {}),
dark=last_event.get('dark', {}),
failures=last_event.get('failures', {}),
ignored=last_event.get('ignored', {}),
rescued=last_event.get('rescued', {}),
processed=last_event.get('processed', {}),
changed=last_event.get('changed', {}))
def host_events(self, host):
'''
Given a host name, this will return all task events executed on that host
'''
all_host_events = filter(lambda x: 'event_data' in x and 'host' in x['event_data'] and x['event_data']['host'] == host,
self.events)
return all_host_events
def kill_container(self):
'''
Internal method to terminate a container being used for job isolation
'''
container_name = self.config.container_name
if container_name:
container_cli = self.config.process_isolation_executable
cmd = '{} kill {}'.format(container_cli, container_name)
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
_, stderr = proc.communicate()
if proc.returncode:
logger.info('Error from {} kill {} command:\n{}'.format(container_cli, container_name, stderr))
else:
logger.info("Killed container {}".format(container_name))
@classmethod
def handle_termination(cls, pid, pidfile=None, is_cancel=True):
'''
Internal method to terminate a subprocess spawned by `pexpect` representing an invocation of runner.
:param pid: the process id of the running job.
:param pidfile: the daemon's PID file
:param is_cancel: flag indicating whether this termination was caused by the
instance's cancel_flag.
'''
try:
pgroup = os.getpgid(pid)
os.killpg(pgroup, signal.SIGKILL)
except (OSError, ProcessLookupError):
pass
try:
os.remove(pidfile)
except (TypeError, OSError):
pass
def get_fact_cache(self, host):
'''
Get the entire fact cache only if the fact_cache_type is 'jsonfile'
'''
if self.config.fact_cache_type != 'jsonfile':
raise Exception('Unsupported fact cache type. Only "jsonfile" is supported for reading and writing facts from ansible-runner')
fact_cache = os.path.join(self.config.fact_cache, host)
if os.path.exists(fact_cache):
with open(fact_cache) as f:
return json.loads(f.read())
return {}
def set_fact_cache(self, host, data):
'''
Set the entire fact cache data only if the fact_cache_type is 'jsonfile'
'''
if self.config.fact_cache_type != 'jsonfile':
raise Exception('Unsupported fact cache type. Only "jsonfile" is supported for reading and writing facts from ansible-runner')
fact_cache = os.path.join(self.config.fact_cache, host)
if not os.path.exists(os.path.dirname(fact_cache)):
os.makedirs(os.path.dirname(fact_cache), mode=0o700)
with open(fact_cache, 'w') as f:
return f.write(json.dumps(data))
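A sketch of consuming a finished Runner through the properties above, assuming the run is kicked off via the top-level interface (the directory and playbook names are made up):
import ansible_runner

r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='test.yml')
for event in r.events:  # replayed from artifacts/<ident>/job_events/*.json
    print(event['event'], event.get('stdout', ''))
print(r.stats)  # summary parsed from the playbook_on_stats event
print(list(r.host_events('localhost')))  # task events for a single host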

881
ansible_runner/runner_config.py Normal file
View File

@ -0,0 +1,881 @@
############################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import logging
import os
import pexpect
import re
import shlex
import stat
import tempfile
import six
from uuid import uuid4
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from distutils.dir_util import copy_tree
from six import iteritems, string_types, text_type
from ansible_runner import defaults
from ansible_runner import output
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.loader import ArtifactLoader
from ansible_runner.output import debug
from ansible_runner.utils import (
open_fifo_write,
args2cmdline,
sanitize_container_name
)
logger = logging.getLogger('ansible-runner')
class ExecutionMode():
NONE = 0
ANSIBLE = 1
ANSIBLE_PLAYBOOK = 2
RAW = 3
CLI_EXECENV = 4
class RunnerConfig(object):
"""
A ``Runner`` configuration object that's meant to encapsulate the configuration used by the
:py:class:`ansible_runner.runner.Runner` object to launch and manage the invocation of ``ansible``
and ``ansible-playbook``
Typically this object is initialized for you when using the standard ``run`` interfaces in :py:mod:`ansible_runner.interface`
but can be used to construct the ``Runner`` configuration to be invoked elsewhere. It can also be overridden to provide different
functionality to the Runner object.
:Example:
>>> rc = RunnerConfig(...)
>>> r = Runner(config=rc)
>>> r.run()
"""
def __init__(self,
private_data_dir=None, playbook=None, ident=None,
inventory=None, roles_path=None, limit=None, module=None, module_args=None,
verbosity=None, quiet=False, json_mode=False, artifact_dir=None,
rotate_artifacts=0, host_pattern=None, binary=None, extravars=None, suppress_ansible_output=False,
process_isolation=False, process_isolation_executable=None, process_isolation_path=None,
process_isolation_hide_paths=None, process_isolation_show_paths=None, process_isolation_ro_paths=None,
container_image=None, container_volume_mounts=None, container_options=None,
resource_profiling=False, resource_profiling_base_cgroup='ansible-runner', resource_profiling_cpu_poll_interval=0.25,
resource_profiling_memory_poll_interval=0.25, resource_profiling_pid_poll_interval=0.25,
resource_profiling_results_dir=None,
tags=None, skip_tags=None, fact_cache_type='jsonfile', fact_cache=None, ssh_key=None,
project_dir=None, directory_isolation_base_path=None, envvars=None, forks=None, cmdline=None, omit_event_data=False,
only_failed_event_data=False, cli_execenv_cmd=""):
self.private_data_dir = os.path.abspath(private_data_dir)
if ident is None:
self.ident = str(uuid4())
else:
self.ident = ident
self.json_mode = json_mode
self.playbook = playbook
self.inventory = inventory
self.roles_path = roles_path
self.limit = limit
self.module = module
self.module_args = module_args
self.cli_execenv_cmd = cli_execenv_cmd
self.host_pattern = host_pattern
self.binary = binary
self.rotate_artifacts = rotate_artifacts
if artifact_dir is None:
self.artifact_dir = os.path.join(self.private_data_dir, 'artifacts')
else:
self.artifact_dir = os.path.abspath(artifact_dir)
if self.ident is not None:
self.artifact_dir = os.path.join(self.artifact_dir, "{}".format(self.ident))
self.extra_vars = extravars
self.process_isolation = process_isolation
self.process_isolation_executable = process_isolation_executable or defaults.default_process_isolation_executable
self.process_isolation_path = process_isolation_path
self.container_name = None # like other properties, not accurate until prepare is called
self.process_isolation_path_actual = None
self.process_isolation_hide_paths = process_isolation_hide_paths
self.process_isolation_show_paths = process_isolation_show_paths
self.process_isolation_ro_paths = process_isolation_ro_paths
self.container_image = container_image or defaults.default_container_image
self.container_volume_mounts = container_volume_mounts
self.container_options = container_options
self.resource_profiling = resource_profiling
self.resource_profiling_base_cgroup = resource_profiling_base_cgroup
self.resource_profiling_cpu_poll_interval = resource_profiling_cpu_poll_interval
self.resource_profiling_memory_poll_interval = resource_profiling_memory_poll_interval
self.resource_profiling_pid_poll_interval = resource_profiling_pid_poll_interval
self.resource_profiling_results_dir = resource_profiling_results_dir
self.directory_isolation_path = directory_isolation_base_path
if not project_dir:
self.project_dir = os.path.join(self.private_data_dir, 'project')
else:
self.project_dir = project_dir
self.verbosity = verbosity
self.quiet = quiet
self.suppress_ansible_output = suppress_ansible_output
self.loader = ArtifactLoader(self.private_data_dir)
self.tags = tags
self.skip_tags = skip_tags
self.fact_cache_type = fact_cache_type
self.fact_cache = os.path.join(self.artifact_dir, fact_cache or 'fact_cache') if self.fact_cache_type == 'jsonfile' else None
self.ssh_key_data = ssh_key
self.execution_mode = ExecutionMode.NONE
self.envvars = envvars
self.forks = forks
self.cmdline_args = cmdline
self.omit_event_data = omit_event_data
self.only_failed_event_data = only_failed_event_data
_CONTAINER_ENGINES = ('docker', 'podman')
@property
def sandboxed(self):
return self.process_isolation and self.process_isolation_executable not in self._CONTAINER_ENGINES
@property
def containerized(self):
return self.process_isolation and self.process_isolation_executable in self._CONTAINER_ENGINES
def prepare(self):
"""
Performs basic checks and then properly invokes
- prepare_inventory
- prepare_env
- prepare_command
It's also responsible for wrapping the command with the proper ssh agent invocation
and setting early ANSIBLE_ environment variables.
"""
# ansible_path = find_executable('ansible')
# if ansible_path is None or not os.access(ansible_path, os.X_OK):
# raise ConfigurationError("Ansible not found. Make sure that it is installed.")
if self.private_data_dir is None:
raise ConfigurationError("Runner Base Directory is not defined")
if self.module and self.playbook:
raise ConfigurationError("Only one of playbook and module options are allowed")
if not os.path.exists(self.artifact_dir):
os.makedirs(self.artifact_dir, mode=0o700)
if self.sandboxed and self.directory_isolation_path is not None:
self.directory_isolation_path = tempfile.mkdtemp(prefix='runner_di_', dir=self.directory_isolation_path)
if os.path.exists(self.project_dir):
output.debug("Copying directory tree from {} to {} for working directory isolation".format(self.project_dir,
self.directory_isolation_path))
copy_tree(self.project_dir, self.directory_isolation_path, preserve_symlinks=True)
self.prepare_env()
self.prepare_inventory()
self.prepare_command()
if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK and self.playbook is None:
raise ConfigurationError("Runner playbook required when running ansible-playbook")
elif self.execution_mode == ExecutionMode.ANSIBLE and self.module is None:
raise ConfigurationError("Runner module required when running ansible")
elif self.execution_mode == ExecutionMode.CLI_EXECENV and self.cmdline_args is None:
raise ConfigurationError("Runner requires arguments to pass to ansible, try '-h' for ansible help output")
elif self.execution_mode == ExecutionMode.NONE:
raise ConfigurationError("No executable for runner to run")
# write the SSH key data into a fifo read by ssh-agent
if self.ssh_key_data:
self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
open_fifo_write(self.ssh_key_path, self.ssh_key_data)
self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path)
# Use local callback directory
if not self.containerized:
callback_dir = self.env.get('AWX_LIB_DIRECTORY', os.getenv('AWX_LIB_DIRECTORY'))
if callback_dir is None:
callback_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], "callbacks")
python_path = self.env.get('PYTHONPATH', os.getenv('PYTHONPATH', ''))
if python_path and not python_path.endswith(':'):
python_path += ':'
self.env['PYTHONPATH'] = python_path + callback_dir
self.env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(filter(None, (self.env.get('ANSIBLE_CALLBACK_PLUGINS'), callback_dir)))
if 'AD_HOC_COMMAND_ID' in self.env:
self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
else:
self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
if 'ANSIBLE_HOST_KEY_CHECKING' not in self.env:
self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
if not self.containerized:
self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir
if self.resource_profiling:
callback_whitelist = os.environ.get('ANSIBLE_CALLBACK_WHITELIST', '').strip()
self.env['ANSIBLE_CALLBACK_WHITELIST'] = ','.join(filter(None, [callback_whitelist, 'cgroup_perf_recap']))
self.env['CGROUP_CONTROL_GROUP'] = '{}/{}'.format(self.resource_profiling_base_cgroup, self.ident)
if self.resource_profiling_results_dir:
cgroup_output_dir = self.resource_profiling_results_dir
else:
cgroup_output_dir = os.path.normpath(os.path.join(self.private_data_dir, 'profiling_data'))
# Create results directory if it does not exist
if not os.path.isdir(cgroup_output_dir):
os.mkdir(cgroup_output_dir, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
self.env['CGROUP_OUTPUT_DIR'] = cgroup_output_dir
self.env['CGROUP_OUTPUT_FORMAT'] = 'json'
self.env['CGROUP_CPU_POLL_INTERVAL'] = str(self.resource_profiling_cpu_poll_interval)
self.env['CGROUP_MEMORY_POLL_INTERVAL'] = str(self.resource_profiling_memory_poll_interval)
self.env['CGROUP_PID_POLL_INTERVAL'] = str(self.resource_profiling_pid_poll_interval)
self.env['CGROUP_FILE_PER_TASK'] = 'True'
self.env['CGROUP_WRITE_FILES'] = 'True'
self.env['CGROUP_DISPLAY_RECAP'] = 'False'
if self.roles_path:
if isinstance(self.roles_path, list):
self.env['ANSIBLE_ROLES_PATH'] = ':'.join(self.roles_path)
else:
self.env['ANSIBLE_ROLES_PATH'] = self.roles_path
if self.sandboxed:
debug('sandbox enabled')
self.command = self.wrap_args_for_sandbox(self.command)
else:
debug('sandbox disabled')
if self.resource_profiling and self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
self.command = self.wrap_args_with_cgexec(self.command)
if self.fact_cache_type == 'jsonfile':
self.env['ANSIBLE_CACHE_PLUGIN'] = 'jsonfile'
if not self.containerized:
self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = self.fact_cache
self.env["RUNNER_OMIT_EVENTS"] = str(self.omit_event_data)
self.env["RUNNER_ONLY_FAILED_EVENTS"] = str(self.only_failed_event_data)
if self.containerized:
debug('containerization enabled')
self.command = self.wrap_args_for_containerization(self.command)
else:
debug('containerization disabled')
debug('env:')
for k, v in sorted(self.env.items()):
debug(f' {k}: {v}')
if hasattr(self, 'command') and isinstance(self.command, list):
debug(f"command: {' '.join(self.command)}")
def prepare_inventory(self):
"""
Prepares the default inventory under ``private_data_dir`` when one was not provided to the constructor.
"""
if self.containerized:
self.inventory = '/runner/inventory/hosts'
return
if self.inventory is None:
if os.path.exists(os.path.join(self.private_data_dir, "inventory")):
self.inventory = os.path.join(self.private_data_dir, "inventory")
def prepare_env(self):
"""
Manages reading environment metadata files under ``private_data_dir`` and merging/updating
with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily
"""
try:
passwords = self.loader.load_file('env/passwords', Mapping)
self.expect_passwords = {
re.compile(pattern, re.M): password
for pattern, password in iteritems(passwords)
}
except ConfigurationError:
output.debug('Not loading passwords')
self.expect_passwords = dict()
self.expect_passwords[pexpect.TIMEOUT] = None
self.expect_passwords[pexpect.EOF] = None
try:
self.settings = self.loader.load_file('env/settings', Mapping)
except ConfigurationError:
output.debug("Not loading settings")
self.settings = dict()
self.process_isolation = self.settings.get('process_isolation', self.process_isolation)
self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)
if self.containerized:
self.container_name = "ansible_runner_{}".format(sanitize_container_name(self.ident))
self.env = {}
# Special flags to convey info to entrypoint or process in container
self.env['LAUNCHED_BY_RUNNER'] = '1'
artifact_dir = os.path.join("/runner/artifacts", "{}".format(self.ident))
self.env['AWX_ISOLATED_DATA_DIR'] = artifact_dir
if self.fact_cache_type == 'jsonfile':
self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = os.path.join(artifact_dir, 'fact_cache')
else:
# seed env with existing shell env
self.env = os.environ.copy()
if self.envvars and isinstance(self.envvars, dict):
self.env.update(self.envvars)
try:
envvars = self.loader.load_file('env/envvars', Mapping)
if envvars:
self.env.update({str(k): str(v) for k, v in envvars.items()})
except ConfigurationError:
output.debug("Not loading environment vars")
# Still need to pass default environment to pexpect
try:
if self.ssh_key_data is None:
self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
except ConfigurationError:
output.debug("Not loading ssh key")
self.ssh_key_data = None
self.idle_timeout = self.settings.get('idle_timeout', None)
self.job_timeout = self.settings.get('job_timeout', None)
self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)
self.process_isolation_path = self.settings.get('process_isolation_path', self.process_isolation_path)
self.process_isolation_hide_paths = self.settings.get('process_isolation_hide_paths', self.process_isolation_hide_paths)
self.process_isolation_show_paths = self.settings.get('process_isolation_show_paths', self.process_isolation_show_paths)
self.process_isolation_ro_paths = self.settings.get('process_isolation_ro_paths', self.process_isolation_ro_paths)
self.directory_isolation_cleanup = bool(self.settings.get('directory_isolation_cleanup', True))
self.container_image = self.settings.get('container_image', self.container_image)
self.container_volume_mounts = self.settings.get('container_volume_mounts', self.container_volume_mounts)
self.container_options = self.settings.get('container_options', self.container_options)
self.resource_profiling = self.settings.get('resource_profiling', self.resource_profiling)
self.resource_profiling_base_cgroup = self.settings.get('resource_profiling_base_cgroup', self.resource_profiling_base_cgroup)
self.resource_profiling_cpu_poll_interval = self.settings.get('resource_profiling_cpu_poll_interval', self.resource_profiling_cpu_poll_interval)
self.resource_profiling_memory_poll_interval = self.settings.get('resource_profiling_memory_poll_interval',
self.resource_profiling_memory_poll_interval)
self.resource_profiling_pid_poll_interval = self.settings.get('resource_profiling_pid_poll_interval', self.resource_profiling_pid_poll_interval)
self.resource_profiling_results_dir = self.settings.get('resource_profiling_results_dir', self.resource_profiling_results_dir)
self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)
if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(self.project_dir):
self.cwd = self.private_data_dir
else:
if self.directory_isolation_path is not None:
self.cwd = self.directory_isolation_path
else:
self.cwd = self.project_dir
if 'fact_cache' in self.settings:
if 'fact_cache_type' in self.settings:
if self.settings['fact_cache_type'] == 'jsonfile':
self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
else:
self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
def prepare_command(self):
"""
Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given
and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command`
"""
if not self.cli_execenv_cmd:
try:
cmdline_args = self.loader.load_file('args', string_types, encoding=None)
if six.PY2 and isinstance(cmdline_args, text_type):
cmdline_args = cmdline_args.encode('utf-8')
self.command = shlex.split(cmdline_args)
self.execution_mode = ExecutionMode.RAW
except ConfigurationError:
self.command = self.generate_ansible_command()
else:
if self.cli_execenv_cmd:
if self.cli_execenv_cmd == 'adhoc':
self.command = ['ansible'] + self.cmdline_args
elif self.cli_execenv_cmd == 'playbook':
self.command = ['ansible-playbook'] + self.cmdline_args
self.execution_mode = ExecutionMode.CLI_EXECENV
def generate_ansible_command(self):
"""
Given that the ``RunnerConfig`` preparation methods have been run to gather the inputs this method
will generate the ``ansible`` or ``ansible-playbook`` command that will be used by the
:py:class:`ansible_runner.runner.Runner` object to start the process
"""
# FIXME - this never happens because the conditional in prepare_command
# "branches around it" and I need to figure out if that's the
# correct course of action or not.
if self.cli_execenv_cmd:
if self.cli_execenv_cmd == 'adhoc':
base_command = 'ansible'
elif self.cli_execenv_cmd == 'playbook':
base_command = 'ansible-playbook'
self.execution_mode = ExecutionMode.CLI_EXECENV
elif self.binary is not None:
base_command = self.binary
self.execution_mode = ExecutionMode.RAW
elif self.module is not None:
base_command = 'ansible'
self.execution_mode = ExecutionMode.ANSIBLE
else:
base_command = 'ansible-playbook'
self.execution_mode = ExecutionMode.ANSIBLE_PLAYBOOK
exec_list = [base_command]
if self.cli_execenv_cmd:
# Provide dummy data for Tower/AWX vars so that playbooks won't
# fail with undefined var errors
awx_tower_vars = {
'awx_job_id': 1,
'tower_job_id': 1,
'awx_job_launch_type': 'workflow',
'tower_job_launch_type': 'workflow',
'awx_workflow_job_name': 'workflow-job',
'tower_workflow_job_name': 'workflow-job',
'awx_workflow_job_id': 1,
'tower_workflow_job_id': 1,
'awx_parent_job_schedule_id': 1,
'tower_parent_job_schedule_id': 1,
'awx_parent_job_schedule_name': 'job-schedule',
'tower_parent_job_schedule_name': 'job-schedule',
}
for k, v in awx_tower_vars.items():
exec_list.append('-e')
exec_list.append('"{}={}"'.format(k, v))
try:
if self.cmdline_args:
cmdline_args = self.cmdline_args
else:
cmdline_args = self.loader.load_file('env/cmdline', string_types, encoding=None)
if six.PY2 and isinstance(cmdline_args, text_type):
cmdline_args = cmdline_args.encode('utf-8')
args = shlex.split(cmdline_args)
exec_list.extend(args)
except ConfigurationError:
pass
if self.inventory is None:
pass
elif isinstance(self.inventory, list):
for i in self.inventory:
exec_list.append("-i")
exec_list.append(i)
else:
exec_list.append("-i")
exec_list.append(self.inventory)
if self.limit is not None:
exec_list.append("--limit")
exec_list.append(self.limit)
if self.loader.isfile('env/extravars'):
if self.containerized:
extravars_path = '/runner/env/extravars'
else:
extravars_path = self.loader.abspath('env/extravars')
exec_list.extend(['-e', '@{}'.format(extravars_path)])
if self.extra_vars:
if isinstance(self.extra_vars, dict) and self.extra_vars:
extra_vars_list = []
for k in self.extra_vars:
extra_vars_list.append("\"{}\":{}".format(k, json.dumps(self.extra_vars[k])))
exec_list.extend(
[
'-e',
'{%s}' % ','.join(extra_vars_list)
]
)
elif self.loader.isfile(self.extra_vars):
exec_list.extend(['-e', '@{}'.format(self.loader.abspath(self.extra_vars))])
if self.verbosity:
v = 'v' * self.verbosity
exec_list.append('-{}'.format(v))
if self.tags:
exec_list.extend(['--tags', '{}'.format(self.tags)])
if self.skip_tags:
exec_list.extend(['--skip-tags', '{}'.format(self.skip_tags)])
if self.forks:
exec_list.extend(['--forks', '{}'.format(self.forks)])
# Other parameters
if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
exec_list.append(self.playbook)
elif self.execution_mode == ExecutionMode.ANSIBLE:
exec_list.append("-m")
exec_list.append(self.module)
if self.module_args is not None:
exec_list.append("-a")
exec_list.append(self.module_args)
if self.host_pattern is not None:
exec_list.append(self.host_pattern)
return exec_list
def build_process_isolation_temp_dir(self):
'''
Create a temporary directory for process isolation to use.
'''
path = tempfile.mkdtemp(prefix='ansible_runner_pi_', dir=self.process_isolation_path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
return path
def wrap_args_with_cgexec(self, args):
'''
Wrap existing command line with cgexec in order to profile resource usage
'''
new_args = ['cgexec', '--sticky', '-g', 'cpuacct,memory,pids:{}/{}'.format(self.resource_profiling_base_cgroup, self.ident)]
new_args.extend(args)
return new_args
def wrap_args_for_sandbox(self, args):
'''
Wrap existing command line with bwrap to restrict access to:
- self.process_isolation_path (generally, /tmp) (except for own /tmp files)
'''
cwd = os.path.realpath(self.cwd)
self.process_isolation_path_actual = self.build_process_isolation_temp_dir()
new_args = [self.process_isolation_executable or 'bwrap', '--die-with-parent', '--unshare-pid', '--dev-bind', '/', '/', '--proc', '/proc']
for path in sorted(set(self.process_isolation_hide_paths or [])):
if not os.path.exists(path):
logger.debug('hide path not found: {0}'.format(path))
continue
path = os.path.realpath(path)
if os.path.isdir(path):
new_path = tempfile.mkdtemp(dir=self.process_isolation_path_actual)
os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
else:
handle, new_path = tempfile.mkstemp(dir=self.process_isolation_path_actual)
os.close(handle)
os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR)
new_args.extend(['--bind', '{0}'.format(new_path), '{0}'.format(path)])
if self.private_data_dir:
show_paths = [self.private_data_dir]
else:
show_paths = [cwd]
for path in sorted(set(self.process_isolation_ro_paths or [])):
if not os.path.exists(path):
logger.debug('read-only path not found: {0}'.format(path))
continue
path = os.path.realpath(path)
new_args.extend(['--ro-bind', '{0}'.format(path), '{0}'.format(path)])
show_paths.extend(self.process_isolation_show_paths or [])
for path in sorted(set(show_paths)):
if not os.path.exists(path):
logger.debug('show path not found: {0}'.format(path))
continue
path = os.path.realpath(path)
new_args.extend(['--bind', '{0}'.format(path), '{0}'.format(path)])
if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
# playbook runs should cwd to the SCM checkout dir
if self.directory_isolation_path is not None:
new_args.extend(['--chdir', os.path.realpath(self.directory_isolation_path)])
else:
new_args.extend(['--chdir', os.path.realpath(self.project_dir)])
elif self.execution_mode == ExecutionMode.ANSIBLE:
# ad-hoc runs should cwd to the root of the private data dir
new_args.extend(['--chdir', os.path.realpath(self.private_data_dir)])
new_args.extend(args)
return new_args
def wrap_args_for_containerization(self, args):
new_args = [self.process_isolation_executable]
new_args.extend(['run', '--rm', '--tty', '--interactive'])
container_workdir = "/runner/project"
new_args.extend(["--workdir", container_workdir])
self.cwd = container_workdir
def _ensure_path_safe_to_mount(path):
if path in ('/home', '/usr'):
raise ConfigurationError("When using containerized execution, cannot mount /home or /usr")
_ensure_path_safe_to_mount(self.private_data_dir)
def _parse_cli_execenv_cmd_playbook_args():
# Determine all inventory file paths, accounting for the possibility of multiple
# inventory files provided
_inventory_paths = []
_playbook = ""
_book_keeping_copy = self.cmdline_args.copy()
for arg in self.cmdline_args:
if arg == '-i':
_book_keeping_copy_inventory_index = _book_keeping_copy.index('-i')
_inventory_paths.append(self.cmdline_args[_book_keeping_copy_inventory_index + 1])
_book_keeping_copy.pop(_book_keeping_copy_inventory_index)
_book_keeping_copy.pop(_book_keeping_copy_inventory_index)
if len(_book_keeping_copy) == 1:
# it's probably safe to assume this is the playbook
_playbook = _book_keeping_copy[0]
elif _book_keeping_copy[0][0] != '-':
# this should be the playbook, it's the only "naked" arg
_playbook = _book_keeping_copy[0]
else:
# parse everything beyond the first arg because we checked that
# in the previous case already
for arg in _book_keeping_copy[1:]:
if arg[0] == '-':
continue
elif _book_keeping_copy[(_book_keeping_copy.index(arg) - 1)][0] != '-':
_playbook = arg
break
return (_playbook, _inventory_paths)
if self.cli_execenv_cmd:
_parsed_playbook_path, _parsed_inventory_paths = _parse_cli_execenv_cmd_playbook_args()
if self.cli_execenv_cmd == 'playbook':
playbook_file_path = _parsed_playbook_path
_ensure_path_safe_to_mount(playbook_file_path)
if os.path.isabs(playbook_file_path) and (os.path.dirname(playbook_file_path) != '/'):
new_args.extend([
"-v", "{}:{}".format(
os.path.dirname(playbook_file_path),
os.path.dirname(playbook_file_path),
)
])
else:
new_args.extend([
"-v", "{}:/runner/project/{}".format(
os.path.dirname(os.path.abspath(playbook_file_path)),
os.path.dirname(playbook_file_path),
)
])
# volume mount inventory into the exec env container if provided at cli
if '-i' in self.cmdline_args:
inventory_file_paths = _parsed_inventory_paths
inventory_playbook_share_parent = False
for inventory_file_path in inventory_file_paths:
_ensure_path_safe_to_mount(inventory_file_path)
if self.cli_execenv_cmd == 'playbook':
if os.path.dirname(os.path.abspath(inventory_file_path)) == \
os.path.dirname(os.path.abspath(playbook_file_path)):
inventory_playbook_share_parent = True
if not inventory_file_path.endswith(',') and not inventory_playbook_share_parent:
if os.path.isabs(inventory_file_path) and (os.path.dirname(inventory_file_path) != '/'):
new_args.extend([
"-v", "{}:{}".format(
os.path.dirname(inventory_file_path),
os.path.dirname(inventory_file_path),
)
])
else:
new_args.extend([
"-v", "{}:/runner/project/{}".format(
os.path.dirname(os.path.abspath(inventory_file_path)),
os.path.dirname(inventory_file_path),
)
])
# Handle automounts
cli_automounts = [
{
'ENVS': ['SSH_AUTH_SOCK'],
'PATHS': [
{
'src': '{}/.ssh/'.format(os.environ['HOME']),
'dest': '/home/runner/.ssh/'
},
{
'src': '/etc/ssh/ssh_known_hosts',
'dest': '/etc/ssh/ssh_known_hosts'
}
]
},
{
"ENVS": ['K8S_AUTH_KUBECONFIG'],
"PATHS": [
{
'src': '{}/.kube/'.format(os.environ['HOME']),
'dest': '/home/runner/.kube/'
},
]
},
{
"ENVS": [
'AWS_URL', 'EC2_URL', 'AWS_ACCESS_KEY_ID', 'AWS_ACCESS_KEY',
'EC2_ACCESS_KEY', 'AWS_SECRET_ACCESS_KEY', 'AWS_SECRET_KEY', 'EC2_SECRET_KEY',
'AWS_SECURITY_TOKEN', 'EC2_SECURITY_TOKEN', 'AWS_REGION', 'EC2_REGION'
],
"PATHS": [
{
'src': '{}/.boto/'.format(os.environ['HOME']),
'dest': '/home/runner/.boto/'
},
]
},
{
"ENVS": [
'AZURE_SUBSCRIPTION_ID', 'AZURE_CLIENT_ID', 'AZURE_SECRET', 'AZURE_TENANT',
'AZURE_AD_USER', 'AZURE_PASSWORD'
],
"PATHS": [
{
'src': '{}/.azure/'.format(os.environ['HOME']),
'dest': '/home/runner/.azure/'
},
]
},
{
"ENVS": [
'gcp_service_account_file', 'GCP_SERVICE_ACCOUNT_FILE', 'GCP_SERVICE_ACCOUNT_CONTENTS',
'GCP_SERVICE_ACCOUNT_EMAIL', 'GCP_AUTH_KIND', 'GCP_SCOPES'
],
"PATHS": [
{
'src': '{}/.gcp/'.format(os.environ['HOME']),
'dest': '/home/runner/.gcp/'
},
]
}
]
for cli_automount in cli_automounts:
for env in cli_automount['ENVS']:
if env in os.environ:
dest_path = os.environ[env]
if os.path.exists(os.environ[env]):
if os.environ[env].startswith(os.environ['HOME']):
# str.lstrip() strips a character set rather than a prefix, so slice HOME off instead
dest_path = '/home/runner/{}'.format(os.environ[env][len(os.environ['HOME']):].lstrip('/'))
elif os.environ[env].startswith('~'):
dest_path = '/home/runner/{}'.format(os.environ[env][1:].lstrip('/'))
else:
dest_path = os.environ[env]
new_args.extend(["-v", "{}:{}".format(os.environ[env], dest_path)])
new_args.extend(["-e", "{}={}".format(env, dest_path)])
for paths in cli_automount['PATHS']:
if os.path.exists(paths['src']):
new_args.extend(["-v", "{}:{}".format(paths['src'], paths['dest'])])
if 'podman' in self.process_isolation_executable:
# container namespace stuff
new_args.extend(["--group-add=root"])
new_args.extend(["--userns=keep-id"])
new_args.extend(["--ipc=host"])
# the playbook / adhoc cases (cli_execenv_cmd) are handled separately
# because they have pre-existing mounts already in new_args
if self.cli_execenv_cmd:
# Relative paths are mounted relative to /runner/project
for subdir in ('project', 'artifacts'):
subdir_path = os.path.join(self.private_data_dir, subdir)
if not os.path.exists(subdir_path):
os.mkdir(subdir_path, 0o700)
# playbook / adhoc commands need artifacts mounted to output data
new_args.extend(["-v", "{}/artifacts:/runner/artifacts:Z".format(self.private_data_dir)])
else:
subdir_path = os.path.join(self.private_data_dir, 'artifacts')
if not os.path.exists(subdir_path):
os.mkdir(subdir_path, 0o700)
# Mount the entire private_data_dir
# custom show paths inside private_data_dir do not make sense
new_args.extend(["-v", "{}:/runner:Z".format(self.private_data_dir)])
container_volume_mounts = self.container_volume_mounts
if container_volume_mounts:
for mapping in container_volume_mounts:
host_path, container_path = mapping.split(':', 1)
_ensure_path_safe_to_mount(host_path)
new_args.extend(["-v", "{}:{}".format(host_path, container_path)])
# Reference the file with list of keys to pass into container
# this file will be written in ansible_runner.runner
env_file_host = os.path.join(self.artifact_dir, 'env.list')
new_args.extend(['--env-file', env_file_host])
if 'podman' in self.process_isolation_executable:
# docker doesn't support this option
new_args.extend(['--quiet'])
if 'docker' in self.process_isolation_executable:
new_args.extend([f'--user={os.getuid()}'])
new_args.extend(['--name', self.container_name])
if self.container_options:
new_args.extend(self.container_options)
new_args.extend([self.container_image])
new_args.extend(args)
debug(f"container engine invocation: {' '.join(new_args)}")
return new_args
def wrap_args_with_ssh_agent(self, args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
"""
Given an existing command line and parameterization this will return the same command line wrapped with the
necessary calls to ``ssh-agent``
"""
if self.containerized:
artifact_dir = os.path.join("/runner/artifacts", "{}".format(self.ident))
ssh_key_path = os.path.join(artifact_dir, "ssh_key_data")
if ssh_key_path:
ssh_add_command = args2cmdline('ssh-add', ssh_key_path)
if silence_ssh_add:
ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
ssh_key_cleanup_command = 'rm -f {}'.format(ssh_key_path)
# The trap ensures the fifo is cleaned up even if the call to ssh-add fails.
# This prevents getting into certain scenarios where subsequent reads will
# hang forever.
cmd = ' && '.join([args2cmdline('trap', ssh_key_cleanup_command, 'EXIT'),
ssh_add_command,
ssh_key_cleanup_command,
args2cmdline(*args)])
args = ['ssh-agent']
if ssh_auth_sock:
args.extend(['-a', ssh_auth_sock])
args.extend(['sh', '-c', cmd])
return args
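Expanding the docstring example at the top of this class into a fuller sketch (the module path matches this file; the directory and playbook are assumptions):
from ansible_runner.runner import Runner
from ansible_runner.runner_config import RunnerConfig

rc = RunnerConfig(private_data_dir='/tmp/demo', playbook='test.yml')
rc.prepare()  # builds rc.command, rc.env and rc.cwd from the inputs
r = Runner(config=rc)
status, exit_code = r.run()  # e.g. ('successful', 0)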

258
ansible_runner/streaming.py Normal file
View File

@ -0,0 +1,258 @@
import codecs
import json
import os
import stat
import sys
import tempfile
import uuid
import traceback
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import ansible_runner
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.loader import ArtifactLoader
import ansible_runner.plugins
from ansible_runner import utils
class UUIDEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, uuid.UUID):
return obj.hex
return json.JSONEncoder.default(self, obj)
class MockConfig(object):
def __init__(self, settings):
self.settings = settings
class Transmitter(object):
def __init__(self, _output=None, **kwargs):
if _output is None:
_output = sys.stdout.buffer
self._output = _output
self.private_data_dir = os.path.abspath(kwargs.pop('private_data_dir'))
self.only_transmit_kwargs = kwargs.pop('only_transmit_kwargs', False)
self.kwargs = kwargs
self.status = "unstarted"
self.rc = None
def run(self):
self._output.write(
json.dumps({'kwargs': self.kwargs}, cls=UUIDEncoder).encode('utf-8')
)
self._output.write(b'\n')
self._output.flush()
if not self.only_transmit_kwargs:
self._output.write(utils.stream_dir(self.private_data_dir))
self._output.write(json.dumps({'eof': True}).encode('utf-8'))
self._output.write(b'\n')
self._output.flush()
return self.status, self.rc
class Worker(object):
def __init__(self, _input=None, _output=None, **kwargs):
if _input is None:
_input = sys.stdin.buffer
if _output is None:
_output = sys.stdout.buffer
self._input = _input
self._output = _output
self.kwargs = kwargs
self.job_kwargs = None
private_data_dir = kwargs.get('private_data_dir')
if private_data_dir is None:
private_data_dir = tempfile.TemporaryDirectory().name
self.private_data_dir = private_data_dir
self.status = "unstarted"
self.rc = None
def update_paths(self, kwargs):
if kwargs.get('envvars'):
if 'ANSIBLE_ROLES_PATH' in kwargs['envvars']:
roles_path = kwargs['envvars']['ANSIBLE_ROLES_PATH']
roles_dir = os.path.join(self.private_data_dir, 'roles')
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = os.path.join(roles_dir, roles_path)
if kwargs.get('inventory'):
kwargs['inventory'] = os.path.join(self.private_data_dir, kwargs['inventory'])
return kwargs
def run(self):
while True:
try:
line = self._input.readline()
data = json.loads(line)
except (json.decoder.JSONDecodeError, IOError):
self.status_handler({'status': 'error', 'job_explanation': 'Failed to JSON parse a line from transmit stream.'}, None)
self.finished_callback(None) # send eof line
return self.status, self.rc
if 'kwargs' in data:
self.job_kwargs = self.update_paths(data['kwargs'])
elif 'zipfile' in data:
zip_data = self._input.read(data['zipfile'])
try:
utils.unstream_dir(zip_data, self.private_data_dir)
except Exception:
self.status_handler({
'status': 'error',
'job_explanation': 'Failed to extract private data directory on worker.',
'result_traceback': traceback.format_exc()
}, None)
self.finished_callback(None) # send eof line
return self.status, self.rc
elif 'eof' in data:
break
self.kwargs.update(self.job_kwargs)
self.kwargs['quiet'] = True
self.kwargs['suppress_ansible_output'] = True
self.kwargs['private_data_dir'] = self.private_data_dir
self.kwargs['status_handler'] = self.status_handler
self.kwargs['event_handler'] = self.event_handler
self.kwargs['artifacts_handler'] = self.artifacts_handler
self.kwargs['finished_callback'] = self.finished_callback
r = ansible_runner.interface.run(**self.kwargs)
self.status, self.rc = r.status, r.rc
# FIXME: do cleanup on the tempdir
return self.status, self.rc
def status_handler(self, status_data, runner_config):
self.status = status_data['status']
self._output.write(json.dumps(status_data).encode('utf-8'))
self._output.write(b'\n')
self._output.flush()
def event_handler(self, event_data):
self._output.write(json.dumps(event_data).encode('utf-8'))
self._output.write(b'\n')
self._output.flush()
def artifacts_handler(self, artifact_dir):
self._output.write(utils.stream_dir(artifact_dir))
self._output.flush()
def finished_callback(self, runner_obj):
self._output.write(json.dumps({'eof': True}).encode('utf-8'))
self._output.write(b'\n')
self._output.flush()
class Processor(object):
def __init__(self, _input=None, status_handler=None, event_handler=None,
artifacts_handler=None, cancel_callback=None, finished_callback=None, **kwargs):
if _input is None:
_input = sys.stdin.buffer
self._input = _input
self.quiet = kwargs.get('quiet')
private_data_dir = kwargs.get('private_data_dir')
if private_data_dir is None:
private_data_dir = tempfile.TemporaryDirectory().name
self.private_data_dir = private_data_dir
self._loader = ArtifactLoader(self.private_data_dir)
settings = kwargs.get('settings')
if settings is None:
try:
settings = self._loader.load_file('env/settings', Mapping)
except ConfigurationError:
settings = {}
self.config = MockConfig(settings)
artifact_dir = kwargs.get('artifact_dir')
self.artifact_dir = os.path.abspath(
artifact_dir or os.path.join(self.private_data_dir, 'artifacts'))
self.status_handler = status_handler
self.event_handler = event_handler
self.artifacts_handler = artifacts_handler
self.cancel_callback = cancel_callback # FIXME: unused
self.finished_callback = finished_callback
self.status = "unstarted"
self.rc = None
def status_callback(self, status_data):
self.status = status_data['status']
if self.status == 'starting':
self.config.command = status_data.get('command')
self.config.env = status_data.get('env')
self.config.cwd = status_data.get('cwd')
for plugin in ansible_runner.plugins:
ansible_runner.plugins[plugin].status_handler(self.config, status_data)
if self.status_handler is not None:
self.status_handler(status_data, runner_config=self.config)
def event_callback(self, event_data):
full_filename = os.path.join(self.artifact_dir,
'job_events',
'{}-{}.json'.format(event_data['counter'],
event_data['uuid']))
if not self.quiet and 'stdout' in event_data:
print(event_data['stdout'])
if self.event_handler is not None:
should_write = self.event_handler(event_data)
else:
should_write = True
for plugin in ansible_runner.plugins:
ansible_runner.plugins[plugin].event_handler(self.config, event_data)
if should_write:
with codecs.open(full_filename, 'w', encoding='utf-8') as write_file:
os.chmod(full_filename, stat.S_IRUSR | stat.S_IWUSR)
json.dump(event_data, write_file)
def artifacts_callback(self, artifacts_data):
zip_data = self._input.read(artifacts_data['zipfile'])
utils.unstream_dir(zip_data, self.artifact_dir)
if self.artifacts_handler is not None:
self.artifacts_handler(self.artifact_dir)
def run(self):
job_events_path = os.path.join(self.artifact_dir, 'job_events')
if not os.path.exists(job_events_path):
os.makedirs(job_events_path, 0o700, exist_ok=True)
while True:
try:
line = self._input.readline()
data = json.loads(line)
except (json.decoder.JSONDecodeError, IOError):
self.status_callback({'status': 'error', 'job_explanation': 'Failed to JSON parse a line from worker stream.'})
break
if 'status' in data:
self.status_callback(data)
elif 'zipfile' in data:
self.artifacts_callback(data)
elif 'eof' in data:
break
else:
self.event_callback(data)
if self.finished_callback is not None:
self.finished_callback(self)
return self.status, self.rc

431
ansible_runner/utils.py Normal file
View File

@ -0,0 +1,431 @@
import json
import sys
import re
import os
import stat
import fcntl
import shutil
import hashlib
import tempfile
import subprocess
import base64
import threading
import pipes
import uuid
import codecs
import zipfile
try:
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping
from io import BytesIO, StringIO
from six import string_types, PY2, PY3, text_type, binary_type
class Bunch(object):
'''
Collect a bunch of variables together in an object.
This is a slight modification of Alex Martelli's and Doug Hudgeon's Bunch pattern.
'''
def __init__(self, **kwargs):
self.update(**kwargs)
def update(self, **kwargs):
self.__dict__.update(kwargs)
def get(self, key):
return self.__dict__.get(key)
def isplaybook(obj):
'''
Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object looks like a playbook (a non-string, non-mapping iterable), False otherwise
'''
return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping))
def isinventory(obj):
'''
Inspects the object and returns if it is an inventory
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is an inventory dict and False if it is not
'''
return isinstance(obj, Mapping) or isinstance(obj, string_types)
def check_isolation_executable_installed(isolation_executable):
'''
Check that process isolation executable (e.g. podman, docker, bwrap) is installed.
'''
cmd = [isolation_executable, '--version']
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return bool(proc.returncode == 0)
except (OSError, ValueError) as e:
if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory
raise RuntimeError(f'{isolation_executable} unavailable for an unexpected reason.')
return False
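# Wire format shared by stream_dir/unstream_dir: a JSON header line
# {"zipfile": <payload length>} followed by the payload itself, a
# base85-encoded zip archive of the directory contents.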
def stream_dir(directory):
buf = BytesIO()
with zipfile.ZipFile(buf, 'w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as archive:
if directory:
for dirpath, dirs, files in os.walk(directory):
relpath = os.path.relpath(dirpath, directory)
if relpath == ".":
relpath = ""
for fname in files:
archive.write(os.path.join(dirpath, fname), arcname=os.path.join(relpath, fname))
archive.close()
payload = base64.b85encode(buf.getvalue())
return b'\n'.join((json.dumps({'zipfile': len(payload)}).encode('utf-8'), payload))
def unstream_dir(data, directory):
# NOTE: caller needs to process exceptions
data = base64.b85decode(data)
buf = BytesIO(data)
with zipfile.ZipFile(buf, 'r') as archive:
# Fancy extraction in order to preserve permissions
# https://www.burgundywall.com/post/preserving-file-perms-with-python-zipfile-module
for info in archive.infolist():
archive.extract(info.filename, path=directory)
out_path = os.path.join(directory, info.filename)
perm = info.external_attr >> 16
os.chmod(out_path, perm)
def dump_artifact(obj, path, filename=None):
'''
Write the artifact to disk at the specified path
Args:
obj (string): The string object to be dumped to disk in the specified
path. The artifact filename will be automatically created
path (string): The full path to the artifacts data directory.
filename (string, optional): The name of file to write the artifact to.
If the filename is not provided, then one will be generated.
Returns:
string: The full path filename for the artifact that was generated
'''
# hash the payload up front; the comparison below must be safe even when
# the directory (and the mkstemp file inside it) was only just created
p_sha1 = hashlib.sha1()
p_sha1.update(obj.encode(encoding='UTF-8'))
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
if filename is None:
fd, fn = tempfile.mkstemp(dir=path)
else:
fn = os.path.join(path, filename)
if os.path.exists(fn):
c_sha1 = hashlib.sha1()
with open(fn) as f:
contents = f.read()
c_sha1.update(contents.encode(encoding='UTF-8'))
if not os.path.exists(fn) or p_sha1.hexdigest() != c_sha1.hexdigest():
lock_fp = os.path.join(path, '.artifact_write_lock')
lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
fcntl.lockf(lock_fd, fcntl.LOCK_EX)
try:
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR)
f.write(str(obj))
finally:
fcntl.lockf(lock_fd, fcntl.LOCK_UN)
os.close(lock_fd)
os.remove(lock_fp)
return fn
def cleanup_artifact_dir(path, num_keep=0):
# 0 disables artifact dir cleanup/rotation
if num_keep < 1:
return
all_paths = sorted([os.path.join(path, p) for p in os.listdir(path)],
key=lambda x: os.path.getmtime(x))
total_remove = len(all_paths) - num_keep
for f in range(total_remove):
shutil.rmtree(all_paths[f])
def dump_artifacts(kwargs):
'''
Introspect the kwargs and dump objects to disk
'''
private_data_dir = kwargs.get('private_data_dir')
if not private_data_dir:
private_data_dir = tempfile.mkdtemp()
kwargs['private_data_dir'] = private_data_dir
if not os.path.exists(private_data_dir):
raise ValueError('private_data_dir path is either invalid or does not exist')
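# A bare 'role' kwarg is wrapped in a generated one-play playbook so it
# can be executed directly; ANSIBLE_ROLES_PATH is extended with the
# private data dir's roles directory so the generated play can find it.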
if 'role' in kwargs:
role = {'name': kwargs.pop('role')}
if 'role_vars' in kwargs:
role['vars'] = kwargs.pop('role_vars')
play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}]
if kwargs.pop('role_skip_facts', False):
play[0]['gather_facts'] = False
kwargs['playbook'] = play
if 'envvars' not in kwargs:
kwargs['envvars'] = {}
roles_path = kwargs.pop('roles_path', None)
if not roles_path:
roles_path = os.path.join(private_data_dir, 'roles')
else:
roles_path += ':{}'.format(os.path.join(private_data_dir, 'roles'))
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path
obj = kwargs.get('playbook')
if obj and isplaybook(obj):
path = os.path.join(private_data_dir, 'project')
kwargs['playbook'] = dump_artifact(json.dumps(obj), path, 'main.json')
obj = kwargs.get('inventory')
if obj and isinventory(obj):
path = os.path.join(private_data_dir, 'inventory')
if isinstance(obj, Mapping):
kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json')
elif isinstance(obj, string_types):
if not os.path.exists(obj):
kwargs['inventory'] = dump_artifact(obj, path, 'hosts')
for key in ('envvars', 'extravars', 'passwords', 'settings'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(json.dumps(obj), path, key)
kwargs.pop(key)
for key in ('ssh_key', 'cmdline'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(str(kwargs[key]), path, key)
kwargs.pop(key)
def collect_new_events(event_path, old_events):
'''
Collect new events for the 'events' generator property
'''
dir_events = os.listdir(event_path)
dir_events_actual = []
for each_file in dir_events:
if re.match("^[0-9]+-.+json$", each_file):
if '-partial' not in each_file and each_file not in old_events:
dir_events_actual.append(each_file)
dir_events_actual.sort(key=lambda filenm: int(filenm.split("-", 1)[0]))
for event_file in dir_events_actual:
with codecs.open(os.path.join(event_path, event_file), 'r', encoding='utf-8') as event_file_actual:
try:
event = json.load(event_file_actual)
except ValueError:
break
old_events[event_file] = True
yield event, old_events
class OutputEventFilter(object):
'''
File-like object that looks for encoded job events in stdout data.
'''
EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')
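# Encoded events appear in the stdout stream as base64 JSON wrapped in
# ANSI control sequences: \x1b[K <base64, chunked with \x1b[<n>D cursor
# moves> \x1b[K. The regex above locates these tokens; write() decodes
# them and strips them from the emitted stdout.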
def __init__(self, handle, event_callback,
suppress_ansible_output=False, output_json=False):
self._event_callback = event_callback
self._counter = 0
self._start_line = 0
self._handle = handle
self._buffer = StringIO()
self._last_chunk = ''
self._current_event_data = None
self.output_json = output_json
self.suppress_ansible_output = suppress_ansible_output
def flush(self):
self._handle.flush()
def write(self, data):
self._buffer.write(data)
# keep a sliding window of the last chunk written so we can detect
# event tokens and determine if we need to perform a search of the full
# buffer
should_search = '\x1b[K' in (self._last_chunk + data)
self._last_chunk = data
# Only bother searching the buffer if we recently saw a start/end
# token (\x1b[K)
while should_search:
value = self._buffer.getvalue()
match = self.EVENT_DATA_RE.search(value)
if not match:
break
try:
base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
event_data = json.loads(base64.b64decode(base64_data).decode('utf-8'))
except ValueError:
event_data = {}
event_data = self._emit_event(value[:match.start()], event_data)
if not self.output_json:
stdout_actual = event_data['stdout'] if 'stdout' in event_data else None
else:
stdout_actual = json.dumps(event_data)
remainder = value[match.end():]
self._buffer = StringIO()
self._buffer.write(remainder)
if stdout_actual and stdout_actual != "{}":
if not self.suppress_ansible_output:
sys.stdout.write(
stdout_actual.encode('utf-8') if PY2 else stdout_actual
)
sys.stdout.write("\n")
sys.stdout.flush()
self._handle.write(stdout_actual + "\n")
self._handle.flush()
self._last_chunk = remainder
else:
# Verbose stdout outside of event data context
if data and '\n' in data and self._current_event_data is None:
# emit events for all complete lines we know about
lines = self._buffer.getvalue().splitlines(True) # keep ends
remainder = None
# if last line is not a complete line, then exclude it
if '\n' not in lines[-1]:
remainder = lines.pop()
# emit all complete lines
for line in lines:
self._emit_event(line)
if not self.suppress_ansible_output:
sys.stdout.write(
line.encode('utf-8') if PY2 else line
)
self._handle.write(line)
self._handle.flush()
self._buffer = StringIO()
# put final partial line back on buffer
if remainder:
self._buffer.write(remainder)
def close(self):
value = self._buffer.getvalue()
if value:
self._emit_event(value)
self._buffer = StringIO()
self._event_callback(dict(event='EOF'))
self._handle.close()
def _emit_event(self, buffered_stdout, next_event_data=None):
next_event_data = next_event_data or {}
if self._current_event_data:
event_data = self._current_event_data
stdout_chunks = [buffered_stdout]
elif buffered_stdout:
event_data = dict(event='verbose')
stdout_chunks = buffered_stdout.splitlines(True)
else:
event_data = dict()
stdout_chunks = []
for stdout_chunk in stdout_chunks:
if event_data.get('event') == 'verbose':
event_data['uuid'] = str(uuid.uuid4())
self._counter += 1
event_data['counter'] = self._counter
event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else ""
n_lines = stdout_chunk.count('\n')
event_data['start_line'] = self._start_line
event_data['end_line'] = self._start_line + n_lines
self._start_line += n_lines
if self._event_callback:
self._event_callback(event_data)
if next_event_data.get('uuid', None):
self._current_event_data = next_event_data
else:
self._current_event_data = None
return event_data
def open_fifo_write(path, data):
'''open_fifo_write opens the fifo named pipe in a new thread.
This blocks the thread until an external process (such as ssh-agent)
reads data from the pipe.
'''
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)).start()
def args2cmdline(*args):
return ' '.join([pipes.quote(a) for a in args])
def ensure_str(s, encoding='utf-8', errors='strict'):
"""
Copied from six==1.12
Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s
def sanitize_container_name(original_name):
"""
Docker and podman will only accept certain characters in container names
This takes a given name from user-specified values and replaces the
invalid characters so it can be used in docker/podman CLI commands
"""
return re.sub('[^a-zA-Z0-9_-]', '_', text_type(original_name))

11
bindep.txt Normal file
View File

@ -0,0 +1,11 @@
# This is a cross-platform list tracking distribution packages needed by tests;
# see https://docs.openstack.org/infra/bindep/ for additional information.
gcc-c++ [test platform:rpm]
openssh-clients
podman [test platform:rpm]
python36 [test !platform:centos-7 !platform:fedora-28]
python38-six [platform:centos-8]
python38-yaml [platform:centos-8]
python3-devel [test !platform:centos-7 platform:rpm]
python3 [test !platform:centos-7 platform:rpm]

1
demo/inventory/hosts Normal file
View File

@ -0,0 +1 @@
localhost

View File

@ -0,0 +1,38 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View File

@ -0,0 +1,2 @@
---
# defaults file for testrole

View File

@ -0,0 +1,2 @@
---
# handlers file for testrole

View File

@ -0,0 +1,58 @@
---
galaxy_info:
author: your name
description: your description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Some suggested licenses:
# - BSD (default)
# - MIT
# - GPLv2
# - GPLv3
# - Apache
# - CC-BY
license: license (GPLv2, CC-BY, etc)
min_ansible_version: 1.2
# If this is a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
# Optionally specify the branch Galaxy will use when accessing the GitHub
# repo for this role. During role install, if no tags are available,
# Galaxy will use this branch. During import Galaxy will access files on
# this branch. If Travis integration is configured, only notifications for this
# branch will be accepted. Otherwise, in all cases, the repo's default branch
# (usually master) will be used.
# github_branch:
#
# platforms is a list of platforms, and each platform has a name and a list of versions.
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@ -0,0 +1,5 @@
---
# tasks file for testrole
- name: just print a message to stdout
debug:
msg: "hello from the ansible-runner testrole!"

View File

@ -0,0 +1,2 @@
localhost

View File

@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- testrole

View File

@ -0,0 +1,2 @@
---
# vars file for testrole

4
demo/project/test.yml Normal file
View File

@ -0,0 +1,4 @@
---
- hosts: all
tasks:
- debug: msg="Test!"

20
docs/Makefile Normal file
View File

@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = ansible-runner
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

View File

@ -0,0 +1,30 @@
ansible\_runner.callbacks package
=================================
Submodules
----------
ansible\_runner.callbacks.awx\_display module
---------------------------------------------
.. automodule:: ansible_runner.callbacks.awx_display
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.callbacks.minimal module
----------------------------------------
.. automodule:: ansible_runner.callbacks.minimal
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: ansible_runner.callbacks
:members:
:undoc-members:
:show-inheritance:

View File

@ -0,0 +1,54 @@
ansible\_runner.display\_callback package
=========================================
Submodules
----------
ansible\_runner.display\_callback.cleanup module
------------------------------------------------
.. automodule:: ansible_runner.display_callback.cleanup
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.display\_callback.display module
------------------------------------------------
.. automodule:: ansible_runner.display_callback.display
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.display\_callback.events module
-----------------------------------------------
.. automodule:: ansible_runner.display_callback.events
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.display\_callback.minimal module
------------------------------------------------
.. automodule:: ansible_runner.display_callback.minimal
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.display\_callback.module module
-----------------------------------------------
.. automodule:: ansible_runner.display_callback.module
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: ansible_runner.display_callback
:members:
:undoc-members:
:show-inheritance:

70
docs/ansible_runner.rst Normal file
View File

@ -0,0 +1,70 @@
ansible\_runner package
=======================
Subpackages
-----------
.. toctree::
ansible_runner.callbacks
ansible_runner.display_callback
Submodules
----------
ansible\_runner.exceptions module
---------------------------------
.. automodule:: ansible_runner.exceptions
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.interface module
--------------------------------
.. automodule:: ansible_runner.interface
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.loader module
-----------------------------
.. automodule:: ansible_runner.loader
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.runner module
-----------------------------
.. automodule:: ansible_runner.runner
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.runner\_config module
-------------------------------------
.. automodule:: ansible_runner.runner_config
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.utils module
----------------------------
.. automodule:: ansible_runner.utils
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: ansible_runner
:members:
:undoc-members:
:show-inheritance:

175
docs/conf.py Normal file
View File

@ -0,0 +1,175 @@
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('./'))
# -- Project information -----------------------------------------------------
project = 'ansible-runner'
copyright = '2018, Red Hat Ansible'
author = 'Red Hat Ansible'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '2.0.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ansible-runnerdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ansible-runner.tex', 'ansible-runner Documentation',
'Red Hat Ansible', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ansible-runner', 'ansible-runner Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ansible-runner', 'ansible-runner Documentation',
author, 'ansible-runner', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

49
docs/container.rst Normal file
View File

@ -0,0 +1,49 @@
.. _container:
Using Runner as a container interface to Ansible
================================================
The design of **Ansible Runner** makes it especially suitable for controlling the execution of **Ansible** from within a container for single-purpose
automation workflows. A reference container image definition is `provided <https://github.com/ansible/ansible-runner/blob/master/Dockerfile>`_ and
is also published to `DockerHub <https://hub.docker.com/r/ansible/ansible-runner/>`_. You can try it out for yourself:
.. code-block:: console
$ docker run --rm -e RUNNER_PLAYBOOK=test.yml ansible/ansible-runner:latest
Unable to find image 'ansible/ansible-runner:latest' locally
latest: Pulling from ansible/ansible-runner
[...]
PLAY [all] *********************************************************************
TASK [Gathering Facts] *********************************************************
ok: [localhost]
TASK [debug] *******************************************************************
ok: [localhost] => {
"msg": "Test!"
}
PLAY RECAP *********************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0
The reference container image is purposefully lightweight, containing only the dependencies necessary to run ``ansible-runner`` itself. It's
intended to be overridden.
Overriding the reference container image
----------------------------------------
**TODO**
Gathering output from the reference container image
---------------------------------------------------
**TODO**
Changing the console output to emit raw events
----------------------------------------------
This can be useful when directing task-level event data to an external system by means of the container's console output.
See :ref:`outputjson`

View File

@ -0,0 +1,110 @@
.. _execution_environments:
Using Runner with Execution Environments
========================================
**Execution Environments** are meant to be a consistent, reproducible, portable,
and sharable method to run Ansible Automation jobs in the exact same way on
your laptop as they are executed in `Ansible AWX <https://github.com/ansible/awx/>`_.
This aids in the development of automation jobs and Ansible Content that is
meant to be run in **Ansible AWX**, `Ansible Tower <https://www.ansible.com/products/tower>`_,
or via `Red Hat Ansible Automation Platform <https://www.ansible.com/products/automation-platform>`_
in a predictable way.
More specifically, the term **Execution Environments** within the context of
**Ansible Runner** refers to the container runtime execution of **Ansible** via
**Ansible Runner** within an `OCI Compliant Container Runtime
<https://github.com/opencontainers/runtime-spec>`_ using an `OCI Compliant
Container Image <https://github.com/opencontainers/image-spec/>`_ that
appropriately bundles `Ansible Base <https://github.com/ansible/ansible>`_,
`Ansible Collection Content <https://github.com/ansible-collections/overview>`_,
and the runtime dependencies required to support these contents. The base
image is the `Red Hat Enterprise Linux Universal Base Image
<https://developers.redhat.com/products/rhel/ubi>`_ and the build tooling
provided by `Ansible Builder <https://github.com/ansible/ansible-builder>`_
aids in the creation of these images.
All aspects of running **Ansible Runner** in standalone mode (see: :ref:`standalone`)
are true here with the exception that the process isolation is inherently a
container runtime (`podman <https://podman.io/>`_ by default).
Emulating the Ansible CLI
-------------------------
As previously mentioned, a primary goal of adding the Execution Environment CLI
interface is to aid in the creation of Ansible Automation jobs and content. The
approach here is to make it as similar as possible to the way **Ansible** users
are accustomed to using Ansible today. There are two subcommands, ``adhoc`` and
``playbook`` that have been added to accommodate this. The ``adhoc`` subcommand
to ``ansible-runner`` is synonymous with ``ansible`` and the ``playbook``
subcommand to ``ansible-runner`` is synonymous with ``ansible-playbook``.
Examples are below.
Running Ansible adhoc
^^^^^^^^^^^^^^^^^^^^^
An example invocation using the ``ping`` module and ``localhost`` as target::
$ ansible-runner adhoc localhost -m ping
Something to note here is that implicit ``localhost`` in this context is a containerized instantiation of an Ansible Execution Environment and as such you will not get Ansible Facts about your system if using the ``setup`` module.
Running ansible-playbook
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
An example invocation using the ``demo.yml`` playbook and ``inventory.ini`` inventory file::
$ ansible-runner playbook demo.yml -i inventory.ini
Something to note here is that implicit ``localhost`` in this context is a containerized instantiation of an Ansible Execution Environment and as such you will not get Ansible Facts about your system if using ``gather_facts: true`` and targeting ``localhost`` in your playbook without explicit host definition in your inventory.
Notes and Considerations
------------------------
There are some differences between using Ansible Runner and running Ansible directly from the
command line that have to do with configuration, content locality, and secret data.
Secrets
^^^^^^^
Typically with Ansible you are able to provide secret data via a series of
mechanisms, many of which are pluggable and configurable. When using
Ansible Runner, however, certain considerations need to be made; these are analogous to
how Ansible AWX and Tower manage this information.
See :ref:`inputdir` for more information
Container Names
^^^^^^^^^^^^^^^
Like all ansible-runner jobs, each job has an identifier associated with it
which is also the name of the artifacts subfolder where results are saved to.
When a container for job isolation is launched, it will be given a name
of ``ansible_runner_<job identifier>``. Some characters from the job
identifier may be replaced with underscores for compatibility with
names that Podman and Docker allow.
This name is used internally if a command needs to be run against the container
at a later time (e.g., to stop the container when the job is canceled).
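As a quick illustration, the replacement is performed by the ``sanitize_container_name`` helper from ``ansible_runner.utils`` (shown earlier in this commit)::
>>> from ansible_runner.utils import sanitize_container_name
>>> sanitize_container_name('ansible_runner_my:job/id')
'ansible_runner_my_job_id'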
~/.ssh/ symlinks
^^^^^^^^^^^^^^^^
In order to make the ``adhoc`` and ``playbook`` container execution of Ansible
easier, Ansible Runner will automatically bind mount your local ssh agent
UNIX-domain socket (``SSH_AUTH_SOCK``) into the container runtime. However, this
does not work if files in your ``~/.ssh/`` directory happen to be symlinked to
another directory that is not also mounted into the container runtime. Ansible
Runner ``adhoc`` and ``playbook`` subcommands provide the ``--container-volume-mount``
option to address this, among other things.
Here is an example of an ssh config file that is a symlink:
::
$ ls -l ~/.ssh/config
lrwxrwxrwx. 1 myuser myuser 34 Jul 15 19:27 /home/myuser/.ssh/config -> /home/myuser/dotfiles/ssh_config
$ ansible-runner playbook \
--container-volume-mount /home/myuser/dotfiles/:/home/myuser/dotfiles/ \
my_playbook.yml -i my_inventory.ini

View File

@ -0,0 +1,78 @@
.. _externalintf:
Sending Runner Status and Events to External Systems
====================================================
**Runner** can store event and status data locally for retrieval; it can also emit this information via callbacks provided to the module interface.
Alternatively, **Runner** can be configured to send events to an external system via installable plugins; there are currently two available.
.. _plugineventstructure:
Event Structure
---------------
There are two types of events that are emitted via plugins:
* status events:
These are sent whenever Runner's status changes (see :ref:`runnerstatushandler`) for example::
{"status": "running", "runner_ident": "XXXX" }
* ansible events:
These are sent during playbook execution for every event received from **Ansible** (see :ref:`Playbook and Host Events<artifactevents>`) for example::
{"runner_ident": "XXXX", <rest of event structure }
.. _httpemitterplugin:
HTTP Status/Event Emitter Plugin
--------------------------------
This sends status and event data to a URL in the form of json encoded POST requests.
This plugin is available from the `ansible-runner-http github repo <https://github.com/ansible/ansible-runner-http>`_ and is also available to be installed from
pip::
$ pip install ansible-runner-http
In order to configure it, you can provide details in the Runner Settings file (see :ref:`runnersettings`):
* `runner_http_url`: The url to receive the ``POST``
* `runner_http_headers`: Headers to send along with the request.
The plugin also supports unix file-based sockets with:
* `runner_http_url`: The path to the unix socket
* `runner_http_path`: The path that will be included as part of the request to the socket
Some of these settings are also available as environment variables:
* RUNNER_HTTP_URL
* RUNNER_HTTP_PATH
.. _zmqemitterplugin:
ZeroMQ Status/Event Emitter Plugin
----------------------------------
TODO
Writing your own Plugin
-----------------------
In order to write your own plugin interface and have it be picked up and used by **Runner** there are a few things that you'll need to do.
* Declare the module as a Runner entrypoint in your setup file
(`ansible-runner-http has a good example of this <https://github.com/ansible/ansible-runner-http/blob/master/setup.py>`_)::
entry_points={'ansible_runner.plugins': 'modname = your_python_package_name'},
* Implement the ``status_handler()`` and ``event_handler()`` functions at the top of your package, for example see
`ansible-runner-http events.py <https://github.com/ansible/ansible-runner-http/blob/master/ansible_runner_http/events.py>`_ and the ``__init__``
import `at the top of the module package <https://github.com/ansible/ansible-runner-http/blob/master/ansible_runner_http/__init__.py>`_
After installing this, **Runner** will see the plugin and invoke the functions when status and events are sent. If there are any errors in your plugin
they will be raised immediately and **Runner** will fail.
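As a rough sketch (the module layout and names here are illustrative, not prescriptive; the two-argument signature matches how **Runner** invokes plugin handlers), the events module might look like::
# your_python_package_name/events.py
def status_handler(runner_config, data):
# called on every status change, e.g. {"status": "running", "runner_ident": "XXXX"}
print('runner status: {}'.format(data.get('status')))
def event_handler(runner_config, data):
# called once per Ansible event dict received during the run
print('runner event: {}'.format(data.get('event')))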

55
docs/index.rst Normal file
View File

@ -0,0 +1,55 @@
.. ansible-runner documentation master file, created by
sphinx-quickstart on Tue May 1 10:47:37 2018.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Ansible Runner
==============
Ansible Runner is a tool and python library that helps when interfacing with Ansible directly or as part of another system
whether that be through a container image interface, as a standalone tool, or as a Python module that can be imported. The goal
is to provide a stable and consistent interface abstraction to Ansible. This allows **Ansible** to be embedded into other systems that don't
want to manage the complexities of the interface on their own (such as CI/CD platforms, Jenkins, or other automated tooling).
**Ansible Runner** represents the modularization of the part of `Ansible Tower/AWX <https://github.com/ansible/awx>`_ that is responsible
for running ``ansible`` and ``ansible-playbook`` tasks and gathers the output from it. It does this by presenting a common interface that doesn't
change, even as **Ansible** itself grows and evolves.
Part of what makes this tooling useful is that it can gather its inputs in a flexible way (see :ref:`intro`). It also has a system for storing the
output (stdout) and artifacts (host-level event data, fact data, etc) of the playbook run.
There are 3 primary ways of interacting with **Runner**
* A standalone command line tool (``ansible-runner``) that can be started in the foreground or run in the background asynchronously
* A reference container image that can be used as a base for your own images and will work as a standalone container or run in
OpenShift or Kubernetes
* A python module - library interface
**Ansible Runner** can also be configured to send status and event data to other systems using a plugin interface, see :ref:`externalintf`.
Examples of this could include:
* Sending status to Ansible Tower/AWX
* Sending events to an external logging service
.. toctree::
:maxdepth: 2
:caption: Contents:
intro
install
external_interface
standalone
python_interface
execution_environments
container
remote_jobs
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

88
docs/install.rst Normal file
View File

@ -0,0 +1,88 @@
.. _install:
Installing Ansible Runner
=========================
Ansible Runner is provided from several different locations depending on how you want to use it.
Using pip
---------
Python 2.7+ and 3.6+ are supported and installable via pip::
$ pip install ansible-runner
Fedora
------
To install the latest version from the Fedora repositories::
$ dnf install python-ansible-runner
Debian
------
Add an ansible-runner repository::
$ apt-get update
$ echo 'deb https://releases.ansible.com/ansible-runner/deb/ <trusty|xenial|stretch> main' > /etc/apt/sources.list.d/ansible.list
Add a key::
$ apt-key adv --keyserver keyserver.ubuntu.com --recv 3DD29021
Install the package::
$ apt-get update
$ apt-get install ansible-runner
From source
-----------
Ansible Runner uses `python-poetry <https://python-poetry.org/>`_ for
development and dependency management; once that is installed, perform the
following steps.
Check out the source code from `github <https://github.com/ansible/ansible-runner>`_::
$ git clone git://github.com/ansible/ansible-runner
Or download from the `releases page <https://github.com/ansible/ansible-runner/releases>`_
Then install::
$ poetry install
.. _builddist:
Build the distribution
----------------------
To produce an installable ``wheel`` file::
make dist
To produce a distribution tarball::
make sdist
.. _buildcontimg:
Building the base container image
---------------------------------
Make sure the ``wheel`` distribution is built (see :ref:`builddist`) and run::
make image
Building the RPM
----------------
The RPM build uses a container image to bootstrap the environment in order to produce the RPM. Make sure you have docker
installed and proceed with::
make rpm
.. include:: ../CHANGES.rst

395
docs/intro.rst Normal file
View File

@ -0,0 +1,395 @@
.. _intro:
Introduction to Ansible Runner
==============================
**Runner** is intended to be most useful as part of automation and tooling that needs to invoke Ansible and consume its results.
Most of the parameterization of the **Ansible** command line is also available on the **Runner** command line but **Runner** also
can rely on an input interface that is mapped onto a directory structure, an example of which can be seen in `the source tree <https://github.com/ansible/ansible-runner/tree/devel/demo>`_.
Further sections in this document refer to the configuration and layout of that hierarchy. This isn't the only way to interface with **Runner**
itself. The Python module interface allows supplying these details as direct module parameters in many forms, and the command line interface allows
supplying them directly as arguments, mimicking the behavior of ``ansible-playbook``. Having the directory structure **does** allow gathering the inputs
from elsewhere and preparing them for consumption by **Runner**; the tooling can then come along and inspect the results after the run.
This is best seen in the way Ansible **AWX** uses **Runner** where most of the content comes from the database (and other content-management components) but
ultimately needs to be brought together in a single place when launching the **Ansible** task.
.. _inputdir:
Runner Input Directory Hierarchy
--------------------------------
This directory contains all necessary inputs. Here's a view of the `demo directory <https://github.com/ansible/ansible-runner/tree/devel/demo>`_ showing
an active configuration.
Note that not everything is required. Defaults will be used or values will be omitted if they are not provided.
.. code-block:: none
.
├── env
│   ├── envvars
│   ├── extravars
│   ├── passwords
│   ├── cmdline
│   ├── settings
│   └── ssh_key
├── inventory
│   └── hosts
└── project
    ├── test.yml
    └── roles
        └── testrole
            ├── defaults
            ├── handlers
            ├── meta
            ├── README.md
            ├── tasks
            ├── tests
            └── vars
The ``env`` directory
---------------------
The **env** directory contains settings and sensitive files that inform certain aspects of the invocation of the **Ansible** process, an example of which can
be found in `the demo env directory <https://github.com/ansible/ansible-runner/tree/devel/demo/env>`_. Each of these files can also be represented by a named
pipe providing a bit of an extra layer of security. The formatting and expectation of these files differs slightly depending on what they are representing.
``env/envvars``
---------------
.. note::
For an example see `the demo envvars <https://github.com/ansible/ansible-runner/blob/devel/demo/env/envvars>`_.
**Ansible Runner** will inherit the environment of the launching shell (or container, or system itself). This file (which can be in json or yaml format) represents
the environment variables that will be added to the environment at run-time::
---
TESTVAR: exampleval
``env/extravars``
-----------------
.. note::
For an example see `the demo extravars <https://github.com/ansible/ansible-runner/blob/devel/demo/env/extravars>`_.
**Ansible Runner** gathers the extra vars provided here and supplies them to the **Ansible Process** itself. This file can be in either json or yaml format::
---
ansible_connection: local
test: val
``env/passwords``
-----------------
.. note::
For an example see `the demo passwords <https://github.com/ansible/ansible-runner/blob/devel/demo/env/passwords>`_.
.. warning::
We expect this interface to change/simplify in the future but will guarantee backwards compatibility. The goal is for the user of **Runner** to not
have to worry about the format of certain prompts emitted from **Ansible** itself. In particular, vault passwords need to become more flexible.
**Ansible** itself is set up to emit passwords to certain prompts, these prompts can be requested (``-k`` for example to prompt for the connection password).
Likewise, prompts can be emitted via `vars_prompt <https://docs.ansible.com/ansible/latest/user_guide/playbooks_prompts.html>`_ and also
`Ansible Vault <https://docs.ansible.com/ansible/2.5/user_guide/vault.html#vault-ids-and-multiple-vault-passwords>`_.
In order for **Runner** to respond with the correct password, it needs to be able to match the prompt and provide the correct password. This is currently supported
by providing a yaml or json formatted file with a regular expression and a value to emit, for example::
---
"^SSH password:\\s*?$": "some_password"
"^BECOME password.*:\\s*?$": "become_password"
``env/cmdline``
---------------
.. warning::
Currently, **Ansible Runner** does not validate the command line arguments passed using this method, so it is up to the playbook writer to provide a valid set of options.
The command line options provided by this method are lower priority than the ones set by **Ansible Runner**. For instance, this will not override `inventory` or `limit` values.
**Ansible Runner** gathers command line options provided here as a string and supplies them to the **Ansible Process** itself. This file should contain the arguments to be added, for example::
--tags one,two --skip-tags three -u ansible --become
``env/ssh_key``
---------------
.. note::
Currently only a single ssh key can be provided via this mechanism but this is set to `change soon <https://github.com/ansible/ansible-runner/issues/51>`_.
This file should contain the ssh private key used to connect to the host(s). **Runner** detects when a private key is provided and will wrap the call to
**Ansible** in ssh-agent.
.. _runnersettings:
``env/settings`` - Settings for Runner itself
---------------------------------------------
The **settings** file is a little different than the other files provided in this section in that its contents are meant to control **Runner** directly.
* ``idle_timeout``: ``600`` If no output is detected from ansible in this number of seconds the execution will be terminated.
* ``job_timeout``: ``3600`` The maximum amount of time to allow the job to run; if this is exceeded, the execution will be terminated.
* ``pexpect_timeout``: ``10`` Number of seconds for the internal pexpect command to wait to block on input before continuing
* ``pexpect_use_poll``: ``True`` Use ``poll()`` function for communication with child processes instead of ``select()``. ``select()`` is used when the value is set to ``False``. ``select()`` has a known limitation of using only up to 1024 file descriptors.
* ``suppress_ansible_output``: ``False`` Allow output from ansible to not be printed to the screen
* ``fact_cache``: ``'fact_cache'`` The directory relative to ``artifacts`` where ``jsonfile`` fact caching will be stored. Defaults to ``fact_cache``. This is ignored if ``fact_cache_type`` is different than ``jsonfile``.
* ``fact_cache_type``: ``'jsonfile'`` The type of fact cache to use. Defaults to ``jsonfile``.
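When driving **Runner** through the Python module interface, these same settings can be supplied as a dictionary instead of a file; the ``dump_artifacts`` helper shown earlier in this commit writes such a dictionary out to ``env/settings``. A minimal sketch (the paths and values here are illustrative)::
import ansible_runner
r = ansible_runner.run(
private_data_dir='/tmp/demo',
playbook='test.yml',
settings={'idle_timeout': 600, 'job_timeout': 3600},
)
print(r.status, r.rc)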
Process Isolation Settings for Runner
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The process isolation settings are meant to control the process isolation feature of **Runner**.
* ``process_isolation``: ``False`` Enable limiting what directories on the filesystem the playbook run has access to.
* ``process_isolation_executable``: ``bwrap`` Path to the executable that will be used to provide filesystem isolation.
* ``process_isolation_path``: ``/tmp`` Path that an isolated playbook run will use for staging.
* ``process_isolation_hide_paths``: ``None`` Path or list of paths on the system that should be hidden from the playbook run.
* ``process_isolation_show_paths``: ``None`` Path or list of paths on the system that should be exposed to the playbook run.
* ``process_isolation_ro_paths``: ``None`` Path or list of paths on the system that should be exposed to the playbook run as read-only.
Container-based Execution
^^^^^^^^^^^^^^^^^^^^^^^^^
The ``--containerized`` setting instructs **Ansible Runner** to execute **Ansible** tasks inside a container environment. A default execution environment is provided on Docker Hub at `ansible/ansible-runner <https://hub.docker.com/r/ansible/ansible-runner>`_. Users also have the option of building their own container for executing playbooks, however.
To build an execution environment locally, run:
``docker build --rm=true -t custom-container -f Dockerfile .``
or, using `podman <https://podman.io/releases/>`_:
``podman build --rm=true -t custom-container -f Dockerfile .``
To run Ansible Runner with your custom container:
``ansible-runner run --container-image custom-container -p playbook.yml``
See ``ansible-runner -h`` for other container-related options.
Performance Data Collection Settings for Runner
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Runner** is capable of collecting performance data (namely cpu usage, memory usage, and pid count) during the execution of a playbook run.
Resource profiling is made possible by the use of control groups (often referred to simply as cgroups). When a process runs inside of a cgroup, the resources used by that specific process can be measured.
Before enabling Runner's resource profiling feature, users must create a cgroup that **Runner** can use. It is worth noting that only privileged users can create cgroups. The new cgroup should be associated with the same user (and related group) that will be invoking **Runner**. The following command accomplishes this on a RHEL system::
sudo yum install libcgroup-tools
sudo cgcreate -a `whoami` -t `whoami` -g cpuacct,memory,pids:ansible-runner
In the above command, ``cpuacct``, ``memory``, and ``pids`` refer to kernel resource controllers, while ``ansible-runner`` refers to the name of the cgroup being created. More detailed information on the structure of cgroups can be found in the RHEL guide on `Managing, monitoring, and updating the kernel <https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_monitoring_and_updating_the_kernel/setting-limits-for-applications_managing-monitoring-and-updating-the-kernel>`_
After a cgroup has been created, the following settings can be used to configure resource profiling. Note that ``resource_profiling_base_cgroup`` must match the name of the cgroup you create.
* ``resource_profiling``: ``False`` Enable performance data collection.
* ``resource_profiling_base_cgroup``: ``ansible-runner`` Top-level cgroup used to measure playbook resource utilization.
* ``resource_profiling_cpu_poll_interval``: ``0.25`` Polling interval in seconds for collecting cpu usage.
* ``resource_profiling_memory_poll_interval``: ``0.25`` Polling interval in seconds for collecting memory usage.
* ``resource_profiling_pid_poll_interval``: ``0.25`` Polling interval in seconds for measuring PID count.
* ``resource_profiling_results_dir``: ``None`` Directory where resource utilization data will be written (if not specified, will be placed in the ``profiling_data`` folder under the private data directory).
Inventory
---------
The **Runner** ``inventory`` location under the private data dir has the same expectations as inventory provided directly to ansible itself. It can
be either a single file or script or a directory containing static inventory files or scripts. This inventory is automatically loaded and provided to
**Ansible** when invoked and can be further overridden on the command line or via the ``ANSIBLE_INVENTORY`` environment variable to specify the hosts directly.
Giving an absolute path for the inventory location is best practice, because relative paths are interpreted relative to the ``current working directory``
which defaults to the ``project`` directory.
Project
--------
The **Runner** ``project`` directory is the playbook root containing playbooks and roles that those playbooks can consume directly. This is also the
directory that will be set as the ``current working directory`` when launching the **Ansible** process.
Modules
-------
**Runner** has the ability to execute modules directly using Ansible ad-hoc mode.
Roles
-----
**Runner** has the ability to execute `Roles <https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html>`_ directly without first needing
a playbook to reference them. This directory holds roles used for that. Behind the scenes, **Runner** will generate a playbook and invoke the ``Role``.
.. _artifactdir:
Runner Artifacts Directory Hierarchy
------------------------------------
This directory will contain the results of **Runner** invocation grouped under an ``identifier`` directory. This identifier can be supplied to **Runner** directly
and if not given, an identifier will be generated as a `UUID <https://docs.python.org/3/library/uuid.html#uuid.uuid4>`_. This is how the directory structure looks
from the top level::
.
├── artifacts
│   └── identifier
├── env
├── inventory
├── profiling_data
├── project
└── roles
The artifact directory itself contains a particular structure that provides a lot of extra detail from a running or previously-run invocation of Ansible/Runner::
.
├── artifacts
│   └── 37f639a3-1f4f-4acb-abee-ea1898013a25
│   ├── fact_cache
│   │   └── localhost
│   ├── job_events
│   │   ├── 1-34437b34-addd-45ae-819a-4d8c9711e191.json
│   │   ├── 2-8c164553-8573-b1e0-76e1-000000000006.json
│   │   ├── 3-8c164553-8573-b1e0-76e1-00000000000d.json
│   │   ├── 4-f16be0cd-99e1-4568-a599-546ab80b2799.json
│   │   ├── 5-8c164553-8573-b1e0-76e1-000000000008.json
│   │   ├── 6-981fd563-ec25-45cb-84f6-e9dc4e6449cb.json
│   │   └── 7-01c7090a-e202-4fb4-9ac7-079965729c86.json
│   ├── rc
│   ├── status
│   └── stdout
The **rc** file contains the actual return code from the **Ansible** process.
The **status** file contains one of three statuses suitable for displaying:
* success: The **Ansible** process finished successfully
* failed: The **Ansible** process failed
* timeout: The **Runner** timeout was reached (see :ref:`runnersettings`)
The **stdout** file contains the actual stdout as it appears at that moment.
.. _artifactevents:
Runner Artifact Job Events (Host and Playbook Events)
-----------------------------------------------------
**Runner** gathers the individual task and playbook events that are emitted as part of the **Ansible** run. This is extremely helpful if you don't want
to process or read the stdout returned from **Ansible** as it contains much more detail and status than just the plain stdout.
It does some of the heavy lifting of assigning order to the events and stores them in json format under the ``job_events`` artifact directory.
It also takes it a step further than normal **Ansible** callback plugins in that it will store the ``stdout`` associated with the event alongside the raw
event data (along with stdout line numbers). It also generates dummy events for stdout that didn't have corresponding host event data::
   {
     "uuid": "8c164553-8573-b1e0-76e1-000000000008",
     "parent_uuid": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
     "counter": 5,
     "stdout": "\r\nTASK [debug] *******************************************************************",
     "start_line": 5,
     "end_line": 7,
     "event": "playbook_on_task_start",
     "event_data": {
       "playbook": "test.yml",
       "playbook_uuid": "34437b34-addd-45ae-819a-4d8c9711e191",
       "play": "all",
       "play_uuid": "8c164553-8573-b1e0-76e1-000000000006",
       "play_pattern": "all",
       "task": "debug",
       "task_uuid": "8c164553-8573-b1e0-76e1-000000000008",
       "task_action": "debug",
       "task_path": "\/home\/mjones\/ansible\/ansible-runner\/demo\/project\/test.yml:3",
       "task_args": "msg=Test!",
       "name": "debug",
       "is_conditional": false,
       "pid": 10640
     },
     "pid": 10640,
     "created": "2018-06-07T14:54:58.410605"
   }
If the playbook runs to completion without getting killed, the last event will always be the ``stats`` event::
   {
     "uuid": "01c7090a-e202-4fb4-9ac7-079965729c86",
     "counter": 7,
     "stdout": "\r\nPLAY RECAP *********************************************************************\r\n\u001b[0;32mlocalhost,\u001b[0m : \u001b[0;32mok=2 \u001b[0m changed=0 unreachable=0 failed=0 \r\n",
     "start_line": 10,
     "end_line": 14,
     "event": "playbook_on_stats",
     "event_data": {
       "playbook": "test.yml",
       "playbook_uuid": "34437b34-addd-45ae-819a-4d8c9711e191",
       "changed": {},
       "dark": {},
       "failures": {},
       "ok": {
         "localhost,": 2
       },
       "processed": {
         "localhost,": 1
       },
       "skipped": {},
       "artifact_data": {},
       "pid": 10640
     },
     "pid": 10640,
     "created": "2018-06-07T14:54:58.424603"
   }
.. note::
   The **Runner module interface** presents a programmatic interface to these events that allows retrieving the final status and filtering task events by host.
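As an illustration, the following is a minimal sketch (not part of the module interface) that reads the stored events directly from disk, assuming a completed run whose artifacts live under ``demo/artifacts/<ident>`` as shown above:

.. code-block:: python

   import glob
   import json
   import os

   def iter_events(artifact_dir):
       """Yield job event dicts in counter order."""
       # Event files are named '<counter>-<uuid>.json' (see the tree above).
       paths = glob.glob(os.path.join(artifact_dir, 'job_events', '*.json'))
       paths.sort(key=lambda p: int(os.path.basename(p).split('-', 1)[0]))
       for path in paths:
           with open(path) as f:
               yield json.load(f)

   for event in iter_events('demo/artifacts/37f639a3-1f4f-4acb-abee-ea1898013a25'):
       print(event['counter'], event['event'])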
Runner Profiling Data Directory
-------------------------------
If resource profiling is enabled for **Runner**, the ``profiling_data`` directory will be populated with a set of files containing the profiling data::
.
├── profiling_data
│   ├── 0-34437b34-addd-45ae-819a-4d8c9711e191-cpu.json
│   ├── 0-34437b34-addd-45ae-819a-4d8c9711e191-memory.json
│   ├── 0-34437b34-addd-45ae-819a-4d8c9711e191-pids.json
│   ├── 1-8c164553-8573-b1e0-76e1-000000000006-cpu.json
│   ├── 1-8c164553-8573-b1e0-76e1-000000000006-memory.json
│   └── 1-8c164553-8573-b1e0-76e1-000000000006-pids.json
Each file is in `JSON text sequence format <https://tools.ietf.org/html/rfc7464#section-2.2>`_. Each line of the file will begin with a record separator (RS), continue with a JSON dictionary, and conclude with a line feed (LF) character. The following provides an example of what the resource files may look like. Note that since the RS and LF are control characters, they are not actually printed below::
==> 0-525400c9-c704-29a6-4107-00000000000c-cpu.json <==
{"timestamp": 1568977988.6844425, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 97.12799768097156}
{"timestamp": 1568977988.9394386, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 94.17538298892688}
{"timestamp": 1568977989.1901696, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 64.38272588006255}
{"timestamp": 1568977989.4594045, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 83.77387744259856}
==> 0-525400c9-c704-29a6-4107-00000000000c-memory.json <==
{"timestamp": 1568977988.4281094, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 36.21484375}
{"timestamp": 1568977988.6842303, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 57.87109375}
{"timestamp": 1568977988.939303, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 66.60546875}
{"timestamp": 1568977989.1900482, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 71.4609375}
{"timestamp": 1568977989.4592078, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 38.25390625}
==> 0-525400c9-c704-29a6-4107-00000000000c-pids.json <==
{"timestamp": 1568977988.4284189, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 5}
{"timestamp": 1568977988.6845856, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 6}
{"timestamp": 1568977988.939547, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 8}
{"timestamp": 1568977989.1902773, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 13}
{"timestamp": 1568977989.4593227, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 6}
* Resource profiling data is grouped by playbook task.
* For each task, there will be three files, corresponding to cpu, memory and pid count data.
* Each file contains a set of data points collected over the course of a playbook task.
* If a task executes quickly and the polling rate for a given metric is large enough, it is possible that no profiling data will be collected during the task's execution. In this case, no data file is created.
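Because each record is prefixed with the RS control character, these files are not plain JSON documents, so a small amount of parsing is needed to consume them. A minimal sketch (the file path passed on the command line is hypothetical):

.. code-block:: python

   import json
   import sys

   RS = '\x1e'  # ASCII record separator, per RFC 7464

   def read_profile_records(path):
       """Parse one profiling data file into a list of dicts."""
       records = []
       with open(path) as f:
           for line in f:  # each record is RS + JSON dict + LF
               payload = line.lstrip(RS).strip()
               if payload:
                   records.append(json.loads(payload))
       return records

   if __name__ == '__main__':
       records = read_profile_records(sys.argv[1])
       print('samples:', len(records), 'peak value:', max(r['value'] for r in records))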

36
docs/make.bat Normal file

@ -0,0 +1,36 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
set SPHINXPROJ=ansible-runner
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd

7
docs/modules.rst Normal file

@ -0,0 +1,7 @@
ansible_runner
==============
.. toctree::
:maxdepth: 4
ansible_runner

141
docs/python_interface.rst Normal file

@ -0,0 +1,141 @@
.. _python_interface:
Using Runner as a Python Module Interface to Ansible
====================================================
**Ansible Runner** is intended to provide a directly importable and usable API for interfacing with **Ansible** itself and exposes a few helper interfaces.
The modules center around the :class:`Runner <ansible_runner.runner.Runner>` object. The helper methods will return an instance of this object which provides an
interface to the results of executing the **Ansible** command.
**Ansible Runner** itself is a wrapper around **Ansible** execution and so adds plugins and interfaces to the system in order to gather extra information and
process/store it for use later.
Helper Interfaces
-----------------
The helper :mod:`interfaces <ansible_runner.interface>` provide a quick way of supplying the recommended inputs needed to launch a **Runner** process. These interfaces also allow overriding and providing inputs beyond the scope of what the standalone or container interfaces
support. You can see a full list of the inputs in the linked module documentation.
``run()`` helper function
-------------------------
:meth:`ansible_runner.interface.run`
When called, this function will take the inputs (either provided as direct inputs to the function or from the :ref:`inputdir`), and execute **Ansible**. It will run in the
foreground and return the :class:`Runner <ansible_runner.runner.Runner>` object when finished.
``run_async()`` helper function
-------------------------------
:meth:`ansible_runner.interface.run_async`
Takes the same arguments as :meth:`ansible_runner.interface.run` but will launch **Ansible** asynchronously and return a tuple containing
the ``thread`` object and a :class:`Runner <ansible_runner.runner.Runner>` object. The **Runner** object can be inspected during execution.
The ``Runner`` object
---------------------
The :class:`Runner <ansible_runner.runner.Runner>` object is returned as part of the execution of **Ansible** itself. Since it wraps both execution and output
it has some helper methods for inspecting the results. Other than the methods and indirect properties, the instance of the object itself contains two direct
properties:
* ``rc`` will represent the actual return code of the **Ansible** process
* ``status`` will represent the state and can be one of:
* ``unstarted``: This is a very brief state where the Runner task has been created but hasn't actually started yet.
* ``successful``: The ``ansible`` process finished successfully.
* ``failed``: The ``ansible`` process failed.
``Runner.stdout``
-----------------
The :class:`Runner <ansible_runner.runner.Runner>` object contains a property :attr:`ansible_runner.runner.Runner.stdout` which will return an open file
handle containing the ``stdout`` of the **Ansible** process.
``Runner.events``
-----------------
:attr:`ansible_runner.runner.Runner.events` is a ``generator`` that will return the :ref:`Playbook and Host Events<artifactevents>` as Python ``dict`` objects.
``Runner.stats``
----------------
:attr:`ansible_runner.runner.Runner.stats` is a property that will return the final ``playbook stats`` event from **Ansible** in the form of a Python ``dict``.
``Runner.host_events``
----------------------
:meth:`ansible_runner.runner.Runner.host_events` is a method that, given a hostname, will return a list of only the **Ansible** events executed on that host.
``Runner.get_fact_cache``
-------------------------
:meth:`ansible_runner.runner.Runner.get_fact_cache` is a method that, given a hostname, will return a dictionary containing the `Facts <https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variables-discovered-from-systems-facts>`_ stored for that host during execution.
``Runner.event_handler``
------------------------
A function passed to ``__init__`` of :class:`Runner <ansible_runner.runner.Runner>`, this is invoked every time an Ansible event is received. You can use this to
inspect/process/handle events as they come out of Ansible. This function should return ``True`` to keep the event; otherwise it will be discarded.
``Runner.cancel_callback``
--------------------------
A function passed to ``__init__`` of :class:`Runner <ansible_runner.runner.Runner>`, and to the :meth:`ansible_runner.interface.run` interface functions.
This function will be called for every iteration of the :meth:`ansible_runner.interface.run` event loop and should return ``True``
to inform **Runner** to cancel and shut down the **Ansible** process, or ``False`` to allow it to continue.
``Runner.finished_callback``
----------------------------
A function passed to ``__init__`` of :class:`Runner <ansible_runner.runner.Runner>`, and to the :meth:`ansible_runner.interface.run` interface functions.
This function will be called immediately before the **Runner** event loop finishes once **Ansible** has been shut down.
.. _runnerstatushandler:
``Runner.status_handler``
-------------------------
A function passed to ``__init__`` of :class:`Runner <ansible_runner.runner.Runner>` and to the :meth:`ansible_runner.interface.run` interface functions.
This function will be called any time the ``status`` changes. Expected values are:
* ``starting``: Preparing to start but not yet running
* ``running``: The **Ansible** task is running
* ``canceled``: The task was manually canceled either via callback or the CLI
* ``timeout``: The timeout configured in Runner Settings was reached (see :ref:`runnersettings`)
* ``failed``: The **Ansible** process failed
Usage examples
--------------
.. code-block:: python

   import ansible_runner

   r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='test.yml')
   print("{}: {}".format(r.status, r.rc))
   # successful: 0
   for each_host_event in r.events:
       print(each_host_event['event'])
   print("Final status:")
   print(r.stats)
.. code-block:: python

   import ansible_runner

   r = ansible_runner.run(private_data_dir='/tmp/demo', host_pattern='localhost', module='shell', module_args='whoami')
   print("{}: {}".format(r.status, r.rc))
   # successful: 0
   for each_host_event in r.events:
       print(each_host_event['event'])
   print("Final status:")
   print(r.stats)
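The handler callbacks described above can be combined with the helper functions. The following is a rough sketch (not taken from the shipped examples) that uses ``run_async`` together with ``event_handler`` and ``status_handler``, assuming the same ``/tmp/demo`` input directory:

.. code-block:: python

   import ansible_runner

   def on_event(event):
       # Called for every event; return True to keep it, False to discard it.
       print(event.get('event'))
       return True

   def on_status(status, **kwargs):
       # Called whenever the status changes (starting, running, ...).
       print('status:', status)

   thread, r = ansible_runner.run_async(
       private_data_dir='/tmp/demo',
       playbook='test.yml',
       event_handler=on_event,
       status_handler=on_status,
   )
   thread.join()  # wait for the background run to finish
   print(r.status, r.rc)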
Providing custom behavior and inputs
------------------------------------
**TODO**
The helper methods are just one possible entrypoint; extending the classes used by these helper methods can allow a lot more custom behavior and functionality.
Show:
* How :class:`Runner Config <ansible_runner.runner_config.RunnerConfig>` is used and how overriding the methods and behavior can work
* Show how custom cancel and status callbacks can be supplied.

37
docs/remote_jobs.rst Normal file

@ -0,0 +1,37 @@
.. _remote_jobs:
Remote job execution
====================
Ansible Runner supports the concept that a job run may be requested on one host but executed on another.
This capability is primarily intended to be used by `Receptor <http://www.github.com/project-receptor/receptor>`_.
Support for this in Runner involves a three-phase process.
- **Transmit**: Convert the job to a binary format that can be sent to the worker node.
- **Worker**: Actually execute the job.
- **Process**: Receive job results and process them.
The following command illustrates how the three phases work together::
$ ansible-runner transmit ./demo -p test.yml | ansible-runner worker | ansible-runner process ./demo
In this example, the ``ansible-runner transmit`` command is given a private data directory of ``./demo`` and told to select
the ``test.yml`` playbook from it. Instead of executing the playbook as ``ansible-runner run`` would do, the data dir
and command line parameters are converted to a compressed binary stream that is emitted on stdout. The ``transmit``
command generally takes the same command line parameters as the ``run`` command.
The ``ansible-runner worker`` command accepts this stream, runs the playbook, and generates a new compressed binary
stream of the resulting job events and artifacts. The ``worker`` command takes no parameters.
The ``ansible-runner process`` command accepts the result stream from the worker, fires all the normal callbacks,
and does job event processing. In the command above, this results in printing the playbook output and saving
artifacts to the data dir. The ``process`` command takes a data dir as a parameter, so it knows where to save artifacts.
Python API
----------
Python code importing Ansible Runner can make use of these facilities by passing the ``streamer`` parameter to
:meth:`ansible_runner.interface.run`. This parameter can be set to ``transmit``, ``worker`` or ``process`` to invoke
each of the three stages. Other parameters are the same as in the CLI.
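As a rough sketch, the three phases can be driven from Python in a single process (normally each phase would run on a different host); the ``_input``/``_output`` file-object keyword arguments here are assumptions about the streaming interface rather than documented parameters:

.. code-block:: python

   import io

   import ansible_runner

   # Transmit: serialize the job to a byte stream.
   outgoing = io.BytesIO()
   ansible_runner.run(private_data_dir='./demo', playbook='test.yml',
                      streamer='transmit', _output=outgoing)

   # Worker: execute the job, producing a result stream.
   outgoing.seek(0)
   results = io.BytesIO()
   ansible_runner.run(streamer='worker', _input=outgoing, _output=results)

   # Process: fire callbacks and save artifacts back into ./demo.
   results.seek(0)
   ansible_runner.run(private_data_dir='./demo', streamer='process', _input=results)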


@ -0,0 +1,30 @@
ansible\_runner.callbacks package
=================================
Submodules
----------
ansible\_runner.callbacks.awx\_display module
---------------------------------------------
.. automodule:: ansible_runner.callbacks.awx_display
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.callbacks.minimal module
----------------------------------------
.. automodule:: ansible_runner.callbacks.minimal
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: ansible_runner.callbacks
:members:
:undoc-members:
:show-inheritance:


@ -0,0 +1,54 @@
ansible\_runner.display\_callback package
=========================================
Submodules
----------
ansible\_runner.display\_callback.cleanup module
------------------------------------------------
.. automodule:: ansible_runner.display_callback.cleanup
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.display\_callback.display module
------------------------------------------------
.. automodule:: ansible_runner.display_callback.display
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.display\_callback.events module
-----------------------------------------------
.. automodule:: ansible_runner.display_callback.events
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.display\_callback.minimal module
------------------------------------------------
.. automodule:: ansible_runner.display_callback.minimal
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.display\_callback.module module
-----------------------------------------------
.. automodule:: ansible_runner.display_callback.module
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: ansible_runner.display_callback
:members:
:undoc-members:
:show-inheritance:


@ -0,0 +1,62 @@
ansible\_runner package
=======================
Subpackages
-----------
.. toctree::
ansible_runner.callbacks
ansible_runner.display_callback
Submodules
----------
ansible\_runner.exceptions module
---------------------------------
.. automodule:: ansible_runner.exceptions
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.interface module
--------------------------------
.. automodule:: ansible_runner.interface
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.runner module
-----------------------------
.. automodule:: ansible_runner.runner
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.runner\_config module
-------------------------------------
.. automodule:: ansible_runner.runner_config
:members:
:undoc-members:
:show-inheritance:
ansible\_runner.utils module
----------------------------
.. automodule:: ansible_runner.utils
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: ansible_runner
:members:
:undoc-members:
:show-inheritance:

7
docs/source/modules.rst Normal file

@ -0,0 +1,7 @@
ansible_runner
==============
.. toctree::
:maxdepth: 4
ansible_runner

136
docs/standalone.rst Normal file

@ -0,0 +1,136 @@
.. _standalone:
Using Runner as a standalone command line tool
==============================================
The **Ansible Runner** command line tool can be used as a standard command line interface to **Ansible** itself but is primarily intended
to fit into automation and pipeline workflows. Because of this, its workflow differs a bit from **Ansible**'s: you select between a few different modes to launch the command.
While you can launch **Runner** and provide it all of the inputs as arguments to the command line (as you do with **Ansible** itself),
there is another interface where inputs are gathered into a single location referred to in the command line parameters as ``private_data_dir``.
(see :ref:`inputdir`)
To view the parameters accepted by ``ansible-runner``::
$ ansible-runner --help
An example invocation of the standalone ``ansible-runner`` utility::
$ ansible-runner -p playbook.yml run /tmp/private
Where ``playbook.yml`` is the playbook from the ``/tmp/private/project`` directory, and ``run`` is the command mode you want to invoke **Runner** with.
The different **commands** that runner accepts are:
* ``run`` starts ``ansible-runner`` in the foreground and waits until the underlying **Ansible** process completes before returning
* ``start`` starts ``ansible-runner`` as a background daemon process and generates a pid file
* ``stop`` terminates an ``ansible-runner`` process that was launched in the background with ``start``
* ``is-alive`` checks the status of an ``ansible-runner`` process that was started in the background with ``start``
* ``adhoc`` will run ad-hoc ``ansible`` commands inside a containerized Ansible Execution Environment
* ``playbook`` will run ``ansible-playbook`` commands inside a containerized Ansible Execution Environment
While **Runner** is running it creates an ``artifacts`` directory (see :ref:`artifactdir`) regardless of what mode it was started
in. The resulting output and status from **Ansible** will be located here. You can control the exact location underneath the ``artifacts`` directory
with the ``-i IDENT`` argument to ``ansible-runner``, otherwise a random UUID will be generated.
Executing **Runner** in the foreground
--------------------------------------
When launching **Runner** with the ``run`` command, as above, the program will stay in the foreground and you'll see output just as you expect from a normal
**Ansible** process. **Runner** will still populate the ``artifacts`` directory, as mentioned in the previous section, to preserve the output and allow processing
of the artifacts after exit.
Executing **Runner** in the background
--------------------------------------
When launching **Runner** with the ``start`` command, the program will generate a pid file and move to the background. You can check its status with the
``is-alive`` command, or terminate it with the ``stop`` command. You can find the stdout, status, and return code in the ``artifacts`` directory.
Running Playbooks
-----------------
An example invocation using ``demo`` as the private data directory::
$ ansible-runner --playbook test.yml run demo
Running Modules Directly
------------------------
An example invocation using the ``debug`` module with ``demo`` as the private data directory::
$ ansible-runner -m debug --hosts localhost -a msg=hello run demo
Running Roles Directly
----------------------
An example invocation using ``demo`` as the private data directory and ``localhost`` as the target::
$ ansible-runner --role testrole --hosts localhost run demo
The roles directory can be provided with the ``--roles-path`` option. Role variables can be passed with ``--role-vars`` at runtime.
Running Ansible adhoc Commands with Execution Environments
----------------------------------------------------------
An example invocation using the ``ping`` module and ``localhost`` as target::
$ ansible-runner adhoc localhost -m ping
Note that the implicit ``localhost`` in this context is a containerized instantiation of an Ansible Execution Environment; as such, you will not get Ansible Facts about your own system if you use the ``setup`` module.
For more information, see :ref:`execution_environments`
Running Ansible ansible-playbook Commands with Execution Environments
---------------------------------------------------------------------
An example invocation using the ``demo.yml`` playbook and ``inventory.ini`` inventory file::
$ ansible-runner playbook demo.yml -i inventory.ini
Note that the implicit ``localhost`` in this context is a containerized instantiation of an Ansible Execution Environment; as such, you will not get Ansible Facts about your own system when using ``gather_facts: true`` and targeting ``localhost`` in your playbook without an explicit host definition in your inventory.
For more information, see :ref:`execution_environments`
Running with Process Isolation
------------------------------
**Runner** supports process isolation. Process isolation creates a new mount namespace where the root is on a tmpfs that is invisible from the host
and is automatically cleaned up when the last process exits. You can enable process isolation by providing the ``--process-isolation`` argument on
the command line. **Runner** as of version 2.0 defaults to using ``podman`` as the process isolation executable, but supports
using any executable that is compatible with the ``bubblewrap`` CLI arguments by passing in the ``--process-isolation-executable`` argument::
$ ansible-runner --process-isolation ...
**Runner** supports various process isolation arguments that allow you to provide configuration details to the process isolation executable. To view the complete
list of arguments accepted by ``ansible-runner``::
$ ansible-runner --help
Running with Directory Isolation
--------------------------------
If you need to execute multiple tasks in parallel that might conflict with each other, or if you want to make sure a single invocation of
Ansible/Runner doesn't pollute or overwrite the playbook content, you can give a base path::
$ ansible-runner --directory-isolation-base-path /tmp/runner
**Runner** will copy the project directory to a temporary directory created under that path, set it as the working directory, and execute from that location.
After the run, that temporary directory is cleaned up and removed.
.. _outputjson:
Outputting json (raw event data) to the console instead of normal output
------------------------------------------------------------------------
**Runner** supports outputting JSON event data structures directly to the console (and stdout file) instead of the standard **Ansible** output, thus
mimicking the behavior of the ``json`` output plugin. This is in addition to the event data that's already present in the artifact directory. All that is needed
is to supply the ``-j`` argument on the command line::
$ ansible-runner ... -j ...
Cleaning up artifact directories
--------------------------------
Using the command line argument ``--rotate-artifacts`` allows you to control the number of artifact directories that are present. Giving a number as the parameter
for this argument causes **Runner** to clean up old artifact directories. The default value of ``0`` disables artifact directory cleanup.
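For example, a hypothetical invocation that keeps only the five most recent artifact directories::

   $ ansible-runner --rotate-artifacts 5 -p test.yml run demo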


@ -0,0 +1,6 @@
ansible-runner (%VERSION%-%RELEASE%) unstable; urgency=low
* %VERSION% release
-- Ansible, Inc. <info@ansible.com> %DATE%

1
packaging/debian/compat Normal file

@ -0,0 +1 @@
9

19
packaging/debian/control Normal file

@ -0,0 +1,19 @@
Source: ansible-runner
Section: admin
Priority: optional
Maintainer: Ansible <info@ansible.com>
Build-Depends: debhelper (>= 9), dh-python, python-all, python-setuptools
Standards-Version: 4.1.3
Homepage: https://www.ansible.com
Vcs-Git: https://github.com/ansible/ansible-runner.git
Package: ansible-runner
Architecture: all
Depends: ${python:Depends}, ${misc:Depends}, python-pexpect (>= 4.5)
Recommends: ansible (>= 2.1)
Description: interfaces with Ansible from other systems (Python 2)
A tool and python library that helps when interfacing with Ansible
directly or as part of another system whether that be through a
container image interface, as a standalone tool, or as a Python
module that can be imported. The goal is to provide a stable and
consistent interface abstraction to Ansible.

172
packaging/debian/copyright Normal file

@ -0,0 +1,172 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: ansible-runner
Source: https://github.com/ansible/ansible-runner
Files: *
Copyright: 2016 Ansible by Red Hat <info@ansible.com>
License: Apache
_Version 2.0, January 2004_
_<http://www.apache.org/licenses/>_
.
### Terms and Conditions for use, reproduction, and distribution
.
#### 1. Definitions
.
“License” shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
.
“Licensor” shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
.
“Legal Entity” shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, “control” means **(i)** the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
outstanding shares, or **(iii)** beneficial ownership of such entity.
.
“You” (or “Your”) shall mean an individual or Legal Entity exercising
permissions granted by this License.
.
“Source” form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
.
“Object” form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
.
“Work” shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
.
“Derivative Works” shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
.
“Contribution” shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
“submitted” means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as “Not a Contribution.”
.
“Contributor” shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
.
#### 2. Grant of Copyright License
.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
.
#### 3. Grant of Patent License
.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
.
#### 4. Redistribution
.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
.
* **(a)** You must give any other recipients of the Work or Derivative Works a copy of
this License; and
* **(b)** You must cause any modified files to carry prominent notices stating that You
changed the files; and
* **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
.
#### 5. Submission of Contributions
.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
.
#### 6. Trademarks
.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
.
#### 7. Disclaimer of Warranty
.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
.
#### 8. Limitation of Liability
.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
.
#### 9. Accepting Warranty or Additional Liability
.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.


@ -0,0 +1,10 @@
FROM debian:buster
RUN apt-get update
RUN apt-get install -y \
make debhelper dh-python devscripts python-all python-setuptools python-pip \
python-backports.functools-lru-cache pinentry-tty
RUN update-alternatives --config pinentry
RUN pip install -IU pip setuptools
RUN pip install -IU poetry ansible


@ -0,0 +1,14 @@
---
version: '3'
services:
deb-builder:
build: .
environment:
RELEASE:
OFFICIAL:
volumes:
- ../../../:/ansible-runner
- ${GPG_SIGNING_KEY}:/signing_key.asc
entrypoint: ["/bin/bash", "-c"]
working_dir: /ansible-runner
privileged: true


@ -0,0 +1 @@
pexpect python-pexpect (>= 4.5)

10
packaging/debian/rules Executable file

@ -0,0 +1,10 @@
#!/usr/bin/make -f
# See debhelper(7) (uncomment to enable)
# output every command that modifies files on the build system.
#export DH_VERBOSE = 1
export PYBUILD_NAME=ansible-runner
export DEB_BUILD_OPTIONS=nocheck
%:
dh $@ --with python2 --buildsystem=pybuild


@ -0,0 +1 @@
3.0 (quilt)


@ -0,0 +1,47 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2019-12-18
# @Filename: create_setup.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
# Original code here:
# https://github.com/sdss/flicamera/blob/master/create_setup.py
# This is a temporary solution for the fact that pip install . fails with
# poetry when there is no setup.py and an extension needs to be compiled.
# See https://github.com/python-poetry/poetry/issues/1516. Running this
# script creates a setup.py filled out with information generated by
# poetry when parsing the pyproject.toml.
import os
import sys
# If there is a global installation of poetry, prefer that.
poetry_python_lib = os.path.expanduser('~/.poetry/lib')
sys.path.append(os.path.realpath(poetry_python_lib))
try:
from poetry.core.masonry.builders.sdist import SdistBuilder
from poetry.factory import Factory
except (ImportError, ModuleNotFoundError) as ee:
raise ImportError('install poetry by doing pip install poetry to use '
f'this script: {ee}')
# Generate a Poetry object that knows about the metadata in pyproject.toml
factory = Factory()
poetry = factory.create_poetry(os.path.dirname(__file__))
# Use the SdistBuilder to generate a blob for setup.py
sdist_builder = SdistBuilder(poetry, None, None)
setuppy_blob = sdist_builder.build_setup()
with open('setup.py', 'wb') as unit:
unit.write(setuppy_blob)
unit.write(b'\n# This setup.py was autogenerated using poetry.\n')


@ -0,0 +1,9 @@
FROM centos:7
RUN yum install -y epel-release
RUN yum install -y make mock python3 which git gcc python3-devel
# Fix output of rpm --eval '%{?dist}'
RUN sed -i "s/.el7.centos/.el7/g" /etc/rpm/macros.dist
RUN pip3 install -IU poetry ansible


@ -0,0 +1,6 @@
FROM centos:8
RUN dnf install -y epel-release
RUN yum install -y make mock python3-pip which git gcc python3-devel
RUN pip3 install -IU poetry ansible


@ -0,0 +1,155 @@
%global pypi_name ansible-runner
%global python3_sitelib /usr/lib/python3.6/site-packages/
%define _python_bytecompile_errors_terminate_build 0
%if 0%{?fedora} || 0%{?rhel} > 7
%bcond_with python2
%bcond_without python3
%else
%bcond_without python2
%bcond_with python3
%endif
Name: %{pypi_name}
Version: {{ version }}
Release: {{ release }}%{?dist}
Summary: A tool and python library to interface with Ansible
License: ASL 2.0
URL: https://github.com/ansible/ansible-runner
Source0: https://github.com/ansible/%{name}/archive/%{version}.tar.gz?/%{name}-%{version}-{{ release }}.tar.gz
BuildArch: noarch
%if %{with python2}
BuildRequires: python-rpm-macros
BuildRequires: python2-setuptools
Requires: python2-%{pypi_name} = %{version}-%{release}
%endif
%if %{with python3}
BuildRequires: python3-devel
BuildRequires: python3-setuptools
Requires: python3-%{pypi_name} = %{version}-%{release}
%endif
%description
Ansible Runner is a tool and python library that helps when interfacing with
Ansible from other systems whether through a container image interface, as a
standalone tool, or imported into a python project.
%if %{with python2}
%package -n python2-%{pypi_name}
Summary: %{summary}
%{?python_provide:%python_provide python2-%{pypi_name}}
Requires: python-setuptools
Requires: python-daemon
Requires: pexpect >= 4.6
Requires: PyYAML
Requires: python-six
Requires: python-lockfile
%description -n python2-%{pypi_name}
Ansible Runner is a tool and python library that helps when interfacing with
Ansible from other systems whether through a container image interface, as a
standalone tool, or imported into a python project.
%endif
%if %{with python3}
%package -n python3-%{pypi_name}
Summary: %{summary}
%{?python_provide:%python_provide python3-%{pypi_name}}
Requires: python3-pyyaml
Requires: python3-setuptools
Requires: python3-daemon
Requires: python3-six
Requires: python3dist(pexpect) >= 4.6
Requires: python3dist(lockfile)
%description -n python3-%{pypi_name}
Ansible Runner is a tool and python library that helps when interfacing with
Ansible from other systems whether through a container image interface, as a
standalone tool, or imported into a python project.
%endif
%prep
%autosetup -n %{pypi_name}-%{version}
# Remove bundled egg-info
rm -rf %{pypi_name}.egg-info
%global py_setup setup.py
%build
%if %{with python2}
export RHEL_ALLOW_PYTHON2_FOR_BUILD=1
python2 setup.py build
%endif
%if %{with python3}
python3 setup.py build
%endif
%install
# Must do the subpackages' install first because the scripts in /usr/bin are
# overwritten with every setup.py install.
%if %{with python3}
python3 setup.py install -O1 --skip-build --root %{buildroot}
cp %{buildroot}/%{_bindir}/ansible-runner %{buildroot}/%{_bindir}/ansible-runner-%{python3_version}
ln -s %{_bindir}/ansible-runner-%{python3_version} %{buildroot}/%{_bindir}/ansible-runner-3
%endif
%if %{with python2}
export RHEL_ALLOW_PYTHON2_FOR_BUILD=1
python2 setup.py install -O1 --skip-build --root %{buildroot}
cp %{buildroot}/%{_bindir}/ansible-runner %{buildroot}/%{_bindir}/ansible-runner-%{python2_version}
ln -s %{_bindir}/ansible-runner-%{python2_version} %{buildroot}/%{_bindir}/ansible-runner-2
%endif
%files
%defattr(-,root,root)
%if %{with python2}
%files -n python2-%{pypi_name}
%{_bindir}/ansible-runner
%{_bindir}/ansible-runner-2
%{_bindir}/ansible-runner-%{python2_version}
%{python_sitelib}/*
%endif
%if %{with python3}
%files -n python3-%{pypi_name}
%{python3_sitelib}/*
%{_bindir}/ansible-runner
%{_bindir}/ansible-runner-3
%{_bindir}/ansible-runner-%{python3_version}
%endif
%changelog
* Thu Mar 19 2020 Ryan Petrello <rpetrell@redhat.com> - 1.4.6-1
- Ansible Runner 1.4.6-1
* Thu Mar 19 2020 Matthew Jones <matburt@redhat.com> - 1.4.5-1
- Ansible Runner 1.4.5-1
* Tue Feb 25 2020 Yanis Guenane <yguenane@redhat.com> - 1.4.4-3
- Ansible Runner 1.4.4-3
* Fri Oct 25 2019 Matthew Jones <matburt@redhat.com> - 1.4.4-1
- Ansible Runner 1.4.4-1
* Thu Oct 17 2019 Matthew Jones <matburt@redhat.com> - 1.4.2-1
- Ansible Runner 1.4.2-1
* Thu Oct 03 2019 Matthew Jones <matburt@redhat.com> - 1.4.1-1
- Ansible Runner 1.4.1-1
* Mon Sep 23 2019 Shane McDonald <shanemcd@redhat.com> - 1.4.0-1
- Ansible Runner 1.4.0-1
- Support for EL 7.7 (defaults to python2)
* Wed Apr 24 2019 Shane McDonald <shanemcd@redhat.com> - 1.3.4-1
- Ansible Runner 1.3.4-1
- Adopted modified upstream spec file for python3 support


@ -0,0 +1,21 @@
---
version: '3'
services:
rpm-builder:
build:
dockerfile: Dockerfile.${MOCK_CONFIG}
context: .
image: runner-rpm-builder:${MOCK_CONFIG}
environment:
MOCK_BIN: "mock --old-chroot"
MOCK_CONFIG:
RELEASE:
OFFICIAL:
volumes:
- ../../:/ansible-runner
- mock-cache:/var/cache/mock
entrypoint: ["/bin/bash", "-c"]
working_dir: /ansible-runner
privileged: true
volumes:
mock-cache:

638
poetry.lock generated Normal file

@ -0,0 +1,638 @@
[[package]]
category = "dev"
description = "apipkg: namespace control and lazy-import mechanism"
name = "apipkg"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "1.5"
[[package]]
category = "dev"
description = "Atomic file writes."
name = "atomicwrites"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "1.4.0"
[[package]]
category = "dev"
description = "Classes Without Boilerplate"
name = "attrs"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "19.3.0"
[package.extras]
azure-pipelines = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "pytest-azurepipelines"]
dev = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "sphinx", "pre-commit"]
docs = ["sphinx", "zope.interface"]
tests = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface"]
[[package]]
category = "dev"
description = "Cross-platform colored terminal text."
marker = "sys_platform == \"win32\""
name = "colorama"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
version = "0.4.3"
[[package]]
category = "dev"
description = "Updated configparser from Python 3.7 for Python 2.6+."
marker = "python_version < \"3.2\""
name = "configparser"
optional = false
python-versions = ">=2.6"
version = "4.0.2"
[package.extras]
docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"]
testing = ["pytest (>=3.5,<3.7.3 || >3.7.3)", "pytest-checkdocs (>=1.2)", "pytest-flake8", "pytest-black-multipy"]
[[package]]
category = "dev"
description = "Backports and enhancements for the contextlib module"
marker = "python_version < \"3.4\""
name = "contextlib2"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "0.6.0.post1"
[[package]]
category = "main"
description = "Docutils -- Python Documentation Utilities"
name = "docutils"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
version = "0.16"
[[package]]
category = "dev"
description = "Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4"
marker = "python_version < \"3.4\""
name = "enum34"
optional = false
python-versions = "*"
version = "1.1.10"
[[package]]
category = "dev"
description = "execnet: rapid multi-Python deployment"
name = "execnet"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "1.7.1"
[package.dependencies]
apipkg = ">=1.4"
[package.extras]
testing = ["pre-commit"]
[[package]]
category = "dev"
description = "the modular source code checker: pep8 pyflakes and co"
name = "flake8"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
version = "3.8.3"
[package.dependencies]
mccabe = ">=0.6.0,<0.7.0"
pycodestyle = ">=2.6.0a1,<2.7.0"
pyflakes = ">=2.2.0,<2.3.0"
[package.dependencies.configparser]
python = "<3.2"
version = "*"
[package.dependencies.enum34]
python = "<3.4"
version = "*"
[package.dependencies.functools32]
python = "<3.2"
version = "*"
[package.dependencies.importlib-metadata]
python = "<3.8"
version = "*"
[package.dependencies.typing]
python = "<3.5"
version = "*"
[[package]]
category = "dev"
description = "Python function signatures from PEP362 for Python 2.6, 2.7 and 3.2+"
name = "funcsigs"
optional = false
python-versions = "*"
version = "1.0.2"
[[package]]
category = "dev"
description = "Backport of the functools module from Python 3.2.3 for use on 2.7 and PyPy."
marker = "python_version < \"3.2\""
name = "functools32"
optional = false
python-versions = "*"
version = "3.2.3-2"
[[package]]
category = "dev"
description = "Read metadata from Python packages"
marker = "python_version < \"3.8\""
name = "importlib-metadata"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
version = "1.7.0"
[package.dependencies]
zipp = ">=0.5"
[package.dependencies.configparser]
python = "<3"
version = ">=3.5"
[package.dependencies.contextlib2]
python = "<3"
version = "*"
[package.dependencies.pathlib2]
python = "<3"
version = "*"
[package.extras]
docs = ["sphinx", "rst.linker"]
testing = ["packaging", "pep517", "importlib-resources (>=1.3)"]
[[package]]
category = "main"
description = "Platform-independent file locking module"
name = "lockfile"
optional = false
python-versions = "*"
version = "0.12.2"
[[package]]
category = "dev"
description = "McCabe checker, plugin for flake8"
name = "mccabe"
optional = false
python-versions = "*"
version = "0.6.1"
[[package]]
category = "dev"
description = "Rolling backport of unittest.mock for all Pythons"
name = "mock"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "3.0.5"
[package.dependencies]
six = "*"
[package.dependencies.funcsigs]
python = "<3.3"
version = ">=1"
[package.extras]
build = ["twine", "wheel", "blurb"]
docs = ["sphinx"]
test = ["pytest", "pytest-cov"]
[[package]]
category = "dev"
description = "Rolling backport of unittest.mock for all Pythons"
name = "mock"
optional = false
python-versions = ">=3.6"
version = "4.0.2"
[package.extras]
build = ["twine", "wheel", "blurb"]
docs = ["sphinx"]
test = ["pytest", "pytest-cov"]
[[package]]
category = "dev"
description = "More routines for operating on iterables, beyond itertools"
name = "more-itertools"
optional = false
python-versions = "*"
version = "5.0.0"
[package.dependencies]
six = ">=1.0.0,<2.0.0"
[[package]]
category = "dev"
description = "Object-oriented filesystem paths"
name = "pathlib2"
optional = false
python-versions = "*"
version = "2.3.5"
[package.dependencies]
six = "*"
[package.dependencies.scandir]
python = "<3.5"
version = "*"
[[package]]
category = "dev"
description = "Utility library for gitignore style pattern matching of file paths."
name = "pathspec"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
version = "0.8.0"
[[package]]
category = "main"
description = "Pexpect allows easy control of interactive console applications."
name = "pexpect"
optional = false
python-versions = "*"
version = "4.8.0"
[package.dependencies]
ptyprocess = ">=0.5"
[[package]]
category = "dev"
description = "plugin and hook calling mechanisms for python"
name = "pluggy"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "0.13.1"
[package.dependencies]
[package.dependencies.importlib-metadata]
python = "<3.8"
version = ">=0.12"
[package.extras]
dev = ["pre-commit", "tox"]
[[package]]
category = "main"
description = "Run a subprocess in a pseudo terminal"
name = "ptyprocess"
optional = false
python-versions = "*"
version = "0.6.0"
[[package]]
category = "dev"
description = "library with cross-python path, ini-parsing, io, code, log facilities"
name = "py"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "1.9.0"
[[package]]
category = "dev"
description = "Python style guide checker"
name = "pycodestyle"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "2.6.0"
[[package]]
category = "dev"
description = "passive checker of Python programs"
name = "pyflakes"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "2.2.0"
[[package]]
category = "dev"
description = "pytest: simple powerful testing with Python"
name = "pytest"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "4.4.0"
[package.dependencies]
atomicwrites = ">=1.0"
attrs = ">=17.4.0"
colorama = "*"
pluggy = ">=0.9"
py = ">=1.5.0"
setuptools = "*"
six = ">=1.10.0"
[[package.dependencies.more-itertools]]
python = "<2.8"
version = ">=4.0.0,<6.0.0"
[[package.dependencies.more-itertools]]
python = ">=2.8"
version = ">=4.0.0"
[package.dependencies.funcsigs]
python = "<3.0"
version = ">=1.0"
[package.dependencies.pathlib2]
python = "<3.6"
version = ">=2.2.0"
[package.extras]
testing = ["argcomplete", "hypothesis (>=3.56)", "nose", "requests", "mock"]
[[package]]
category = "dev"
description = "run tests in isolated forked subprocesses"
name = "pytest-forked"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
version = "1.3.0"
[package.dependencies]
py = "*"
pytest = ">=3.10"
[[package]]
category = "dev"
description = "pytest xdist plugin for distributed testing and loop-on-failing modes"
name = "pytest-xdist"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
version = "1.32.0"
[package.dependencies]
execnet = ">=1.1"
pytest = ">=4.4.0"
pytest-forked = "*"
six = "*"
[package.extras]
testing = ["filelock"]
[[package]]
category = "main"
description = "Library to implement a well-behaved Unix daemon process."
name = "python-daemon"
optional = false
python-versions = "*"
version = "2.2.4"
[package.dependencies]
docutils = "*"
lockfile = ">=0.10"
setuptools = "*"
[package.extras]
test = ["coverage", "docutils", "mock (>=1.3)", "testscenarios (>=0.4)", "testtools", "unittest2 (>=0.5.1)"]
[[package]]
category = "main"
description = "YAML parser and emitter for Python"
name = "pyyaml"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
version = "5.3.1"
[[package]]
category = "dev"
description = "scandir, a better directory iterator and faster os.walk()"
name = "scandir"
optional = false
python-versions = "*"
version = "1.10.0"
[[package]]
category = "main"
description = "Python 2 and 3 compatibility utilities"
name = "six"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
version = "1.15.0"
[[package]]
category = "dev"
description = "Type Hints for Python"
marker = "python_version < \"3.5\""
name = "typing"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "3.7.4.3"
[[package]]
category = "dev"
description = "A linter for YAML files."
name = "yamllint"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "1.24.2"
[package.dependencies]
pathspec = ">=0.5.3"
pyyaml = "*"
[[package]]
category = "dev"
description = "Backport of pathlib-compatible object wrapper for zip files"
marker = "python_version < \"3.8\""
name = "zipp"
optional = false
python-versions = ">=2.7"
version = "1.2.0"
[package.dependencies]
[package.dependencies.contextlib2]
python = "<3.4"
version = "*"
[package.extras]
docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"]
testing = ["pathlib2", "unittest2", "jaraco.itertools", "func-timeout"]
[[package]]
category = "dev"
description = "Backport of pathlib-compatible object wrapper for zip files"
marker = "python_version < \"3.8\""
name = "zipp"
optional = false
python-versions = ">=3.6"
version = "3.1.0"
[package.extras]
docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"]
testing = ["jaraco.itertools", "func-timeout"]
[metadata]
content-hash = "7fb57c4727acfa64f1966e43845e0cbeee01c1e39def2e1b9f49ac3c413114c1"
python-versions = "~2.7 || >=3.6"
[metadata.files]
apipkg = [
{file = "apipkg-1.5-py2.py3-none-any.whl", hash = "sha256:58587dd4dc3daefad0487f6d9ae32b4542b185e1c36db6993290e7c41ca2b47c"},
{file = "apipkg-1.5.tar.gz", hash = "sha256:37228cda29411948b422fae072f57e31d3396d2ee1c9783775980ee9c9990af6"},
]
atomicwrites = [
{file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"},
{file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"},
]
attrs = [
{file = "attrs-19.3.0-py2.py3-none-any.whl", hash = "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c"},
{file = "attrs-19.3.0.tar.gz", hash = "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72"},
]
colorama = [
{file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"},
{file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"},
]
configparser = [
{file = "configparser-4.0.2-py2.py3-none-any.whl", hash = "sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c"},
{file = "configparser-4.0.2.tar.gz", hash = "sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df"},
]
contextlib2 = [
{file = "contextlib2-0.6.0.post1-py2.py3-none-any.whl", hash = "sha256:3355078a159fbb44ee60ea80abd0d87b80b78c248643b49aa6d94673b413609b"},
{file = "contextlib2-0.6.0.post1.tar.gz", hash = "sha256:01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e"},
]
docutils = [
{file = "docutils-0.16-py2.py3-none-any.whl", hash = "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af"},
{file = "docutils-0.16.tar.gz", hash = "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"},
]
enum34 = [
{file = "enum34-1.1.10-py2-none-any.whl", hash = "sha256:a98a201d6de3f2ab3db284e70a33b0f896fbf35f8086594e8c9e74b909058d53"},
{file = "enum34-1.1.10-py3-none-any.whl", hash = "sha256:c3858660960c984d6ab0ebad691265180da2b43f07e061c0f8dca9ef3cffd328"},
{file = "enum34-1.1.10.tar.gz", hash = "sha256:cce6a7477ed816bd2542d03d53db9f0db935dd013b70f336a95c73979289f248"},
]
execnet = [
{file = "execnet-1.7.1-py2.py3-none-any.whl", hash = "sha256:d4efd397930c46415f62f8a31388d6be4f27a91d7550eb79bc64a756e0056547"},
{file = "execnet-1.7.1.tar.gz", hash = "sha256:cacb9df31c9680ec5f95553976c4da484d407e85e41c83cb812aa014f0eddc50"},
]
flake8 = [
{file = "flake8-3.8.3-py2.py3-none-any.whl", hash = "sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c"},
{file = "flake8-3.8.3.tar.gz", hash = "sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208"},
]
funcsigs = [
{file = "funcsigs-1.0.2-py2.py3-none-any.whl", hash = "sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca"},
{file = "funcsigs-1.0.2.tar.gz", hash = "sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50"},
]
functools32 = [
{file = "functools32-3.2.3-2.tar.gz", hash = "sha256:f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d"},
{file = "functools32-3.2.3-2.zip", hash = "sha256:89d824aa6c358c421a234d7f9ee0bd75933a67c29588ce50aaa3acdf4d403fa0"},
]
importlib-metadata = [
{file = "importlib_metadata-1.7.0-py2.py3-none-any.whl", hash = "sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070"},
{file = "importlib_metadata-1.7.0.tar.gz", hash = "sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83"},
]
lockfile = [
{file = "lockfile-0.12.2-py2.py3-none-any.whl", hash = "sha256:6c3cb24f344923d30b2785d5ad75182c8ea7ac1b6171b08657258ec7429d50fa"},
{file = "lockfile-0.12.2.tar.gz", hash = "sha256:6aed02de03cba24efabcd600b30540140634fc06cfa603822d508d5361e9f799"},
]
mccabe = [
{file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
{file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
]
mock = [
{file = "mock-3.0.5-py2.py3-none-any.whl", hash = "sha256:d157e52d4e5b938c550f39eb2fd15610db062441a9c2747d3dbfa9298211d0f8"},
{file = "mock-3.0.5.tar.gz", hash = "sha256:83657d894c90d5681d62155c82bda9c1187827525880eda8ff5df4ec813437c3"},
{file = "mock-4.0.2-py3-none-any.whl", hash = "sha256:3f9b2c0196c60d21838f307f5825a7b86b678cedc58ab9e50a8988187b4d81e0"},
{file = "mock-4.0.2.tar.gz", hash = "sha256:dd33eb70232b6118298d516bbcecd26704689c386594f0f3c4f13867b2c56f72"},
]
more-itertools = [
{file = "more-itertools-5.0.0.tar.gz", hash = "sha256:38a936c0a6d98a38bcc2d03fdaaedaba9f412879461dd2ceff8d37564d6522e4"},
{file = "more_itertools-5.0.0-py2-none-any.whl", hash = "sha256:c0a5785b1109a6bd7fac76d6837fd1feca158e54e521ccd2ae8bfe393cc9d4fc"},
{file = "more_itertools-5.0.0-py3-none-any.whl", hash = "sha256:fe7a7cae1ccb57d33952113ff4fa1bc5f879963600ed74918f1236e212ee50b9"},
]
pathlib2 = [
{file = "pathlib2-2.3.5-py2.py3-none-any.whl", hash = "sha256:0ec8205a157c80d7acc301c0b18fbd5d44fe655968f5d947b6ecef5290fc35db"},
{file = "pathlib2-2.3.5.tar.gz", hash = "sha256:6cd9a47b597b37cc57de1c05e56fb1a1c9cc9fab04fe78c29acd090418529868"},
]
pathspec = [
{file = "pathspec-0.8.0-py2.py3-none-any.whl", hash = "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0"},
{file = "pathspec-0.8.0.tar.gz", hash = "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061"},
]
pexpect = [
{file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"},
{file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"},
]
pluggy = [
{file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"},
{file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"},
]
ptyprocess = [
{file = "ptyprocess-0.6.0-py2.py3-none-any.whl", hash = "sha256:d7cc528d76e76342423ca640335bd3633420dc1366f258cb31d05e865ef5ca1f"},
{file = "ptyprocess-0.6.0.tar.gz", hash = "sha256:923f299cc5ad920c68f2bc0bc98b75b9f838b93b599941a6b63ddbc2476394c0"},
]
py = [
{file = "py-1.9.0-py2.py3-none-any.whl", hash = "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2"},
{file = "py-1.9.0.tar.gz", hash = "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342"},
]
pycodestyle = [
{file = "pycodestyle-2.6.0-py2.py3-none-any.whl", hash = "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367"},
{file = "pycodestyle-2.6.0.tar.gz", hash = "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"},
]
pyflakes = [
{file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"},
{file = "pyflakes-2.2.0.tar.gz", hash = "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"},
]
pytest = [
{file = "pytest-4.4.0-py2.py3-none-any.whl", hash = "sha256:13c5e9fb5ec5179995e9357111ab089af350d788cbc944c628f3cde72285809b"},
{file = "pytest-4.4.0.tar.gz", hash = "sha256:f21d2f1fb8200830dcbb5d8ec466a9c9120e20d8b53c7585d180125cce1d297a"},
]
pytest-forked = [
{file = "pytest-forked-1.3.0.tar.gz", hash = "sha256:6aa9ac7e00ad1a539c41bec6d21011332de671e938c7637378ec9710204e37ca"},
{file = "pytest_forked-1.3.0-py2.py3-none-any.whl", hash = "sha256:dc4147784048e70ef5d437951728825a131b81714b398d5d52f17c7c144d8815"},
]
pytest-xdist = [
{file = "pytest-xdist-1.32.0.tar.gz", hash = "sha256:1d4166dcac69adb38eeaedb88c8fada8588348258a3492ab49ba9161f2971129"},
{file = "pytest_xdist-1.32.0-py2.py3-none-any.whl", hash = "sha256:ba5ec9fde3410bd9a116ff7e4f26c92e02fa3d27975ef3ad03f330b3d4b54e91"},
]
python-daemon = [
{file = "python-daemon-2.2.4.tar.gz", hash = "sha256:57c84f50a04d7825515e4dbf3a31c70cc44414394a71608dee6cfde469e81766"},
{file = "python_daemon-2.2.4-py2.py3-none-any.whl", hash = "sha256:a0d5dc0b435a02c7e0b401e177a7c17c3f4c7b4e22e2d06271122c8fec5f8946"},
]
pyyaml = [
{file = "PyYAML-5.3.1-cp27-cp27m-win32.whl", hash = "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f"},
{file = "PyYAML-5.3.1-cp27-cp27m-win_amd64.whl", hash = "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76"},
{file = "PyYAML-5.3.1-cp35-cp35m-win32.whl", hash = "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2"},
{file = "PyYAML-5.3.1-cp35-cp35m-win_amd64.whl", hash = "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c"},
{file = "PyYAML-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2"},
{file = "PyYAML-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648"},
{file = "PyYAML-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"},
{file = "PyYAML-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf"},
{file = "PyYAML-5.3.1-cp38-cp38-win32.whl", hash = "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97"},
{file = "PyYAML-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee"},
{file = "PyYAML-5.3.1.tar.gz", hash = "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d"},
]
scandir = [
{file = "scandir-1.10.0-cp27-cp27m-win32.whl", hash = "sha256:92c85ac42f41ffdc35b6da57ed991575bdbe69db895507af88b9f499b701c188"},
{file = "scandir-1.10.0-cp27-cp27m-win_amd64.whl", hash = "sha256:cb925555f43060a1745d0a321cca94bcea927c50114b623d73179189a4e100ac"},
{file = "scandir-1.10.0-cp34-cp34m-win32.whl", hash = "sha256:2c712840c2e2ee8dfaf36034080108d30060d759c7b73a01a52251cc8989f11f"},
{file = "scandir-1.10.0-cp34-cp34m-win_amd64.whl", hash = "sha256:2586c94e907d99617887daed6c1d102b5ca28f1085f90446554abf1faf73123e"},
{file = "scandir-1.10.0-cp35-cp35m-win32.whl", hash = "sha256:2b8e3888b11abb2217a32af0766bc06b65cc4a928d8727828ee68af5a967fa6f"},
{file = "scandir-1.10.0-cp35-cp35m-win_amd64.whl", hash = "sha256:8c5922863e44ffc00c5c693190648daa6d15e7c1207ed02d6f46a8dcc2869d32"},
{file = "scandir-1.10.0-cp36-cp36m-win32.whl", hash = "sha256:2ae41f43797ca0c11591c0c35f2f5875fa99f8797cb1a1fd440497ec0ae4b022"},
{file = "scandir-1.10.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7d2d7a06a252764061a020407b997dd036f7bd6a175a5ba2b345f0a357f0b3f4"},
{file = "scandir-1.10.0-cp37-cp37m-win32.whl", hash = "sha256:67f15b6f83e6507fdc6fca22fedf6ef8b334b399ca27c6b568cbfaa82a364173"},
{file = "scandir-1.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b24086f2375c4a094a6b51e78b4cf7ca16c721dcee2eddd7aa6494b42d6d519d"},
{file = "scandir-1.10.0.tar.gz", hash = "sha256:4d4631f6062e658e9007ab3149a9b914f3548cb38bfb021c64f39a025ce578ae"},
]
six = [
{file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"},
{file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"},
]
typing = [
{file = "typing-3.7.4.3-py2-none-any.whl", hash = "sha256:283d868f5071ab9ad873e5e52268d611e851c870a2ba354193026f2dfb29d8b5"},
{file = "typing-3.7.4.3.tar.gz", hash = "sha256:1187fb9c82fd670d10aa07bbb6cfcfe4bdda42d6fab8d5134f04e8c4d0b71cc9"},
]
yamllint = [
{file = "yamllint-1.24.2-py2.py3-none-any.whl", hash = "sha256:ad3b0d30317dca005d7af99ff27248d459cae2d931a2ff06a134b67bcd405b30"},
{file = "yamllint-1.24.2.tar.gz", hash = "sha256:40b68de6bacdccec1585dbd54072731b10da7fc2f9cfd96517a71f066208b61f"},
]
zipp = [
{file = "zipp-1.2.0-py2.py3-none-any.whl", hash = "sha256:e0d9e63797e483a30d27e09fffd308c59a700d365ec34e93cc100844168bf921"},
{file = "zipp-1.2.0.tar.gz", hash = "sha256:c70410551488251b0fee67b460fb9a536af8d6f9f008ad10ac51f615b6a521b1"},
{file = "zipp-3.1.0-py3-none-any.whl", hash = "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b"},
{file = "zipp-3.1.0.tar.gz", hash = "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"},
]

60
pyproject.toml Normal file
View File

@ -0,0 +1,60 @@
[tool.poetry]
name = "ansible-runner"
version = "2.0.0"
description = "Consistent Ansible Pythin API and CLI with container and process isoluation runtime capabilities"
homepage = ""
repository = "https://github.com/ansible/ansible-runner"
documentation = "https://ansible-runner.readthedocs.io/en/latest/"
license = "Apache-2.0"
authors = ["Red Hat Ansible"]
keywords = ["ansible", "runner"]
readme = "README.md"
[tool.black]
line-length = 100
target-version = ['py36', 'py37', 'py38']
exclude = '''
/(
\.eggs
| \.git
| \.hg
| \.mypy_cache
| \.tox
| \.venv
| _build
| buck-out
| build
| dist
| docs
| installer
| packaging
)/
'''
[tool.poetry.dependencies]
python = "~2.7 || >=3.6"
pexpect = ">=4.5"
python-daemon = "*"
pyyaml = "*"
six = "*"
[tool.poetry.scripts]
ansible-runner = 'ansible_runner.__main__:main'
[tool.poetry.plugins."receptor.worker"]
ansible_runner = 'ansible_runner.receptor_plugin'
[tool.poetry.dev-dependencies]
more-itertools = "==5.0.0"
pytest = "==4.4.0"
pytest-xdist = "==1.32.0"
"flake8" = "*"
yamllint = "*"
funcsigs = "*"
mock = "*"
pathlib2 = "*"
scandir = "*"
[build-system]
requires = ["setuptools", "poetry>=1.0.5", "ansible-base"]
build-backend = "poetry.masonry.api"

4
pytest.ini Normal file
View File

@ -0,0 +1,4 @@
[pytest]
markers =
    serial: tests that cannot reliably be run with pytest multiprocessing
    timeout: used with pytest-timeout

0
requirements.txt Normal file
View File

25
setup.cfg Executable file
View File

@ -0,0 +1,25 @@
[pep8]
# E201 - Whitespace after '('
# E203 - Whitespace before ":"
# E221 - Multiple spaces after operator
# E225 - Missing whitespace around operator
# E231 - Missing whitespace after ','
# E241 - Multiple spaces after ','
# E251 - Unexpected spaces around keyword / parameter equals
# E261 - At least two spaces before inline comment
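# E265 - Block comment should start with '# '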
# E302 - Expected 2 blank lines found 0
# E303 - Too many blank lines
# W291 - Trailing whitespace
# W391 - Blank line at end of file
# W293 - Blank line contains whitespace
ignore=E201,E203,E221,E225,E231,E241,E251,E261,E265,E303,W291,W391,W293
exclude=.tox,venv
[flake8]
max-line-length=160
ignore=E201,E203,E221,E225,E231,E241,E251,E261,E265,E303,W291,W391,W293,E731,F405
exclude=.tox,venv
[metadata]
license_file=LICENSE.md
description-file = README.md

32
setup.py Normal file
View File

@ -0,0 +1,32 @@
#!/usr/bin/env python
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.

from setuptools import setup, find_packages

with open('README.md', 'r') as f:
    long_description = f.read()

setup(
    name="ansible-runner",
    version="2.0.0",
    author='Red Hat Ansible',
    url="https://github.com/ansible/ansible-runner",
    license='Apache',
    packages=find_packages(),
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=[
        'pexpect>=4.5',
        'python-daemon',
        'PyYAML',
        'six',
    ],
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'ansible-runner = ansible_runner.__main__:main'
        ]
    }
)

0
test/__init__.py Normal file
View File

30
test/conftest.py Normal file
View File

@ -0,0 +1,30 @@
import pytest
from distutils.version import LooseVersion
import pkg_resources
import os


@pytest.fixture(autouse=True)
def mock_env_user(monkeypatch):
    monkeypatch.setenv("ANSIBLE_DEVEL_WARNING", "False")


@pytest.fixture(scope='session')
def is_pre_ansible28():
    try:
        if LooseVersion(pkg_resources.get_distribution('ansible').version) < LooseVersion('2.8'):
            return True
    except pkg_resources.DistributionNotFound:
        # ansible-base (e.g. ansible 2.10 and beyond) is not accessible in this way
        pass


@pytest.fixture(scope='session')
def skipif_pre_ansible28(is_pre_ansible28):
    if is_pre_ansible28:
        pytest.skip("Valid only on Ansible 2.8+")


@pytest.fixture
def test_data_dir():
    return os.path.join(os.path.dirname(__file__), 'data')
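
A minimal sketch of how a test might opt into the version gate above — the test name and body are hypothetical, not part of this commit. Requesting skipif_pre_ansible28 is enough, because the fixture calls pytest.skip() itself when the installed Ansible predates 2.8:

# Hypothetical consumer of the fixtures above (illustrative, not in this diff).
import os

def test_feature_new_in_28(skipif_pre_ansible28, test_data_dir):
    # Runs only on Ansible 2.8+; test_data_dir points at test/data.
    assert os.path.isdir(test_data_dir)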

View File

@ -0,0 +1 @@
host_1

View File

@ -0,0 +1 @@
host_2

View File

@ -0,0 +1,6 @@
---
- hosts: all
  gather_facts: false
  connection: local
  tasks:
    - debug: msg='Hello world!'
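
Playbooks in this test data tree are typically exercised through the library's top-level Python API. A hedged sketch, assuming a private data dir whose project/ folder holds the playbook above (the directory and file names are illustrative, not from this commit):

# Sketch only: '/tmp/demo' and 'hello.yml' are assumed names.
import ansible_runner

r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='hello.yml')
print(r.status, r.rc)  # e.g. 'successful', 0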

View File

@ -0,0 +1,7 @@
1_ok
2_skipped
3_changed
4_failed
5_ignored
6_rescued
7_unreachable

View File

@ -0,0 +1,5 @@
---
- hosts: all
  gather_facts: false
  tasks:
    - include: gen_host_status_base.yml

View File

@ -0,0 +1,41 @@
---
- name: A debug msg all hosts will show except for skipped ones
  debug:
    msg: "Playing {{ ansible_host }}"
  when: "'_skipped' not in ansible_host"

- name: Hosts haven't really changed, but we will say they have
  debug:
    msg: "I am a changed host."
  changed_when: true
  when: "'_changed' in ansible_host"

- name: All failhosts aboard the failboat
  fail:
    msg: "I did nothing to deserve this."
  when: "'_failed' in ansible_host"

- name: Ignore this failure for some hosts
  fail:
    msg: "<insert inspirational quote about failure>"
  ignore_errors: true
  when: "'_ignored' in ansible_host"

- name: Fail and rescue - collection of tasks
  block:
    - fail:
        msg: "HALP!!!"
      when: "'_rescued' in ansible_host"
  rescue:
    - debug: msg="ε-(´・`) フ"

- name: Set unreachable fact
  set_fact:
    unreachable: true
  when: "'_unreachable' in ansible_host"

- name: Reach out to the unreachable hosts
  ping:
  vars:
    ansible_host: 'invalid.invalid'
  when: unreachable is defined and unreachable

View File

@ -0,0 +1,8 @@
---
- hosts: all
  gather_facts: false
  tasks:
    - include: gen_host_status_base.yml
      loop:
        - 1
        - 2

View File

@ -0,0 +1,6 @@
---
- hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - debug: msg="{{ lookup('env', 'FOO') }}"

View File

@ -0,0 +1,21 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.plugins.action import ActionBase

import os


class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=None):
        result = super(ActionModule, self).run(tmp, task_vars)
        result['changed'] = result['failed'] = False
        result['msg'] = ''
        env_dict = dict(os.environ)
        result['printenv'] = '\n'.join(
            '{0}={1}'.format(k, v) for k, v in env_dict.items()
        )
        result['environment'] = env_dict
        result['cwd'] = os.getcwd()
        return result

View File

@ -0,0 +1,6 @@
---
- hosts: localhost
  gather_facts: false
  connection: local
  tasks:
    - look_at_environment:

View File

@ -0,0 +1,11 @@
---
- name: Sleep playbook for testing things while process is running
  hosts: localhost
  gather_facts: false
  connection: local
  vars:
    sleep_interval: 30
  tasks:
    - name: sleep for a specified interval
      command: 'sleep {{ sleep_interval }}'

View File

@ -0,0 +1,14 @@
from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'other_callback'

    def v2_playbook_on_play_start(self, play):
        pass

    def v2_runner_on_ok(self, result):
        pass
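
Ansible picks up extra callbacks like this no-op one from directories named in ANSIBLE_CALLBACK_PLUGINS. A hedged sketch of wiring it into a subprocess run — the plugin directory and playbook name are assumptions, not from this diff:

# Illustrative only: point Ansible at the directory holding this plugin.
import os
import subprocess

env = os.environ.copy()
env['ANSIBLE_CALLBACK_PLUGINS'] = 'test/data/callback_plugins'  # assumed path
subprocess.run(['ansible-playbook', '-i', 'localhost,', '-c', 'local', 'demo.yml'], env=env)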

View File

@ -0,0 +1,73 @@
import os
import shutil

import pytest
import pexpect

from ansible_runner.runner_config import RunnerConfig


@pytest.fixture(scope='function')
def rc(tmpdir):
    rc = RunnerConfig(str(tmpdir))
    rc.suppress_ansible_output = True
    rc.expect_passwords = {
        pexpect.TIMEOUT: None,
        pexpect.EOF: None
    }
    rc.cwd = str(tmpdir)
    rc.env = {}
    rc.job_timeout = 10
    rc.idle_timeout = 0
    rc.pexpect_timeout = 2.0
    rc.pexpect_use_poll = True
    return rc


# TODO: determine if we want to add docker / podman
# to zuul instances in order to run these tests
@pytest.fixture(scope="session", autouse=True)
def container_runtime_available():
    import subprocess
    import warnings

    runtimes_available = True
    for runtime in ('docker', 'podman'):
        try:
            subprocess.run([runtime, '-v'])
        except FileNotFoundError:
            warnings.warn(UserWarning(f"{runtime} not available"))
            runtimes_available = False
    return runtimes_available


# TODO: determine if we want to add docker / podman
# to zuul instances in order to run these tests
@pytest.fixture(scope="session")
def container_runtime_installed():
    import subprocess

    for runtime in ('podman', 'docker'):
        try:
            subprocess.run([runtime, '-v'])
            return runtime
        except FileNotFoundError:
            pass
    pytest.skip('No container runtime is available.')


@pytest.fixture(scope='session')
def clear_integration_artifacts(request):
    '''Fixture is session scoped to allow parallel runs without error.'''
    if 'PYTEST_XDIST_WORKER' in os.environ:
        # we never want to clean artifacts if running parallel tests
        # because we cannot know when all processes are finished and it is
        # safe to clean up
        return

    def rm_integration_artifacts():
        path = "test/integration/artifacts"
        if os.path.exists(path):
            shutil.rmtree(path)

    request.addfinalizer(rm_integration_artifacts)
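
A test that requests container_runtime_installed gets back whichever binary was found first (podman is probed before docker) and is skipped automatically when neither is on PATH. A hypothetical consumer, not part of this commit:

# Hypothetical integration test; pytest.skip() in the fixture handles hosts
# without a container runtime.
import subprocess

def test_runtime_reports_version(container_runtime_installed):
    proc = subprocess.run([container_runtime_installed, '--version'],
                          stdout=subprocess.PIPE, encoding='utf-8')
    assert proc.returncode == 0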

View File

@ -0,0 +1,71 @@
import json
import os
import subprocess

import yaml
from tempfile import NamedTemporaryFile

import pytest


@pytest.fixture
def tmp_file_maker():
    """Fixture to return temporary file maker."""
    def tmp_file(text):
        with NamedTemporaryFile(delete=False) as tempf:
            tempf.write(bytes(text, 'UTF-8'))
        return tempf.name
    return tmp_file


class CompletedProcessProxy(object):

    def __init__(self, result):
        self.result = result

    def __getattr__(self, attr):
        return getattr(self.result, attr)

    @property
    def json(self):
        try:
            response_json = json.loads(self.stdout)
        except json.JSONDecodeError:
            pytest.fail(
                f"Unable to convert the response to valid JSON - stdout: {self.stdout}, stderr: {self.stderr}"
            )
        return response_json

    @property
    def yaml(self):
        return yaml.safe_load(self.stdout)


@pytest.fixture(scope='function')
def cli(request):
    def run(args, *a, **kw):
        if not kw.pop('bare', None):
            args = ['ansible-runner'] + args
        kw['encoding'] = 'utf-8'
        if 'check' not in kw:
            # By default we want to fail if a command fails to run. Tests that
            # want to skip this can pass check=False when calling this fixture.
            kw['check'] = True
        if 'stdout' not in kw:
            kw['stdout'] = subprocess.PIPE
        if 'stderr' not in kw:
            kw['stderr'] = subprocess.PIPE
        kw.setdefault('env', os.environ.copy()).update({
            'LANG': 'en_US.UTF-8'
        })
        try:
            ret = CompletedProcessProxy(subprocess.run(' '.join(args), shell=True, *a, **kw))
        except subprocess.CalledProcessError as err:
            pytest.fail(
                f"Running {err.cmd} resulted in a non-zero return code: {err.returncode} - stdout: {err.stdout}, stderr: {err.stderr}"
            )
        return ret
    return run
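
Because run() joins args and invokes the shell, callers pass pre-formed tokens; bare=True suppresses the implicit ansible-runner prefix, and check=False lets a test assert on a non-zero exit instead of failing outright. A hypothetical use, not part of this commit:

# Hypothetical tests showing the cli fixture's calling conventions.
def test_help_shows_usage(cli):
    result = cli(['--help'])              # executes: ansible-runner --help
    assert 'usage' in result.stdout.lower()

def test_unknown_command_fails(cli):
    result = cli(['bogus'], check=False)  # tolerate a non-zero return code
    assert result.returncode != 0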

Some files were not shown because too many files have changed in this diff