diff --git a/CHANGELOG b/CHANGELOG index 352f343..f68c15a 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,15 @@ +v 0.9.0 (16 Nov 2017) +- IMPORTANT - The 'cinchpin' command has been REMOVED for linchpin 1.0.4 + compatibility. The new 'teardown' command replaces Jenkins slave + disconnection functionality previously handled by the 'cinchpin' command. +- The RHEL7 installer now creates two virtualenvs, one for linchpin and one for + cinch +- Removed 'latest tip' and Beaker python package installation options from + RHEL7 installer as they are no longer necessary +- Fixed a bug where Jenkins slaves would not be removed from the master during + a provisioning failure in our JJB example workflow + (ci-jslave-project-sample.yaml) + v 0.8.5 (10 Oct 2017) - Remove management of the executor setting on masters (GH #182) - Remove stale, unused repo key that began failing (GH #184) diff --git a/cinch/bin/entry_point.py b/cinch/bin/entry_point.py index f0f437c..d2f86a6 100644 --- a/cinch/bin/entry_point.py +++ b/cinch/bin/entry_point.py @@ -2,23 +2,12 @@ from __future__ import print_function from argparse import ArgumentParser, REMAINDER from os import getcwd, path -from wrappers import call_ansible, call_linchpin +from wrappers import call_ansible import sys -def cinch(): - """ - Entry point for the "cinch" CLI that merely wraps the ansible-playbook - command and pre-fills its path to the site.yml file for Cinch. The cinch - tool requires a single argument - the Ansible inventory file - and accepts - an arbitrary number of extra arguments that are passed through to the - ansible-playbook executable. - - :return: Exit code 0 if the execution is completed successfully, or 255 - if an unknown error occurs. If ansible-playbook exits with an error code, - this executable will exit with the same code. 
- """ +def cinch_generic(playbook): # Parse the command line arguments parser = ArgumentParser(description='A wrapper around Cinch for the most ' 'common use case') @@ -35,38 +24,35 @@ def cinch(): inventory = path.join(getcwd(), args.inventory) else: raise Exception('Inventory path needs to be non-empty') - exit_code = call_ansible(inventory, 'site.yml', args.args) + exit_code = call_ansible(inventory, playbook, args.args) sys.exit(exit_code) -def cinchpin(): +def cinch(): """ - Entry point for the "cinchpin" CLI that wraps the linchpin command and - loads the linch-pin PinFile to provision resources and then uses the - generated inventory file to pass to cinch. The cinchpin tool requires a - single argument - a valid linchpin subcommand - and accepts an arbitrary - number of extra arguments that are passed through to the linchpin - executable. If a linch-pin PinFile is not found in the current working - directory, a path to a linch-pin working directory may be optionally - provided. + Entry point for the "cinch" CLI that merely wraps the ansible-playbook + command and pre-fills its path to the site.yml file for Cinch. The cinch + tool requires a single argument - the Ansible inventory file - and accepts + an arbitrary number of extra arguments that are passed through to the + ansible-playbook executable. :return: Exit code 0 if the execution is completed successfully, or 255 - if an unknown error occurs. If linchpin exits with an error code, + if an unknown error occurs. If ansible-playbook exits with an error code, this executable will exit with the same code. 
""" - # Parse the command line arguments - parser = ArgumentParser(description='A wrapper around linchpin for the ' - 'most common use case') - # The linch-pin working directory containing a PinFile that the user - # provides which will get passed along to linchpin for its consumption - parser.add_argument('-w', '--workdir', default=getcwd(), - help='''path to linch-pin working directory containing a - PinFile''') - # All remaining arguments are passed through, untouched, to linchpin - parser.add_argument('arg', help='argument to pass to the linchpin command') - args = parser.parse_args() - exit_code = call_linchpin(args.workdir, args.arg) - sys.exit(exit_code) + cinch_generic('site.yml') + + +def teardown(): + """ + Entry point for the "teardown" CLI that wraps ansible-playbook commands and + pre-fills its path to the teardown.yml file. + + :return: Exit code 0 if the execution is completed successfully, or 255 if + an unknown error occurs. If ansible-playbook exits with an error code, this + executable will exit with the same code. + """ + cinch_generic('teardown.yml') if __name__ == '__main__': diff --git a/cinch/bin/wrappers.py b/cinch/bin/wrappers.py index 6ff7520..f9e0f15 100644 --- a/cinch/bin/wrappers.py +++ b/cinch/bin/wrappers.py @@ -6,7 +6,6 @@ import os import sys -import yaml BASE = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) @@ -36,7 +35,8 @@ def call_ansible(inventory, playbook, *args): os.path.join(BASE, playbook), '-i', inventory, '-v', - '--ssh-common-args=-o StrictHostKeyChecking=no' + '--ssh-common-args=-o StrictHostKeyChecking=no ' + + '-o UserKnownHostsFile=/dev/null' ] ansible_args.extend(args) ansible = local['ansible-playbook'] @@ -44,141 +44,6 @@ def call_ansible(inventory, playbook, *args): return exit_code -def call_linchpin(work_dir, arg): - """ - Wraps a call out to the linchpin executable, and then kicks off a cinch - Ansible playbook if necessary. 
- - :param work_dir: The linch-pin working directory that contains a PinFile - and associated configuration files - :param arg: A single argument to pass to the linchpin command - :return: The exit code returned from linchpin, or 255 if errors come from - elsewhere - """ - # cinch will only support a subset of linchpin subcommands - supported_cmds = ['up', 'destroy', 'init'] - if arg not in supported_cmds: - sys.exit('linchpin command "{0}" not ' - 'supported by cinch'.format(arg)) - - # If we are to ask linch-pin to interact with infrastructure we will check - # for some required configuration items and set up them for later use - if arg != 'init': - inventory_file = get_inventory(work_dir) - inventory_path = os.path.join(work_dir, 'inventories', inventory_file) - - # For destroy/teardown, we must run our teardown playbook(s) *before* - # linchpin terminates the instance(s) - if arg == 'destroy': - exit_code = call_ansible(inventory_path, 'teardown.yml') - - # Construct the arguments to pass to linch-pin by munging the arguments - # provided to this method - linchpin_args = [ - '-v', - '-w', work_dir, - '--creds-path', os.path.join(work_dir, 'credentials') - ] - linchpin_args.append(arg) - # Execute the 'linchpin' command - linchpin = local['linchpin'] - exit_code = command_handler(linchpin, linchpin_args) - - # Set up a linch-pin+cinch configuration skeleton for later use if the - # 'init' subcommand was executed previously - if arg == 'init': - cinchpin_init(work_dir) - - # If linchpin is asked to provision resources, we will then run our - # cinch provisioning playbook - if arg == 'up' and exit_code == 0: - exit_code = call_ansible(inventory_path, 'site.yml') - return exit_code - - -def cinchpin_init(work_dir): - """ - Set up a linch-pin+cinch configuration skeleton - - :param work_dir: The linch-pin working directory that contains a PinFile - and associated configuration files - """ - # Consistent filename to use for various linch-pin YAML configurations - # 
for 'cinchpin' - config_file = 'cinch.yml' - # Cinch layout and topology paths to be added to linch-pin PinFile - config_setup = { - 'cinch': { - 'topology': config_file, - 'layout': config_file - } - } - - # Ansible group_vars directory that will be created for later use - group_vars = os.path.join('inventories', 'group_vars') - - # Dictionary of workspace directories and target filenames where we will - # put our skeletons - local_paths = { - 'layouts': config_file, - 'topologies': config_file, - 'credentials': config_file, - group_vars: 'all' - } - - # Overwrite the PinFile that linch-pin created with our configuration - pin_file = os.path.join(work_dir, 'PinFile') - with open(pin_file, 'w') as f: - yaml.dump(config_setup, f, default_flow_style=False) - - # Create Ansible group_vars directory since linch-pin doesn't provide this - os.mkdir(os.path.join(work_dir, group_vars)) - - # Write out the skeletons and inform the user that they exist - for directory, filename in local_paths.items(): - path = os.path.join(work_dir, directory, filename) - with open(path, 'w') as f: - f.write(SKEL_TEXT.format(directory, DOCS)) - print('Please configure this file to use cinch: ' + path) - print('Example configurations: ' + DOCS) - - -def get_inventory(work_dir): - """ - Basic checks for cinch compatibility in the linch-pin working directory, - and if successful, we produce a topology file for cinch to use. 
- - :param work_dir: The linch-pin working directory as created by 'linchpin - init' or 'cinchpin init' - :return: The topology file to pass to the 'cinch' command - """ - # Attempt to open the linch-pin PinFile - try: - with open(os.path.join(work_dir, 'PinFile'), 'r') as f: - pin_file_yaml = yaml.safe_load(f) - except IOError: - sys.exit('linch-pin PinFile not found in ' + work_dir) - # We must find a topology section named 'cinch' to determine where our - # inventory file will live - try: - cinch_topology = 'cinch' - topology = pin_file_yaml[cinch_topology]['topology'] - except KeyError: - sys.exit('linch-pin PinFile must contain a topology ' - 'section named "{0}"'.format(cinch_topology)) - # The inventory file generated by linchpin that will be used by cinch for - # configuration - try: - topology_path = os.path.join(work_dir, 'topologies', topology) - with open(topology_path) as topology_file: - topology_yaml = yaml.safe_load(topology_file) - inventory_file = topology_yaml['topology_name'] + '.inventory' - except (IOError, TypeError): - sys.exit('linch-pin topology file not found or malformed: ' + - topology_path) - return inventory_file - - def command_handler(command, args): """ Generic function to run external programs. diff --git a/cinch/playbooks/install-rhel7.yml b/cinch/playbooks/install-rhel7.yml index 9c0b9b1..c9009ed 100644 --- a/cinch/playbooks/install-rhel7.yml +++ b/cinch/playbooks/install-rhel7.yml @@ -4,18 +4,16 @@ # on RHEL7 to work with newer Python libraries such as those required by # 'cinch'. # -# This playbook was tested with Ansible 1.8.4. 
+# This playbook was tested with Ansible 2.4.1.0 - name: install cinch into a virtualenv on RHEL7 hosts: localhost vars: jenkins_home: /var/lib/jenkins - venv_dir: "{{ jenkins_home }}/opt/cinch" - temp_dir: "{{ venv_dir }}/tmp" - python: "{{ venv_dir }}/bin/python" + venvs: + cinch: "{{ jenkins_home }}/opt/cinch" + linchpin: "{{ jenkins_home }}/opt/linchpin" delete_venv: false - latest_tip: false - beaker_kerberos: true tasks: - name: fail if we are not running this playbook on RHEL7 @@ -38,77 +36,48 @@ msg: "directory {{ jenkins_home }} must exist for this playbook to run" when: not jenkins_home_stat_result.stat.exists - - name: check for /var/lib/jenkins/opt/cinch directory + - name: check for existing virtualenvs stat: - path: "{{ venv_dir }}" + path: "{{ item.value }}" + with_dict: "{{ venvs }}" register: venv_stat_result - name: >- - fail if pre-existing cinch installation at - /var/lib/jenkins/opt/cinch is found and it cannot be deleted + fail if pre-existing virtualenvs are found and cannot be deleted because + delete_venv is set to 'false' fail: - msg: "directory {{ venv_dir }} exists, but 'delete_venv' setting is False" - when: venv_stat_result.stat.exists and not (delete_venv|bool) + msg: "directory {{ item.item.value }} exists, but 'delete_venv' setting is False" + with_items: "{{ venv_stat_result.results }}" + when: item.stat.exists == true and not (delete_venv|bool) - name: >- delete existing virtualenv directory (disabled by default, override with DELETE_VENV Jenkins job parameter or delete_venv playbook variable) file: - path: "{{ venv_dir }}" + path: "{{ item.value }}" state: absent + with_dict: "{{ venvs }}" when: (delete_venv|bool) - - name: create virtualenv - command: virtualenv --no-setuptools "{{ venv_dir }}" - args: - creates: "{{ venv_dir }}" - - - name: create temp dir in root of virtualenv - file: - path: "{{ temp_dir }}" - state: directory - - - name: download latest version of pip (version included with RHEL7 is too old) - get_url: - 
url: https://bootstrap.pypa.io/get-pip.py - dest: "{{ temp_dir }}" - - - name: install pip manually by running get-pip.py script - command: "{{ python }} {{ temp_dir }}/get-pip.py" - args: - creates: "{{ venv_dir }}/lib/python2.7/site-packages/setuptools" - - - name: install released versions of cinch+linch-pin using pip + - name: >- + create virtualenvs with --system-site-packages to allow for selinux + module compatibility, then upgrade setuptools and pip pip: - name: cinch - virtualenv: "{{ venv_dir }}" + name: setuptools pip + virtualenv: "{{ item.value }}" extra_args: -U - when: not (latest_tip|bool) + virtualenv_site_packages: true + with_dict: "{{ venvs }}" - # This pip install should be non-editable, but the pip module in Ansible - # 1.8.4. does not support that flag - - name: install latest tip of cinch+linch-pin instead of latest release from pypi - command: >- - "{{ venv_dir }}/bin/pip" install -U - https://github.com/CentOS-PaaS-SIG/linchpin/archive/develop.tar.gz - https://github.com/RedHatQE/cinch/archive/master.tar.gz - when: (latest_tip|bool) - - - name: install beaker-client and python-krbV with pip to use kerberos with Beaker + - name: install version 1.0.4 of linchpin using pip pip: - name: "{{ item }}" - virtualenv: "{{ venv_dir }}" + name: linchpin + virtualenv: "{{ venvs.linchpin }}" extra_args: -U - with_items: - - beaker-client - - python-krbV - when: (beaker_kerberos|bool) + version: 1.0.4 - ## https://dmsimard.com/2016/01/08/selinux-python-virtualenv-chroot-and-ansible-dont-play-nice/ - - name: >- - set up symlink in virtualenv for selinux module in system site-packages - since it's not pip installable - file: - src: /usr/lib64/python2.7/site-packages/selinux - dest: "{{ venv_dir }}/lib/python2.7/site-packages/selinux" - state: link + - name: install released version of cinch using pip + pip: + name: cinch + virtualenv: "{{ venvs.cinch }}" + extra_args: -U diff --git a/docs/source/users.rst b/docs/source/users.rst index a346636..4b4a5fe 
100644 --- a/docs/source/users.rst +++ b/docs/source/users.rst @@ -123,8 +123,8 @@ provided an `Ansible playbook `_ that will install a newer version of the necessary Python packaging tools to allow for installation on RHEL7. This playbook is intended for use on Jenkins -masters and will install cinch into a virtualenv at -**/var/lib/jenkins/opt/cinch**. For convenience, an optional `Jenkins Job +masters and will install cinch and linchpin into separate virtualenvs under +**/var/lib/jenkins/opt/**. For convenience, an optional `Jenkins Job Builder template `_ is provided and will create a Jenkins job that will run the aforementioned @@ -133,39 +133,24 @@ playbook on your Jenkins master. Execution --------- -With linch-pin +With linchpin `````````````` -The ``cinchpin`` command can be used to call `linch-pin -`_ automatically to provision -instances and then configure the instances. ``cinchpin`` supports a subset of -linch-pin commands, such as **up**, **destroy**, and **init**. +If you'd like to automate the process of provisioning a host to later configure +with cinch, the `linchpin project +`_ can be used for this task. +linchpin can dynamically generate an Ansible inventory file that cinch can +consume for host configuration. In the following steps we will outline how to +configure cinch-specific values within a linchpin workspace. -In the following example we will provision a RHEL7 instance in OpenStack as a -Jenkins slave. - -First, generate a linch-pin working directory for use with cinch by running the -following commands: - -``mkdir /path/to/workdir`` - -``cinchpin init -w /path/to/workdir`` - -Next, create necessary credentials for linch-pin provisioning for your target -infrastructure in -**/path/to/workdir/credentials/cinch.yml**: :: - - --- - clouds: - openstack: - auth: - auth_url: 'http://openstack-api-endpoint.example.com:5000/v2.0' - project_name: 'myproject' - username: 'myuser' - password: 'mypass' +.. 
note:: For linchpin topology and workspace examples, including various host + environments, see the `linchpin documentation + `_. Create a layout file by saving the following example template as -**/path/to/workdir/layouts/cinch.yml** and edit to taste.:: +**/path/to/linchpin/workspace/layouts/mylayout.yml** and edit to taste based on +your cinch role requirements. In this example we configure a RHEL7 Jenkins +slave:: --- inventory_layout: @@ -178,8 +163,12 @@ Create a layout file by saving the following example template as - repositories - jenkins_slave -Create an Ansible group_vars file by saving the following example template as -**/path/to/workdir/inventories/group_vars/all** and edit to taste.:: +Create an Ansible **group\_vars** file by saving the following example template +as **/path/to/linchpin/workspace/inventories/group_vars/all** and edit to taste +based on your desired configuration parameters. In this example we configure a +RHEL7 Jenkins slave to attach to a Jenkins master which requires +authentication, along with some installed certificate authorities and +repositories:: --- ansible_user: root @@ -201,48 +190,11 @@ Create an Ansible group_vars file by saving the following example template as jenkins_slave_username: 'automation-user' jenkins_slave_password: 'jenkinsAPItoken' -Create a topology file by saving the following example template as -**/path/to/workdir/topologies/cinch.yml** and edit to taste:: +Finally, if you'd like to automate this process in Jenkins, please see our +example `Jenkins Job Builder workflow template +`_ +for guidance on putting it all together. 
- --- - topology_name: "cinch-test" - resource_groups: - - - resource_group_name: "cinch-group" - resource_group_type: "openstack" - resource_definitions: - - - name: "jslave" - flavor: "m1.small" - type: "os_server" - image: "rhel-7.2-server-x86_64-released" - count: 1 # Number of instances to create - keypair: "openstack-keypair-name" # Name of SSH keypair configured for OpenStack account - networks: - - "openstack-network-name" # OpenStack network name - # Name of credentials file to use for the OpenStack API - credentials: - filename: "cinch.yml" - profile: "openstack" - -.. note:: For more topology examples, including various host environments, see - the `linch-pin documentation - `_. - -Provision and configure your Jenkins slave automatically with the following -command: - -``cinchpin up -w /path/to/workdir`` - -To terminate the OpenStack instance and remove the Jenkins slave from the -Jenkins master, run the following command: - -``cinchpin destroy -w /path/to/workdir`` - -.. note:: Once the working directory is configured successfully, a common next - step would be to check this directory into source control where it can be - consumed by CI automation tools such as Jenkins Job Builder or Jenkins - Pipeline. Manual `````` @@ -269,6 +221,13 @@ will execute **ansible-playbook** from the **.venv/** virtualenv and point it to the **inventory/local/hosts** file to make executing against your own environment as easy as a single command. +The cinch project can be used as a standard Ansible project, by running +**ansible-playbook** and calling **site.yml** for Jenkins master or slave +configuration and **teardown.yml** for removing a Jenkins slave from a Jenkins +master. For convenience, we also provide CLI wrappers for these tasks, with the +**cinch** command running **site.yml** and the **teardown** command running +**teardown.yml**. 
+ Support ------- diff --git a/jjb/ci-jslave-project-sample.yaml b/jjb/ci-jslave-project-sample.yaml index fb4aa13..7a34895 100644 --- a/jjb/ci-jslave-project-sample.yaml +++ b/jjb/ci-jslave-project-sample.yaml @@ -1,6 +1,7 @@ --- - job-template: - name: 'jslave-{project}-{topology}-1-provision' + name: '{project}-{topology}-provision' + description: '{description}' defaults: cinch-topology-setup node: master parameters: @@ -12,8 +13,13 @@ builders: - shell: | #!/bin/bash -ex + source "${{JENKINS_HOME}}/opt/linchpin/bin/activate" + linchpin -v --creds-path {topology_path}/${{PROVIDER}}/credentials -w {topology_path}/${{PROVIDER}} up + deactivate + source "${{JENKINS_HOME}}/opt/cinch/bin/activate" - cinchpin up -w {topology_path}/${{PROVIDER}} + cinch {topology_path}/${{PROVIDER}}/inventories/cinch-test.inventory + deactivate publishers: - archive: artifacts: '{topology_path}/${{PROVIDER}}/inventories/cinch-test.inventory' @@ -22,17 +28,18 @@ artifacts: '{topology_path}/${{PROVIDER}}/resources/cinch-test.output' allow-empty: 'true' - trigger-parameterized-builds: - - project: 'jslave-{project}-{topology}-2-runtest' + - project: '{project}-{topology}-runtest' current-parameters: true condition: 'SUCCESS' fail-on-missing: true - - project: 'jslave-{project}-{topology}-3-teardown' + - project: '{project}-{topology}-teardown' current-parameters: true condition: 'UNSTABLE_OR_WORSE' fail-on-missing: true - job-template: - name: 'jslave-{project}-{topology}-2-runtest' + name: '{project}-{topology}-runtest' + description: '{description}' node: '{jslave_name}' builders: - shell: | @@ -43,42 +50,58 @@ artifacts: 'test_artifact.txt' allow-empty: 'false' - trigger-parameterized-builds: - - project: 'jslave-{project}-{topology}-3-teardown' + - project: '{project}-{topology}-teardown' current-parameters: true - job-template: - name: 'jslave-{project}-{topology}-3-teardown' + name: '{project}-{topology}-teardown' + description: '{description}' defaults: cinch-topology-setup node: 
master builders: - copyartifact: - project: 'jslave-{project}-{topology}-1-provision' + project: '{project}-{topology}-provision' filter: '{topology_path}/${{PROVIDER}}/inventories/cinch-test.inventory' target: '{topology_path}/${{PROVIDER}}/inventories' flatten: true - copyartifact: - project: 'jslave-{project}-{topology}-1-provision' + project: '{project}-{topology}-provision' filter: '{topology_path}/${{PROVIDER}}/resources/cinch-test.output' target: '{topology_path}/${{PROVIDER}}/resources' flatten: true - shell: | #!/bin/bash -ex source "${{JENKINS_HOME}}/opt/cinch/bin/activate" - cinchpin destroy -w {topology_path}/${{PROVIDER}} + # Try to remove the Jenkins slave from the Jenkins master, but do not fail + # the entire teardown job if Jenkins slave disconnection cannot be done. + # This is for cases where the provision step failed to attach the slave, + # but the instance should still be destroyed by linchpin. + set +e + teardown {topology_path}/${{PROVIDER}}/inventories/cinch-test.inventory + set -e + deactivate + + source "${{JENKINS_HOME}}/opt/linchpin/bin/activate" + linchpin -v --creds-path {topology_path}/${{PROVIDER}}/credentials -w {topology_path}/${{PROVIDER}} destroy + deactivate - job-group: - name: jslave-provision-runtest-teardown + name: provision-runtest-teardown jobs: - - 'jslave-{project}-{topology}-1-provision' - - 'jslave-{project}-{topology}-2-runtest' - - 'jslave-{project}-{topology}-3-teardown' + - '{project}-{topology}-provision' + - '{project}-{topology}-runtest' + - '{project}-{topology}-teardown' - project: - name: jslave-cinch-jobs + name: cinch-jobs project: cinch topology: - example jobs: - - jslave-provision-runtest-teardown + - provision-runtest-teardown jslave_name: cinch-slave - topology_path: 'cinch-example/examples/linch-pin-topologies' + topology_path: 'cinch-example/examples/linchpin-topologies' + description: | + cinch Jenkins slave provisioning example workflow using Jenkins Job Builder + + 
https://github.com/RedHatQE/cinch/blob/master/jjb/ci-jslave-project-sample.yaml diff --git a/jjb/install-rhel7.yaml b/jjb/install-rhel7.yaml index 721fb28..b96475b 100644 --- a/jjb/install-rhel7.yaml +++ b/jjb/install-rhel7.yaml @@ -9,15 +9,7 @@ - bool: name: DELETE_VENV default: false - description: "Delete pre-existing cinch virtualenv and re-install" - - bool: - name: LATEST_TIP - default: false - description: "Install latest tip of cinch+linch-pin instead of latest release from pypi" - - bool: - name: BEAKER_KERBEROS - default: true - description: "Install kerberos module for Beaker" + description: "Delete pre-existing linchpin/cinch virtualenvs and re-install" wrappers: - ansicolor - workspace-cleanup @@ -38,8 +30,6 @@ export PYTHONUNBUFFERED=1 # Enable real-time output for Ansible ansible-playbook -i localhost, -c local \ "${WORKSPACE}/cinch/cinch/playbooks/install-rhel7.yml" \ - -e delete_venv="${DELETE_VENV}" \ - -e latest_tip="${LATEST_TIP}" \ - -e beaker_kerberos="${BEAKER_KERBEROS}" + -e delete_venv="${DELETE_VENV}" deactivate diff --git a/setup.py b/setup.py index ae976bd..18236ba 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ setup( name='cinch', - version='0.8.5', + version='0.9.0', description='Cinch continuous integration setup', long_description=description, url='https://github.com/RedHatQE/cinch', @@ -30,13 +30,12 @@ include_package_data=True, install_requires=[ 'ansible>=2.3.2', - 'plumbum>=1.6.0', - 'linchpin>=1.0.4' + 'plumbum>=1.6.0' ], entry_points={ 'console_scripts': [ 'cinch=cinch.bin.entry_point:cinch', - 'cinchpin=cinch.bin.entry_point:cinchpin' + 'teardown=cinch.bin.entry_point:teardown' ] }, extras_require={