diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md old mode 100755 new mode 100644 diff --git a/MIGRATING.md b/MIGRATING.md new file mode 100644 index 00000000..de60c6a6 --- /dev/null +++ b/MIGRATING.md @@ -0,0 +1,133 @@ +# Migrating from master to main + +This Git repository has two branches: `master`, which is now stable, and `main`, which is where new functionality will be implemented. Your playbooks need to be adjusted when switching from one branch to the other — and these adjustments are outlined in this document. + +## What's changing? + +A long-term goal of this project is to publish the code through [Ansible Galaxy](https://galaxy.ansible.com/). It became clear that changes to the project's directory structure would be inevitable to follow the conventions imposed by Galaxy (i.e. [Collections format](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html)) — and this was taken as an opportunity to also rename all existing roles and some variables for consistency. See [#570](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/570), [#572](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/572), and [#590](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/590) for details. + +All playbooks using the Ansible roles provided by this project need to adapt this new naming scheme, in order to use the latest updates implemented in the `main` branch. + +**Important**: The `master` branch (previous default) will stay with the current naming scheme. It is considered stable, which means that only critical bug fixes will be added. New functionality will solely be implemented in the `main` (new default) branch. + +## What do I need to do? + +The following steps need to be taken in order to consume the `main` branch in your own projects: + +- Repository contents need to be placed in a `collections/ansible_collections/ibm/spectrum_scale` directory, adjacent to your playbooks. The easiest way to do this is to clone the correct branch into the appropriate path: + + ```shell + $ git clone -b main https://github.com/IBM/ibm-spectrum-scale-install-infra.git collections/ansible_collections/ibm/spectrum_scale + ``` + + The resulting directory structure should look similar to this: + + ```shell + my_project/ + ├── collections/ + │ └── ansible_collections/ + │ └── ibm/ + │ └── spectrum_scale/ + │ └── ... + ├── hosts + └── playbook.yml + ``` + +- Once the repository contents are available in the appropriate path, roles can be referenced by using their Fully Qualified Collection Name (FQCN). A minimal playbook should look similar to this: + + ```yaml + # playbook.yml: + --- + - hosts: cluster01 + roles: + - ibm.spectrum_scale.core_prepare + - ibm.spectrum_scale.core_install + - ibm.spectrum_scale.core_configure + - ibm.spectrum_scale.core_verify + ``` + + Refer to the [Ansible User Guide](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html#using-collections-in-a-playbook) for details on using collections, including alternate syntax with the `collections` keyword. + + Note that all role names have changed: + + - Old naming: `[component]/[precheck|node|cluster|postcheck]` + - New naming: `[component]_[prepare|install|configure|verify]` + + Refer to the [name mapping table](#role-name-mapping-table) for a list of new role names. + +- Some variables have been renamed for consistency as well, but it's expected that these changes only affect very few users. 
See [#590](https://github.com/IBM/ibm-spectrum-scale-install-infra/pull/590) for details, and refer to [VARIABLES.md](VARIABLES.md) for a complete listing of all available variables. + +## Role Name Mapping Table + +| `master` branch | `main` branch | +| -------------------------------- | ---------------------------------------- | +| callhome/cluster | ibm.spectrum_scale.callhome_configure | +| callhome/node | ibm.spectrum_scale.callhome_install | +| callhome/postcheck | ibm.spectrum_scale.callhome_verify | +| callhome/precheck | ibm.spectrum_scale.callhome_prepare | +| core/cluster | ibm.spectrum_scale.core_configure | +| core/common | ibm.spectrum_scale.core_common | +| core/node | ibm.spectrum_scale.core_install | +| core/postcheck | ibm.spectrum_scale.core_verify | +| core/precheck | ibm.spectrum_scale.core_prepare | +| core/upgrade | ibm.spectrum_scale.core_upgrade | +| gui/cluster | ibm.spectrum_scale.gui_configure | +| gui/node | ibm.spectrum_scale.gui_install | +| gui/postcheck | ibm.spectrum_scale.gui_verify | +| gui/precheck | ibm.spectrum_scale.gui_prepare | +| gui/upgrade | ibm.spectrum_scale.gui_upgrade | +| nfs/cluster | ibm.spectrum_scale.nfs_configure | +| nfs/common | ibm.spectrum_scale.ces_common | +| nfs/node | ibm.spectrum_scale.nfs_install | +| nfs/postcheck | ibm.spectrum_scale.nfs_verify | +| nfs/precheck | ibm.spectrum_scale.nfs_prepare | +| nfs/upgrade | ibm.spectrum_scale.nfs_upgrade | +| remote_mount/ | ibm.spectrum_scale.remotemount_configure | +| scale_auth/upgrade | ibm.spectrum_scale.auth_upgrade | +| scale_ece/cluster | ibm.spectrum_scale.ece_configure | +| scale_ece/node | ibm.spectrum_scale.ece_install | +| scale_ece/precheck | ibm.spectrum_scale.ece_prepare | +| scale_ece/upgrade | ibm.spectrum_scale.ece_upgrade | +| scale_fileauditlogging/cluster | ibm.spectrum_scale.fal_configure | +| scale_fileauditlogging/node | ibm.spectrum_scale.fal_install | +| scale_fileauditlogging/postcheck | ibm.spectrum_scale.fal_verify | +| scale_fileauditlogging/precheck | ibm.spectrum_scale.fal_prepare | +| scale_fileauditlogging/upgrade | ibm.spectrum_scale.fal_upgrade | +| scale_hdfs/cluster | ibm.spectrum_scale.hdfs_configure | +| scale_hdfs/node | ibm.spectrum_scale.hdfs_install | +| scale_hdfs/postcheck | ibm.spectrum_scale.hdfs_verify | +| scale_hdfs/precheck | ibm.spectrum_scale.hdfs_prepare | +| scale_hdfs/upgrade | ibm.spectrum_scale.hdfs_upgrade | +| scale_hpt/node | ibm.spectrum_scale.afm_cos_install | +| scale_hpt/postcheck | ibm.spectrum_scale.afm_cos_verify | +| scale_hpt/precheck | ibm.spectrum_scale.afm_cos_prepare | +| scale_hpt/upgrade | ibm.spectrum_scale.afm_cos_upgrade | +| scale_object/cluster | ibm.spectrum_scale.obj_configure | +| scale_object/node | ibm.spectrum_scale.obj_install | +| scale_object/postcheck | ibm.spectrum_scale.obj_verify | +| scale_object/precheck | ibm.spectrum_scale.obj_prepare | +| scale_object/upgrade | ibm.spectrum_scale.obj_upgrade | +| smb/cluster | ibm.spectrum_scale.smb_configure | +| smb/node | ibm.spectrum_scale.smb_install | +| smb/postcheck | ibm.spectrum_scale.smb_verify | +| smb/precheck | ibm.spectrum_scale.smb_prepare | +| smb/upgrade | ibm.spectrum_scale.smb_upgrade | +| zimon/cluster | ibm.spectrum_scale.perfmon_configure | +| zimon/node | ibm.spectrum_scale.perfmon_install | +| zimon/postcheck | ibm.spectrum_scale.perfmon_verify | +| zimon/precheck | ibm.spectrum_scale.perfmon_prepare | +| zimon/upgrade | ibm.spectrum_scale.perfmon_upgrade | + +## Migration script + +If you have existing playbooks which 
reference roles provided by this project, and you wish to migrate to the new format, then there is a [migration script](migrate.sh) available to replace all occurrences of role names in a given file. You can use the migration script like so: + +```shell +$ ./migrate.sh playbook.yml +``` + +Note that the script will create a backup of the file prior to making any changes. Further note that the script does not perform any kind of syntax checking, so you will need to manually verify that the resulting code is syntactically correct. + +## What if I need help? + +Create a [new issue](https://github.com/IBM/ibm-spectrum-scale-install-infra/issues/new) and provide (the relevant parts of) your playbook, along with the exact error message. diff --git a/README.md b/README.md index 17c202fb..b37d74d8 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,11 @@ -IBM Spectrum Scale (GPFS) Deployment using Ansible Roles -======================================================== +**Important**: You are viewing the `main` branch of this repository. If you've previously used the `master` branch in your own playbooks then you will need to make some changes in order to switch to the `main` branch. See [MIGRATING.md](MIGRATING.md) for details. -Ansible project with multiple roles for installing and configuring IBM Spectrum Scale (GPFS). +--- + +IBM Storage Scale (GPFS) Deployment using Ansible Roles +======================================================= + +Ansible project with multiple roles for installing and configuring IBM Storage Scale (GPFS) software defined storage. **Table of Contents** @@ -19,94 +23,96 @@ Ansible project with multiple roles for installing and configuring IBM Spectrum - [Disclaimer](#disclaimer) - [Copyright and License](#copyright-and-license) - Features -------- #### Infrastructure minimal tested configuration + - [x] Pre-built infrastructure (using a static inventory file) - [ ] Dynamic inventory file #### OS support + - [x] Support for RHEL 7 on x86_64, PPC64 and PPC64LE - [x] Support for RHEL 8 on x86_64 and PPC64LE - [x] Support for UBUNTU 20 on x86_64 and PPC64LE - [x] Support for SLES 15 on x86_64 and PPC64LE #### Common prerequisites + - [x] Disable SELinux (`scale_prepare_disable_selinux: true`), by default false -- [x] Disable firewall (`scale_prepare_disable_firewall: true`), by default true. -- [ ] Disable firewall ports +- [x] Disable firewall (`scale_prepare_disable_firewall: true`), by default false. 
- [ ] Install and start NTP - [ ] Create /etc/hosts mappings - [ ] Open firewall ports -- [x] Generate SSH key +- [x] Generate SSH keys - [x] User must set up base OS repositories -#### Core Spectrum Scale prerequisites +#### Core IBM Storage Scale prerequisites + - [x] Install yum-utils package - [x] Install gcc-c++, kernel-devel, make - [x] Install elfutils,elfutils-devel (RHEL8 specific) -#### Core Spectrum Scale Cluster features -- [x] Install core Spectrum Scale packages on Linux nodes -- [x] Install Spectrum Scale license packages on Linux nodes +#### Core IBM Storage Scale Cluster features + +- [x] Install core IBM Storage Scale packages on Linux nodes +- [x] Install IBM Storage Scale license package on Linux nodes - [x] Compile or install pre-compiled Linux kernel extension (mmbuildgpl) - [x] Configure client and server license - [x] Assign default quorum (maximum 7 quorum nodes) if user has not defined in the inventory -- [x] Assign default manager nodes(all nodes will act as manager node) if user has not defined in the inventory +- [x] Assign default manager nodes (all nodes will act as manager nodes) if user has not defined in the inventory - [x] Create new cluster (mmcrcluster -N /var/mmfs/tmp/NodeFile -C {{ scale_cluster_clustername }}) - [x] Create cluster with profiles -- [x] Create Cluster with daemon and admin network +- [x] Create cluster with daemon and admin network - [x] Add new node into existing cluster - [x] Configure node classes - [x] Define configuration parameters based on node classes - [x] Configure NSDs and file system - [ ] Configure NSDs without file system -- [x] Extend NSDs and file system -- [x] Add disks to existing file systems +- [x] Add NSDs +- [x] Add disks to existing file system -#### Spectrum Scale Management GUI features -- [x] Install Spectrum Scale management GUI packages on GUI designated nodes -- [x] maximum 3 management GUI nodes to be configured +#### IBM Storage Scale Management GUI features + +- [x] Install IBM Storage Scale management GUI packages on designated GUI nodes +- [x] Maximum 3 GUI nodes to be configured - [x] Install performance monitoring sensor packages on all Linux nodes -- [x] Install performance monitoring packages on all GUI designated nodes +- [x] Install performance monitoring collector on all designated GUI nodes - [x] Configure performance monitoring and collectors - [ ] Configure HA federated mode collectors -#### Spectrum Scale Callhome features -- [x] Install Spectrum Scale callhome packages on all cluster nodes -- [x] Configure callhome +#### IBM Storage Scale Call Home features + +- [x] Install IBM Storage Scale Call Home packages on all cluster nodes +- [x] Configure Call Home -#### Spectrum Scale CES (SMB and NFS) Protocol supported features (5.0.5.2) -- [x] Install Spectrum Scale SMB or NFS on selected cluster nodes -- [x] Install Spectrum Scale OBJECT on selected cluster nodes (5.1.1.0) +#### IBM Storage Scale CES (SMB and NFS) Protocol supported features + +- [x] Install IBM Storage Scale SMB or NFS on selected cluster nodes (5.0.5.2 and above) +- [x] Install IBM Storage Scale Object on selected cluster nodes (5.1.1.0 and above) - [x] CES IPV4 or IPV6 support - [x] CES interface mode support - Minimal tested Versions ----------------------- The following Ansible versions are tested: - 2.9 and above +- **Refer to the [Release Notes](https://github.com/IBM/ibm-spectrum-scale-install-infra/releases) for details** -The following IBM Spectrum Scale versions are tested: +The following IBM Storage Scale versions 
are tested: -- 5.0.4.0 -- 5.0.4.1 -- 5.0.4.2 -- 5.0.5.X -- 5.0.5.2 For CES (SMB and NFS) -- 5.1.0.0 -- 5.1.1.0 with Object +- 5.0.4.0 and above +- 5.0.5.2 and above for CES (SMB and NFS) +- 5.1.1.0 and above for CES (Object) +- **Refer to the [Release Notes](https://github.com/IBM/ibm-spectrum-scale-install-infra/releases) for details** Specific OS requirements: -- For CES (SMB/NFS) on SLES15, Python 3 is required. -- For CES (OBJECT) RhedHat 8.x is required. - +- For CES (SMB/NFS) on SLES15: Python 3 is required. +- For CES (Object): RhedHat 8.x is required. Prerequisites ------------- @@ -125,15 +131,15 @@ Users need to have a basic understanding of the [Ansible concepts](https://docs. Note that [Python 3](https://docs.ansible.com/ansible/latest/reference_appendices/python_3_support.html) is required for certain functionality of this project to work. Ansible should automatically detect and use Python 3 on managed machines, refer to the [Ansible documentation](https://docs.ansible.com/ansible/latest/reference_appendices/python_3_support.html#using-python-3-on-the-managed-machines-with-commands-and-playbooks) for details and workarounds. -- **Download Spectrum Scale packages** +- **Download IBM Storage Scale packages** - A Developer Edition Free Trial is available at this site: https://www.ibm.com/account/reg/us-en/signup?formid=urx-41728 - - Customers who have previously purchased Spectrum Scale can obtain entitled versions from IBM Fix Central. Visit https://www.ibm.com/support/fixcentral and search for 'IBM Spectrum Scale (Software defined storage)'. + - Customers who have previously purchased IBM Storage Scale can obtain entitled versions from IBM Fix Central. Visit https://www.ibm.com/support/fixcentral and search for 'IBM Storage Scale (Software defined storage)'. -- **Create password-less SSH keys between all Spectrum Scale nodes in the cluster** +- **Create password-less SSH keys between all nodes in the cluster** - A pre-requisite for installing Spectrum Scale is that password-less SSH must be configured among all nodes in the cluster. Password-less SSH must be configured and verified with [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name), hostname, and IP of every node to every node. + A pre-requisite for installing IBM Storage Scale is that password-less SSH must be configured among all nodes in the cluster. Password-less SSH must be configured and verified with [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name), hostname, and IP of every node to every node. Example: @@ -146,7 +152,6 @@ Users need to have a basic understanding of the [Ansible concepts](https://docs. Repeat this process for all nodes to themselves and to all other nodes. 
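You can verify password-less SSH by running a remote command against each node using its FQDN, hostname, and IP address. A minimal sketch (node name and address are illustrative):

```shell
$ ssh scale01.example.com date
$ ssh scale01 date
$ ssh 198.51.100.11 date
```

Each command should return immediately, without prompting for a password or a host key confirmation.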
- Installation Instructions ------------------------- @@ -157,7 +162,7 @@ Installation Instructions ```shell $ mkdir my_project $ cd my_project - $ git clone https://github.com/IBM/ibm-spectrum-scale-install-infra.git collections/ansible_collections/ibm/spectrum_scale + $ git clone -b main https://github.com/IBM/ibm-spectrum-scale-install-infra.git collections/ansible_collections/ibm/spectrum_scale ``` Be sure to clone the project under the correct subdirectory: @@ -173,24 +178,9 @@ Installation Instructions └── playbook.yml ``` - - **Alternatives - now deprecated!** - - Alternatively, you can clone the project repository and create your [Ansible playbook](https://docs.ansible.com/ansible/latest/user_guide/playbooks.html) inside the repository's directory structure: - - ```shell - $ git clone https://github.com/IBM/ibm-spectrum-scale-install-infra.git - $ cd ibm-spectrum-scale-install-infra - ``` - - Yet another alternative, you can also define an [Ansible environment variable](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#envvar-ANSIBLE_ROLES_PATH) to make the roles accessible in any external project directory: - - ```shell - $ export ANSIBLE_ROLES_PATH=$(pwd)/ibm-spectrum-scale-install-infra/roles/ - ``` - - **Create Ansible inventory** - Define Spectrum Scale nodes in the [Ansible inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) (e.g. `hosts`) in the following format: + Define IBM Storage Scale nodes in the [Ansible inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) (e.g. `hosts`) in the following format: ```yaml # hosts: @@ -219,11 +209,11 @@ Installation Instructions vars: - scale_install_localpkg_path: /path/to/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - ``` + - core_prepare + - core_install + - core_configure + - core_verify + ``` Again, this is just a minimal example. There are different installation methods available, each offering a specific set of options: @@ -234,7 +224,7 @@ Installation Instructions Refer to [VARIABLES.md](VARIABLES.md) for a full list of all supported configuration options. -- **Run the playbook to install and configure the Spectrum Scale cluster** +- **Run the playbook to install and configure the IBM Storage Scale cluster** - Using the `ansible-playbook` command: @@ -247,10 +237,10 @@ Installation Instructions ```shell $ cd samples/ $ ./ansible.sh - ``` + ``` > **Note:** - An advantage of using the automation script is that it will generate log files based on the date and the time in the `/tmp` directory. + > An advantage of using the automation script is that it will generate log files based on the date and the time in the `/tmp` directory. - **Playbook execution screen** @@ -271,7 +261,7 @@ Installation Instructions ok: [scale04] ok: [scale05] - TASK [common : check | Check Spectrum Scale version] + TASK [common : check | Check Spectrum Scale version] ********************************************************************************************************* ok: [scale01] ok: [scale02] @@ -294,7 +284,6 @@ Installation Instructions scale05 : ok=0 changed=59 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ``` - Optional Role Variables ----------------------- @@ -303,75 +292,87 @@ Users can define [variables](https://docs.ansible.com/ansible/latest/user_guide/ Additional functionality can be enabled by defining further variables. 
Browse the examples in the [samples/](samples/) directory to learn how to: - Configure storage and file systems (see [samples/playbook_storage.yml](samples/playbook_storage.yml)) -- Configure node classes and Spectrum Scale configuration attributes (see [samples/playbook_nodeclass.yml](samples/playbook_nodeclass.yml)) -- Deploy Spectrum Scale using JSON inventory (see [samples/playbook_json_ces.yml](samples/playbook_json_ces.yml)) - +- Configure node classes and configuration attributes (see [samples/playbook_nodeclass.yml](samples/playbook_nodeclass.yml)) +- Deploy IBM Storage Scale using JSON inventory (see [samples/playbook_json_ces.yml](samples/playbook_json_ces.yml)) Available Roles --------------- The following [roles](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html) are available for you to reuse when assembling your own [playbook](https://docs.ansible.com/ansible/latest/user_guide/playbooks.html): -- [Core GPFS](roles/core)* -- [GPFS GUI](roles/gui) -- [GPFS SMB](roles/smb) -- [GPFS NFS](roles/nfs) -- [GPFS OBJECT](roles/scale_object) -- [GPFS HDFS](roles/scale_hdfs) -- [GPFS Call Home](roles/callhome) -- [GPFS File Audit Logging](roles/scale_fileauditlogging) +- Core GPFS (`roles/core_*`)\* +- GUI (`roles/gui_*`) +- SMB (`roles/smb_*`) +- NFS (`roles/nfs_*`) +- Object (`roles/obj_*`) +- HDFS (`roles/hdfs_*`) +- Call Home (`roles/callhome_*`) +- File Audit Logging (`roles/fal_*`) +- ... Note that [Core GPFS](roles/core) is the only mandatory role, all other roles are optional. Each of the optional roles requires additional configuration variables. Browse the examples in the [samples/](samples/) directory to learn how to: - Configure Graphical User Interface (GUI) (see [samples/playbook_gui.yml](samples/playbook_gui.yml)) - Configure Protocol Services (SMB & NFS) (see [samples/playbook_ces.yml](samples/playbook_ces.yml)) - Configure Protocol Services (HDFS) (see [samples/playbook_ces_hdfs.yml](samples/playbook_ces_hdfs.yml)) -- Configure Protocol Services (OBJECT) (see [samples/playbook_ces_object.yml](samples/playbook_ces_object.yml)) +- Configure Protocol Services (Object) (see [samples/playbook_ces_object.yml](samples/playbook_ces_object.yml)) - Configure Call Home (see [samples/playbook_callhome.yml](samples/playbook_callhome.yml)) - Configure File Audit Logging (see [samples/playbook_fileauditlogging.yml](samples/playbook_fileauditlogging.yml)) -- Configure cluster with daemon and admin network (see samples/daemon_admin_network) +- Configure cluster with daemon and admin network (see [samples/daemon_admin_network](samples/daemon_admin_network)) +- Configure remotely mounted filesystems (see [samples/playbook_remote_mount.yml](samples/playbook_remote_mount.yml)) Cluster Membership ------------------ -All hosts in the play are configured as nodes in the same Spectrum Scale cluster. If you want to add hosts to an existing cluster then add at least one node from that existing cluster to the play. +All hosts in the play are configured as nodes in the same IBM Storage Scale cluster. If you want to add hosts to an existing cluster then add at least one node from that existing cluster to the play. -You can create multiple clusters by running multiple plays. +You can create multiple clusters by running multiple plays. 
Note that you will need to [reload the inventory](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/meta_module.html) to clear dynamic groups added by the IBM Storage Scale roles: +```yaml +- name: Create one cluster + hosts: cluster01 + roles: ... + +- name: Refresh inventory to clear dynamic groups + hosts: localhost + connection: local + gather_facts: false + tasks: + - meta: refresh_inventory + +- name: Create another cluster + hosts: cluster02 + roles: ... +``` Limitations ----------- -The roles in this project can (currently) be used to create new clusters or extend existing clusters. Similarly, new file systems can be created or extended. But this role does *not* remove existing nodes, disks, file systems or node classes. This is done on purpose — and this is also the reason why it can not be used, for example, to change the file system pool of a disk. Changing the pool requires you to remove and then re-add the disk from a file system, which is not currently in the scope of this role. - -Furthermore, upgrades are not currently in scope of this role. Spectrum Scale supports rolling online upgrades (by taking down one node at a time), but this requires careful planning and monitoring and might require manual intervention in case of unforeseen problems. +The roles in this project can (currently) be used to create new clusters or extend existing clusters. Similarly, new file systems can be created or extended. But this project does _not_ remove existing nodes, disks, file systems or node classes. This is done on purpose — and this is also the reason why it can not be used, for example, to change the file system pool of a disk. Changing the pool requires you to remove and then re-add the disk from a file system, which is not currently in the scope of this project. +Furthermore, upgrades are not currently in scope of this role. IBM Storage Scale supports rolling online upgrades (by taking down one node at a time), but this requires careful planning and monitoring and might require manual intervention in case of unforeseen problems. Troubleshooting --------------- -The roles in this project store configuration files in `/var/mmfs/tmp` on the first host in the play. These configuration files are kept to determine if definitions have changed since the previous run, and to decide if it's necessary to run certain Spectrum Scale commands (again). When experiencing problems one can simply delete these configuration files from `/var/mmfs/tmp` in order to clear the cache — doing so forces re-application of all definitions upon the next run. As a downside, the next run may take longer than expected as it might re-run unnecessary Spectrum Scale commands. This will automatically re-generate the cache. - +The roles in this project store configuration files in `/var/mmfs/tmp` on the first host in the play. These configuration files are kept to determine if definitions have changed since the previous run, and to decide if it's necessary to run certain IBM Storage Scale commands (again). When experiencing problems one can simply delete these configuration files from `/var/mmfs/tmp` in order to clear the cache — doing so forces re-application of all definitions upon the next run. As a downside, the next run may take longer than expected as it might re-run unnecessary IBM Storage Scale commands. This will automatically re-generate the cache. 
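For example, to review the cached definition files on the first host in the play before removing them, a minimal sketch (assuming your inventory file is named `hosts` and the first host is `scale01`):

```shell
$ ansible -i hosts scale01 -m command -a 'ls -l /var/mmfs/tmp'
```

Remove the definition files you want to re-apply, then re-run your playbook; the cache is rebuilt automatically on the next run.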
Reporting Issues and Feedback ----------------------------- Please use the [issue tracker](https://github.com/IBM/ibm-spectrum-scale-install-infra/issues) to ask questions, report bugs and request features. - Contributing Code ----------------- We welcome contributions to this project, see [CONTRIBUTING.md](CONTRIBUTING.md) for more details. - Disclaimer ---------- -Please note: all playbooks / modules / resources in this repo are released for use "AS IS" without any warranties of any kind, including, but not limited to their installation, use, or performance. We are not responsible for any damage or charges or data loss incurred with their use. You are responsible for reviewing and testing any scripts you run thoroughly before use in any production environment. This content is subject to change without notice. - +Please note: all roles / playbooks / modules / resources in this repository are released for use "AS IS" without any warranties of any kind, including, but not limited to their installation, use, or performance. We are not responsible for any damage or charges or data loss incurred with their use. You are responsible for reviewing and testing any scripts you run thoroughly before use in any production environment. This content is subject to change without notice. Copyright and License --------------------- -Copyright IBM Corporation 2020, released under the terms of the [Apache License 2.0](LICENSE). +Copyright IBM Corporation, released under the terms of the [Apache License 2.0](LICENSE). diff --git a/VARIABLES.md b/VARIABLES.md index 708edaeb..5276cfc7 100644 --- a/VARIABLES.md +++ b/VARIABLES.md @@ -1,115 +1,140 @@ -Variables used by Spectrum Scale (GPFS) Ansible project -======================================================= - -- `scale_architecture` - - example: `x86_64` - - default: `{{ ansible_architecture }}` - - Specify the Spectrum Scale architecture that you want to install on your nodes. - -- `scale_daemon_nodename` - - example: `scale01` - - dafault: `{{ ansible_hostname }}` - - Spectrum Scale daemon nodename (defaults to node's hostname). - -- `scale_admin_nodename` - - example: `scale01` - - dafault: `{{ scale_daemon_nodename }}` - - Spectrum Scale admin nodename (defaults to node's hostname). - - -- `scale_state` - - example: `maintenance` - - default: `present` - - Desired state of the Spectrum Scale node. Can be `present`, `maintenance` or `absent`: - - `present` - node will be added to cluster, daemon will be started - - `maintenance` - node will be added to cluster, daemon will not be started - - `absent` - node will be removed from cluster - -- `scale_prepare_disable_selinux` - - example: `true` - - default: `false` - - Whether or not to disable SELinux. - -- `scale_reboot_automatic` - - example: `true` - - default: `false` - - Whether or not to automatically reboot nodes - if set to `false` then only a message is printed. If set to `true` then nodes are automatically rebooted (dangerous!). - -- `scale_prepare_enable_ssh_login` - - example: `true` - - default: `false` - - Whether or not enable SSH root login (PermitRootLogin) and public key authentication (PubkeyAuthentication). - -- `scale_prepare_restrict_ssh_address` - - example: `true` - - default: `false` - - Whether or not to restrict SSH access to the admin nodename (ListenAddress). Requires `scale_prepare_enable_ssh_login` to be enabled, too. 
- -- `scale_prepare_disable_ssh_hostkeycheck` - - example: `true` - - default: `false` - - Whether or not to disable SSH hostkey checking (StrictHostKeyChecking). - -- `scale_prepare_exchange_keys` - - example: `true` - - default: `false` - - Whether or not to exchange SSH keys between all nodes. - -- `scale_prepare_pubkey_path` - - example: `/root/.ssh/gpfskey.pub` - - default: `/root/.ssh/id_rsa.pub` - - Path to public SSH key - will be generated (if it does not exist) and exchanged between nodes. Requires `scale_prepare_exchange_keys` to be enabled, too. - -- `scale_prepare_disable_firewall` - - example: `true` - - default: `false` - - Whether or not to disable Linux firewalld - if you need to keep firewalld active then change this variable to `false` and apply your custom firewall rules prior to running this role (e.g. as pre_tasks). - -- `scale_install_localpkg_path` - - example: `/root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install` - - default: none - - Specify the path to the self-extracting Spectrum Scale installation archive on the local system (accessible on Ansible control machine) - it will be copied to your nodes. - -- `scale_install_remotepkg_path` - - example: `/root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install` - - default: none - - Specify the path to Spectrum Scale installation package on the remote system (accessible on Ansible managed node). - -- `scale_install_repository_url` - - example: `http://server/gpfs/` - - default: none - - Specify the URL of the (existing) Spectrum Scale YUM repository (copy the contents of /usr/lpp/mmfs/{{ scale_version }}/ to a web server in order to build your repository). - - Note that if this is a URL then a new repository definition will be created. If this variable is set to `existing` then it is assumed that a repository definition already exists and thus will *not* be created. - -- `scale_install_directory_pkg_path` - - example: `/tmp/gpfs/` - - default: none - - Specify the path to the user-provided directory, containing all Spectrum Scale packages. Note that for this installation method all packages need to be kept in a single directory. - -- `scale_version` - - example: `5.0.4.0` - - default: none - - Specify the Spectrum Scale version that you want to install on your nodes. It is mandatory to define this variable for the following installation methods: - - Repository installation method (`scale_install_repository_url`) - - Local archive installation method (`scale_install_localpkg_path`) - - Remote archive installation method (`scale_install_remotepkg_path`) - - The variable is *not* necessary for the directory installation method (`scale_install_directory`), as with this method the version is automatically detected from the installation package at the given path. +Variables used by IBM Storage Scale (GPFS) Ansible project +========================================================== + +Variables list is dived into each of the Ansible roles. 
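Most of these variables can be defined per node as host variables, or for all nodes as group variables, in your Ansible inventory. A minimal sketch (group name, host names, and values are illustrative):

```yaml
# hosts:
[cluster01]
scale01  scale_cluster_quorum=true   scale_cluster_manager=true
scale02  scale_cluster_quorum=true   scale_cluster_manager=false

[cluster01:vars]
scale_version=5.1.1.0
scale_install_repository_url=http://server/gpfs/
```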
+ +Role: Core - Core IBM Storage Scale installation and configuration +------------------------------------------------------------------ + +| Variables | Default | Options | User Mandatory | Descriptions | +| -------------------------------------- | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| scale_architecture: | {{ansible_architecture}} | x86_64 or ppc64le | no | This ansible_architecture is gather from ansible get_facts module, IBM Storage Scale architecture that you want to install on your nodes. | +| scale_version: | none | 5.x.x.x | yes | Specify the IBM Storage Scale version that you want to install on your nodes. With 5.0.5.x. | +| scale_daemon_nodename: | {{ansible_hostname}} | none | no | IBM Storage Scale daemon nodename defaults to nodes hostname | +| scale_admin_nodename: | {{ansible_hostname}} | none | no | IBM Storage Scale admin nodename defaults to nodes hostname | +| scale_state: | present | present,maintenance,absent | no | Desired state of the IBM Storage Scale node. present - node will be added to cluster, daemon will be started maintenance
- node will be added to cluster, daemon will not be started; absent - node will be removed from cluster | +| scale_prepare_disable_selinux | false | true or false | no | Whether or not to disable SELinux. | +| scale_reboot_automatic | false | true or false | no | Whether or not to automatically reboot nodes - if set to false then only a message is printed. If set to true then nodes are automatically rebooted (dangerous!). | +| scale_prepare_enable_ssh_login | false | true or false | no | Whether or not to enable SSH root login (PermitRootLogin) and public key authentication (PubkeyAuthentication). | +| scale_prepare_restrict_ssh_address | false | true or false | no | Whether or not to restrict SSH access to the admin nodename (ListenAddress). Requires scale_prepare_enable_ssh_login to be enabled. | +| scale_prepare_disable_ssh_hostkeycheck | false | true or false | no | Whether or not to disable SSH hostkey checking (StrictHostKeyChecking). | +| scale_prepare_exchange_keys | false | true or false | no | Whether or not to exchange SSH keys between all nodes. | +| scale_prepare_pubkey_path | /root/.ssh/id_rsa.pub | /root/.ssh/gpfskey.pub | no | Path to public SSH key - will be generated (if it does not exist) and exchanged between nodes. Requires scale_prepare_exchange_keys to be enabled, too. | +| scale_prepare_disable_firewall | false | true or false | no | Whether or not to disable Linux firewalld - if you need to keep firewalld active then change this variable to false and apply your custom firewall rules prior to running this role (e.g. as pre_tasks). | +| scale_install_localpkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to the self-extracting IBM Storage Scale installation archive on the local system (accessible on Ansible control machine) - it will be copied to your nodes. | +| scale_install_remotepkg_path | none | /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install | yes | Specify the path to IBM Storage Scale installation package on the remote system (accessible on Ansible managed node). | +| scale_install_repository_url | none | example: http://server/gpfs/ | yes | Specify the URL of the (existing) IBM Storage Scale YUM repository (copy the contents of /usr/lpp/mmfs/{{ scale_version }}/ to a web server in order to build your repository).
Note that if this is a URL then a new repository definition will be created. If this variable is set to existing then it is assumed that a repository definition already exists and thus will _not_ be created. | +| scale_install_directory_pkg_path | none | example: /tmp/gpfs/ | yes | Specify the path to the user-provided directory, containing all IBM Storage Scale packages. Note that for this installation method all packages need to be kept in a single directory. | +| scale_cluster_quorum | false | true or false | no | If you don't specify any quorum nodes then the first seven hosts in your inventory will automatically be assigned the quorum role, even if this variable is false. | +| scale_cluster_manager | false | true or false | no | Node's default manager role - you'll likely want to define per-node roles in your inventory | +| scale_cluster_profile_name: | none | gpfsprotocoldefaults or gpfsprotocolrandomio | no | Specifies a predefined profile of attributes to be applied. System-defined profiles are located in /usr/lpp/mmfs/profiles/
The following system-defined profile names are accepted: gpfsprotocoldefaults and gpfsprotocolrandomio.
eg. If you want to apply gpfsprotocoldefaults then specify scale_cluster_profile_name: gpfsprotocoldefaults | +| scale_cluster_profile_dir_path | /usr/lpp/mmfs/profiles/ | Path to cluster profile: example: /usr/lpp/mmfs/profiles/ | no | Fixed variable related to mmcrcluster profile. System-defined profiles are located in /usr/lpp/mmfs/profiles/ | +| scale_enable_gpg_check: | true | true or false | no | Enable/disable gpg key flag | +| scale_install_localpkg_tmpdir_path | /tmp | path to folder. | no | Temporary directory to copy installation package to (local package installation method) | +| scale_nodeclass: | none | Name of the nodeclass: example scale nodeclass: - class1 | no | Node classes can be defined on a per-node basis by defining the scale_nodeclass variable. | +| scale_config: | none | scale_config:
  - nodeclass: class1
    params:
        - pagepool: 4G
        - autoload: yes
        - ignorePrefetchLunCount: yes | no | Configuration attributes can be defined as variables for _any_ host in the play
The host for which you define the configuration attribute is irrelevant. Refer to the man mmchconfig man page for a list of available configuration attributes. | +| scale_storage: | none | scale_storage:
     filesystem: gpfs01
     blockSize: 4M
     maxMetadataReplicas: 2
     defaultMetadataReplicas: 2
     maxDataReplicas: 2
     defaultDataReplicas: 2
     numNodes: 16
     automaticMountOption: true
     defaultMountPoint: /mnt/gpfs01
     disks:
          - device: /dev/sdb
            nsd: nsd_1
            servers: scale01
            failureGroup: 10
            usage: metadataOnly
            pool: system
          - device: /dev/sdc
            nsd: nsd_2
            servers: scale01
            failureGroup: 10
            usage: dataOnly
            pool: data | no | Refer to man mmchfs and man mmchnsd man pages for a description of these storage parameters.
The filesystem parameter is mandatory; the servers and device parameters are mandatory for each of the file system's disks.
All other file system and disk parameters are optional. scale_storage _must_ be defined using group variables.
Do _not_ define disk parameters using host variables or inline variables in your playbook.
Doing so would apply them to all hosts in the group/play, thus defining the same disk multiple times... | +| scale_admin_node | false | true or false | no | Set admin flag on node for Ansible to use. | +| scale_nsd_server | scale_nsd_server | true or false | no | Set nsd flag for installation purpose | + +Role: GUI - GUI for Management of IBM Storage Scale Cluster +----------------------------------------------------------- + +| Variables | Default | Options | User Mandatory | Descriptions | +| --------------------------------- | ------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------- | +| scale_gui_hide_tip_callhome | false | true or false | no | Hide the 'Call Home not enabled' tip in the GUI | +| scale_cluster_gui: | false | true or false | no | Install IBM Storage Scale GUI on nodes, set by host variables. | +| scale_service_gui_start: | true | true or false | no | Whether or not to start the Scale GUI after installation. | +| scale_gui_admin_user: | none | admin | no | Specify a name for the admin user to be created. | +| scale_gui_admin_password: | none | Admin@GUI! | no | Password to be set on the admin user | +| scale_gui_admin_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user, check IBM documentation for valid roles. | +| scale_gui_user_username: | none | SEC | no | Extra IBM Storage Scale GUI user. Example: Monitor or RestAPI. | +| scale_gui_user_password: | none | Storage@Scale1 | no | Password for the extra user | +| scale_gui_user_role: | none | SystemAdmin | no | Role access for the extra user. | +| scale_gui_admin_hc_vault: | none | N/A | no | HashiCorp - Create local admin user with password from Vault; cannot be combined with scale_gui_admin_user | +| scale_gui_admin_hc_vault_user: | none | admin | no | Create local admin user and write password to Vault | +| scale_gui_admin_hc_vault_role: | none | SecurityAdmin,SystemAdmin | no | Role access for the admin user, check IBM documentation for valid roles. | +| scale_gui_cert_hc_vault: | false | true or false | no | Generate an HTTPS certificate from HashiCorp Vault and import it into the Scale GUI.
The Scale host needs to be included in HC Vault and the Ansible playbook needs to have the computed.name variables; normally the playbook is then run from Terraform. | +| scale_gui_password_policy_change: | false | true or false | no | Change the default GUI user password policy: change what you need in your inventory files and the rest will use defaults. Used with **scale_gui_password_policy:** | | +| scale_gui_password_policy: | false | scale_gui_password_policy:
  minLength: 6
  maxAge: 900
  minAge: 0
  remember: 3
  minUpperChars: 0
  minLowerChars: 0
  minSpecialChars: 0
  minDigits: 0
  maxRepeat: 0
  minDiff: 1
  rejectOrAllowUserName: --rejectUserName | | Change the default GUI user password policy. Change what you need in your inventory files and the rest will use defaults.
  
 scale_gui_password_policy:
  minLength: 6 ## Minimum password length
  maxAge: 900 ## Maximum password age
  minAge: 0 ## Minimum password age
  remember: 3 ## Remember old passwords
  minUpperChars: 0 ## Minimum upper case characters
  minLowerChars: 0 ## Minimum lower case characters
  minSpecialChars: 0 ## Minimum special case characters
  minDigits: 0 ## Minimum digits
  maxRepeat: 0 ## Maximum number of repeat characters
  minDiff: 1 ## Minimum different characters with respect to old password
  rejectOrAllowUserName: --rejectUserName ## either --rejectUserName or --allowUserName | +| scale_gui_ldap_integration: | false | true or false | no | Active Directory information for Managing GUI users in an external AD or LDAP server | +| scale_gui_ldap: | none | scale_gui_ldap:
  name: 'myad'
  host: 'myad.mydomain.local'
  bindDn: 'CN=servicebind,CN=Users,DC=mydomain,DC=local'
  bindPassword: 'password'
  baseDn: 'CN=Users,DC=mydomain,DC=local'
  port: '389' #Default 389
  type: 'ad' #Default Microsoft Active Directory
  #securekeystore: /tmp/ad.jks #Local on GUI Node
  #secureport: '636' #Default 636 | no | Managing GUI users in an external AD or LDAP Parameters
 
 Parameter Description
  - **name:** Alias for your LDAP/AD server
  - **host:** The IP address or host name of the LDAP server.
  - **baseDn:** BaseDn string for the repository.
  - **bindDn:** BindDn string for the authentication user.
  - **bindPassword:** Password of the authentication user.
  - **port:** Port number of the LDAP. Default is 389
  - **type:** Repository type (ad, ids, domino, secureway, iplanet, netscape, edirectory or custom). Default is ad.
  - **securekeystore:** Location with file name of the keystore file (.jks, .p12 or .pfx).
  - **secureport:** Port number of the LDAP. 636 over SSL. | +| scale_gui_groups: | none | scale_gui_groups:
  administrator: 'scale-admin'
  securityadmin: 'scale-securityadmin'
  storageadmin: 'scale-storage-administrator'
  snapadmin: 'scale-snapshot-administrator'
  data_access: 'scale-data-access'
  monitor: 'scale-monitor'
  protocoladmin: 'scale-protocoladmin'
  useradmin: 'scale-useradmin' | no | The LDAP/AD groups need to be created in the LDAP server. (They do not need to exist before deployment.)
You'll likely want to define this in your host inventory
Add the mappings that you want and replace the **scale-** with your ldap groups.

The following are the default user groups:
  - **Administrator** - Manages all functions on the system except those that deal with managing users, user groups, and authentication.
  - **SecurityAdmin** - Manages all functions on the system, including managing users, user groups, and user authentication.
  - **SystemAdmin** - Manages clusters, nodes, alert logs, and authentication.
  - **StorageAdmin** - Manages disks, file systems, pools, filesets, and ILM policies.
  - **SnapAdmin** - Manages snapshots for file systems and filesets.
  - **DataAccess** - Controls access to data. For example, managing access control lists.
  - **Monitor** - Monitors objects and system configuration but cannot configure, modify, or manage the system or its resources.
  - **ProtocolAdmin** - Manages object storage and data export definitions of SMB and NFS protocols.
  - **UserAdmin** - Manages access for GUI users. Users who are part of this group have edit permissions only in the Access pages of the GUI. Check IBM doc for updated list | +| scale_gui_email_notification: | false | false or true | no | Enable E-mail notifications in IBM Storage Scale GUI | +| scale_gui_email: | none | scale_gui_email:
  name: 'SMTP_1'
  ipaddress: 'emailserverhost'
  ipport: '25'
  replay_email_address: scale-server-test@acme.com
  contact_name: 'scale-contact-person'
  subject: &cluster&message
  sender_login_id:
  password:
  headertext:
  footertext: | no | - The email feature transmits operational and error-related data in the form of an event notification email.
  - Email notifications can be customized by setting a custom header and footer for the emails and customizing the subject by selecting and combining from the following variables:
    &message, &messageId, &severity, &dateAndTime, &cluster and &component.
  
  - **name** - Specifies a name for the e-mail server.
  - **address** - Specifies the address of the e-mail server. Enter the SMTP server IP address or host name. For example, 10.45.45.12 or smtp.example.com.
  - **portNumber** - Specifies the port number of the e-mail server. Optional.
  - **reply_email_address/sender_address** - Specifies the sender's email address.
  - **contact_name/sender_name** - Specifies the sender's name.
  - **subject** - Notifications can be customized by setting a custom header and footer, or with variables like &cluster&message ## Variables: &message &messageId &severity &dateAndTime &cluster &component
  - **sender_login_id** - Login needed to authenticate sender with email server in case the login is different from the sender address (--reply). Optional.
  - **password** - Password used to authenticate the sender address (--reply) or login id (--login) with the email server |
  name: 'name_email_recipient_name':
  address: 'email_recipient_address@email.com':
  components_security_level: 'SCALEMGMT=WARNING,CESNETWORK=WARNING':
  reports: 'DISK,GPFS,AUTH':
  quotaNotification: '--quotaNotification' ##if defined it enabled quota Notification:
  quotathreshold: '70.0' | no | **Options:**
  - **NAME**: Name of the email Recipients
  - **Address:** userAddress Specifies the address of the e-mail user
  - **Components_security_level**
     - The value of scale_gui_email_recipients_components_security_level needs to contain the **Component** and the **Warning/Security Level**
        - Choose a component like **SCALEMGMT** and a security level; for example, WARNING gives **SCALEMGMT=WARNING**
        - Security level: Choose the lowest severity of an event for which you want to receive an email. For example, selecting Tip includes events with severity Tip, Warning, and Error in the email.
      - The severity levels are as follows: **INFO**, **TIP**, **WARNING**, **ERROR**
     **List of all security levels:**
      AFM=WARNING,AUTH=WARNING,BLOCK=WARNING,CESNETWORK=WARNING,CLOUDGATEWAY=WARNING,CLUSTERSTATE=WARNING,DISK=WARNING,FILEAUDITLOG=WARNING,
      FILESYSTEM=WARNING,GPFS=WARNING,GUI=WARNING,HADOOPCONNECTOR=WARNING,KEYSTONE=WARNING,MSGQUEUE=WARNING,NETWORK=WARNING,NFS=WARNING,
      OBJECT=WARNING,PERFMON=WARNING,SCALEMGMT=WARNING,SMB=WARNING,CUSTOM=WARNING,AUTH_OBJ=WARNING,CES=WARNING,CESIP=WARNING,NODE=WARNING,
      THRESHOLD=WARNING,WATCHFOLDER=WARNING,NVME=WARNING,POWERHW=WARNING
  - **Reports** listOfComponents
       - Specifies the components to be reported. The tasks generating reports are scheduled by default to send a report once per day. Optional.
       AFM,AUTH,BLOCK,CESNETWORK,CLOUDGATEWAY,CLUSTERSTATE,DISK,FILEAUDITLOG,FILESYSTEM,GPFS,GUI,HADOOPCONNECTOR,
       KEYSTONE,MSGQUEUE,NETWORK,NFS,OBJECT,PERFMON,SCALEMGMT,SMB,CUSTOM,AUTH_OBJ,CES,CESIP,NODE,THRESHOLD,WATCHFOLDER,NVME,POWERHW
  - **quotaNotification**
     Enables quota notifications which are sent out if the specified threshold is violated. (See --quotathreshold)
  - **quotathreshold** valueInPercent
     - Sets the threshold (percent of the hard limit) for including quota violations in the quota digest report.
     - The default value is 100. The values -3, -2, -1, and zero have special meaning.
     - Specify the value -2 to include all results, even entries where the hard quota is not set.
     - Specify the value -1 to include all entries where hard quota is set and current usage is greater than or equal to the soft quota.
     - Specify the value -3 to include all entries where hard quota is not set and current usage is greater than or equal to the soft quota only.
     - Specify the value 0 to include all entries where the hard quota is set.
  Using unlisted options can lead to an error | +| scale_gui_snmp_notification: | false | true or false | no | Enable SNMP notifications in IBM Storage Scale GUI | +| scale_gui_snmp_server: | false | scale_gui_snmp_server:
  ip_adress: 'snmp_server_host'
  ip_port: '162'
  community: 'Public' | no | - To Configure SNMP Notification.
    - Change the Value:
      - scale_gui_snmp_notification: true
       - ip_adress to your SNMP server/host
       - ip_port to your SNMP port
       - community to your SNMP community | + +Role: NFS,SMB,OBJ - Protocol +---------------------------- + +| variables | Default | Options | User Mandatory | Descriptions | +| ------------------------ | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| scale_install_debuginfo: | true | true or false | no | Flag to install ganesha/nfs debug package | +| scale_install_debuginfo: | true | true or false | no | Flag to install smb debug package | +| scale_protocol_node: | none | true or false | no | Enable to set node to uses as Protcol Node, by host variable. | +| scale_protocols: #IPV4 | none | scale_protocols: #IPV4
  smb: true
  nfs: true
  object: true
  export_ip_pool: [192.168.100.100,192.168.100.101]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | To install IBM Storage Scale Protocols. Refer to the mmces man pages for a description of these Cluster Export Services options. scale_ces_groups can also be used to group nodes. | +| scale_protocols: #Ipv6 | none | scale_protocols: #Ipv6
  smb: true
  nfs: true
  object: true
  interface: [eth0]
  export_ip_pool: [2002:90b:e006:84:250:56ff:feb9:7787]
  filesystem: cesSharedRoot
  mountpoint: /gpfs/cesSharedRoot | no | For enabling Cluster Export Services in an IPv6 environment, one also needs to define an interface parameter. scale_ces_groups can also be used to group nodes. | +| scale_ces_obj: | none | scale_ces_obj:
  dynamic_url: False
  enable_s3: False
  local_keystone: True
  enable_file_access: False
  endpoint_hostname: scale-11
  object_fileset: Object_Fileset
  pwd_file: obj_passwd.j2
  admin_user: admin
  admin_pwd: admin001
  database_pwd: admin001 | no | Missing descriptions. | + +Role: HDFS - Hadoop +------------------- + +| variables | Default | Options | User Mandatory | Descriptions | +| -------------------- | ------- | ------- | -------------- | ------------------------------------------------- | +| ha_enabled: | false | true or false | no | Enable HA for the HDFS NameNode. | +| scale_hdfs_clusters: | none | - name: mycluster
  filesystem: gpfs1
  namenodes: ['host-vm1.test.net', 'host-vm2.test.net']
  datanodes: ['host-vm3.test.net', 'host-vm4.test.net', 'host-vm5.test.net']
  datadir: datadir | no | Install IBM Storage Scale (HDFS); to be documented in more detail. | + +Role: zimon - Performance Monitoring +------------------------------------ + +| variables | Default | Options | User Mandatory | Descriptions | +| ---------------------- | ------- | ------------- | -------------- | ------------------------------------------------------------------------------ | +| scale_zimon_collector: | false | true or false | no | Nodes' default GUI collector role; it installs the collector on all GUI nodes. | +| scale_cluster_gui | false | true or false | no | Install IBM Storage Scale GUI on nodes, set by host variables. | +| scale_cluster_zimon | false | true or false | no | Install and enable zimon (performance monitoring). | + +Role: FAL - File Audit Logging +------------------------------ + +| variables | Default | Options | User Mandatory | Descriptions | +| ---------------- | ------- | ------------- | -------------- | ------------------------------- | +| scale_fal_enable | true | true or false | no | Flag to enable fileauditlogging | + +Role: callhome - Call Home +-------------------------- + +| variables | Default | Options | User Mandatory | Descriptions | +| ---------------------- | ------- | ------- | -------------- | ------------ | +| scale_callhome_params: | None | scale_callhome_params:
    is_enabled: true
    customer_name: abc
    customer_email: abc@abc.com
    customer_id: 12345
    customer_country: IN
    proxy_ip:
    proxy_port:
    proxy_user:
    proxy_password:
    proxy_location:
     callhome_server: scale01  ## server that has callhome installed and can reach out to IBM
     callhome_group1: [scale01,scale02,scale03,scale04]
     callhome_schedule: [daily,weekly] | no | Refer to the mmcallhome man pages for a description of these Call Home parameters:
**scale_callhome_params**:
  is_enabled: true
  customer_name: abc
  customer_email: abc@abc.com
  customer_id: 12345
  customer_country: IN
  proxy_ip:
  proxy_port:
  proxy_user:
  proxy_password:
  proxy_location:
  callhome_server: scale01  ## server that has callhome installed and can reach out to IBM
  callhome_group1: [scale01,scale02,scale03,scale04]
  callhome_schedule: [daily,weekly] | + +Role: remotemount_configure - Enable and Configure Remote Mounting of Filesystems +--------------------------------------------------------------------------------- + +| variables | Default | Options | User Mandatory | Descriptions | +| ----------------------------------------------- | ---------------------------------------- | ------- | -------------- | ------------ | +| scale_remotemount_client_gui_username: | none | username, example admin | yes | Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_client_gui_password: | none | password for user | yes | Password for Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_client_gui_hostname: | none | 10.10.10.1 | yes | IP or Hostname of the Client GUI Node | +| scale_remotemount_storage_gui_username: | none | | yes | Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_storage_gui_password: | none | | yes | Password for Scale User with Administrator or ContainerOperator role/rights | +| scale_remotemount_storage_gui_hostname: | none | | yes | IP or Hostname of the Storage GUI Node | +| scale_remotemount_storage_adminnodename: | false | true or false | no | IBM Storage Scale uses the Daemon node name and its attached IP to connect and run cluster traffic on. In most cases the admin network and daemon network are the same.
    In case you have a different AdminNode address and DaemonNode address and for some reason you want to use the admin network, you can set this variable to true. | +| scale_remotemount_filesystem_name | none | scale_remotemount_filesystem_name
  scale_remotemount_client_filesystem_name:
  scale_remotemount_client_remotemount_path:
  scale_remotemount_storage_filesystem_name:
  scale_remotemount_access_mount_attributes:
  scale_remotemount_client_mount_fs:
  scale_remotemount_client_mount_priority: 0 | yes | The variables need to be provided as a list, as mounting multiple filesystems is now supported.

   - Local filesystem name of the remotely mounted filesystem, so the storage cluster and remote cluster can have different names.
   - Path to where the filesystem should be mounted: /gpfs01/fs1
  - Storage Cluster filesystem you want to mount: gpfs01
   - Filesystem can be mounted with different access modes: RW or RO
   - Indicates when the file system is to be mounted: options are yes, no, automount (when the file system is first accessed)
  - File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last.
   A value of zero indicates no priority. valid values: 0 - x | +| scale_remotemount_client_filesystem_name: | none | fs1 | yes | Local Filesystem Name of the remote mounted filesystem, So the storage cluster and remote cluster can have different names. | +| scale_remotemount_client_remotemount_path: | none | /gpfs01/fs1 | yes | Path to where the filesystem shoul be Mounted. | +| scale_remotemount_storage_filesystem_name: | none | gpfs01 | yes | Storage Cluster filesystem you want to mount | +| scale_remotemount_access_mount_attributes: | rw | RW, RO | no | Filesystem can be mounted in different access mount: RW or RO | +| scale_remotemount_client_mount_fs: | yes | yes, no, automount | no | Indicates when the file system is to be mounted:\*\* options are yes, no, automount (When the file system is first accessed.) | +| scale_remotemount_client_mount_priority: 0 | 0 | 0 - x | no | File systems with higher Priority numbers are mounted after file systems with lower numbers. File systems that do not have mount priorities are mounted last. A value of zero indicates no priority. | +| scale_remotemount_client_no_gui: | false | true or false | no | If Accessing/Client Cluster dont have GUI, it will use CLI/SSH against Client Cluster | +| scale_remotemount_storage_pub_key_location: | /tmp/storage_cluster_public_key.pub | path to pubkey | no | Client Cluster (Access) is downloading the pubkey from Owning cluster and importing it | +| scale_remotemount_cleanup_remote_mount: | false | true or false | no | Unmounts, remove the filesystem, and the connection between Accessing/Client cluster and Owner/Storage Cluster. This now works on clusters that not have GUI/RESTAPI interface on Client Cluster | +| scale_remotemount_debug: | false | true or false | no | Outputs debug information after tasks | +| scale_remotemount_forceRun: | false | true or false | no | If scale_remotemount_forceRun is passed in, then the playbook is attempting to run remote_mount role regardless of whether the filesystem is configured | +| scale_remotemount_storage_pub_key_location: | /tmp/storage_cluster_public_key.pub | path to | no | Client Cluster (Access) pubkey that is changed from json to right format and then used when creating connection | +| scale_remotemount_storage_pub_key_location_json | /tmp/storage_cluster_public_key_json.pub | path to | no | Client Cluster (Access) is downloading the pubkey as JSON from Owning cluster | +| scale_remotemount_storage_pub_key_delete | true | true or false | no | Delete both temporary pubkey after the connection have been established | +| scale_remotemount_remotecluster_chipers: | AUTHONLY | AES128-SHA
AES256-SHA
AUTHONLY | no | Sets the security mode for communications between the current cluster and the remote cluster
 Encryption can have a performance impact and increase CPU usage.
run **mmauth show ciphers** to check supported ciphers | +| scale_remotemount_storage_pub_key_delete | true | true or false | no | Delete both temporary pubkey after the connection have been established | +| scale_remotemount_validate_certs_uri: | no | no | no | If Ansible URI module should validate https certificate for IBM Storage Scale RestAPI interface. | diff --git a/roles/callhome/README.md b/docs/README.CALLHOME.md old mode 100755 new mode 100644 similarity index 100% rename from roles/callhome/README.md rename to docs/README.CALLHOME.md diff --git a/roles/gui/README.md b/docs/README.GUI.md old mode 100755 new mode 100644 similarity index 100% rename from roles/gui/README.md rename to docs/README.GUI.md diff --git a/roles/scale_hdfs/README.md b/docs/README.HDFS.md similarity index 100% rename from roles/scale_hdfs/README.md rename to docs/README.HDFS.md diff --git a/roles/nfs/README.md b/docs/README.NFS.md similarity index 100% rename from roles/nfs/README.md rename to docs/README.NFS.md diff --git a/roles/scale_object/README.md b/docs/README.OBJ.md similarity index 100% rename from roles/scale_object/README.md rename to docs/README.OBJ.md diff --git a/roles/remote_mount/README.md b/docs/README.REMOTEMOUNT.md similarity index 62% rename from roles/remote_mount/README.md rename to docs/README.REMOTEMOUNT.md index 729d61a2..0cd7f628 100644 --- a/roles/remote_mount/README.md +++ b/docs/README.REMOTEMOUNT.md @@ -3,7 +3,7 @@ IBM Spectrum Scale (GPFS) Remote Cluster and Mount Role Role Definition ------------------------------- -- Role name: **remote_mount** +- Role name: **remotemount_configure** - Definition: - This role adds support for consumers of the playbook to remote mount a IBM Spectrum Scale filesystem from a Storage cluster. The roles leverage the Spectrum Scale REST API , meaning 5.0.5.2 or later versions of Scale contains the endpoints. @@ -15,12 +15,15 @@ Role Definition Features ----------------------------- -- Remote Mounts FS with API calls to Clusters Storage and Client -- Remote Mounts FS with API calls to Storage Clusters and CLI to Client/Accessing Cluster +- Remote Mounts FS with API calls to Cluster Storage and Client +- Remote Mounts FS with API calls to Storage Cluster and CLI to Client/Accessing Cluster - Cleanup Remote Mount from Client and Storage Servers - Remote Mount several filesystems in same ansible play. - Check's and add Remote Filesystems if not already there. -- Check if remote cluster is already defined. +- Check's if remote cluster is already defined. +- Added option for Security mode for communications between the current cluster and the remote cluster (Encryption) +- Mount filesystem on desired client cluster nodes. +- Option to specify either Deamon or Admin node name for cluster traffic. Limitation @@ -72,7 +75,46 @@ The following variables would need to be defined by the user, either as vars to - ``scale_remotemount_storage_pub_key_location_json:`` (Defaults to : "/tmp/storage_cluster_public_key_json.pub") **Client Cluster (Access) is downloading the pubkey as JSON from Owning cluster** - ``scale_remotemount_storage_pub_key_delete:`` (Default to: true) **delete both temporary pubkey after the connection have been established** -- ``scale_remotemount_storage_adminnodename: true `` (Default to: false) **Spectrum Scale uses the Deamon node name and the IP Attach to connect and run cluster traffic on. In most cases the admin network and deamon network is the same. 
In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable to true** +- ``scale_remotemount_storage_adminnodename: `` (Default to: false) **Spectrum Scale uses the Deamon node name and the IP Attach to connect and run cluster traffic on. In most cases the admin network and deamon network is the same. In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable to true** + +- ``scale_remotemount_remotecluster_chipers: `` (Default to: AUTHONLY) **Sets the security mode for communications between the current cluster and the remote cluster Encyption can have performance effect and increased CPU usage** + - Run the follwing command to check the supported ciphers: mmauth show ciphers + + ```console + Supported ciphers for nistCompliance=SP800-131A: + AES128-SHA + AES128-SHA256 + AES256-SHA + AES256-SHA256 + ``` + +- ``scale_remotemount_gpfsdemon_check: true ``(Default to: true) **Checks that GPFS deamon is started on GUI node, it will check the first server in NodeClass GUI_MGMT_SERVERS, this is the same flag to check when trying to mount up filesystems on all nodes. Check can be disabled with changing the flag to false.** + +- ``scale_remotemount_client_mount_on_nodes: all``(Default to: all) **Default it will try to mount the filesystem on all client cluster (accessing) nodes, here you can replace this with a comma separated list of servers. example: scale1-test,scale2-test** + + +- ``scale_remotemount_storage_contactnodes_filter: '?fields=roles.gatewayNode%2Cnetwork.daemonNodeName&filter=roles.gatewayNode%3Dfalse' `` + - When adding the storage Cluster as a remotecluster in client cluster we need to specify what nodes should be used as contact node, and in normal cases **all** nodes would be fine. In case we have AFM Gateway nodes, or Cloud nodes TFCT, we want to use the RESTAPI filter to remove those nodes, so they are not used. + + - **Example**: + - Default is only list all servers that have (AFM) gatewayNode=false. ``scale_remotemount_storage_contactnodes_filter: '?fields=roles.gatewayNode%2Cnetwork.daemonNodeName&filter=roles.gatewayNode%3Dfalse'`` + - No AFM and CloudGateway: ``?fields=roles.gatewayNode%2Cnetwork.daemonNodeName%2Croles.cloudGatewayNode&filter=roles.gatewayNode%3Dfalse%2Croles.cloudGatewayNode%3Dfalse`` + - To create your own filter, go to the API Explorer on Spectrum Scale GUI. https://IP-TO-GUI-NODE/ibm/api/explorer/#!/Spectrum_Scale_REST_API_v2/nodesGetv2 + + Roles in version 5.1.1.3 + + ```json + "roles": { + "cesNode": false, + "cloudGatewayNode": false, + "cnfsNode": false, + "designation": "quorum", + "gatewayNode": false, + "managerNode": false, + "otherNodeRoles": "perfmonNode", + "quorumNode": true, + "snmpNode": false + ``` Example Playbook's ------------------------------- @@ -82,20 +124,21 @@ There is also example playbook's in samples folder. 
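Before wiring a filter into a playbook, it can be useful to preview which storage cluster nodes a given `scale_remotemount_storage_contactnodes_filter` value would return. The following is a minimal, hypothetical check using the Ansible `uri` module, assuming the Scale management API is reachable at `https://<storage GUI node>:443/scalemgmt/v2/nodes` and reusing the role's GUI connection variables; the task name and registered variable are illustrative only:

```yaml
# Hypothetical ad-hoc tasks to preview the contact node candidates returned by
# the default filter (falls back to the filter shown above if the variable is unset).
- name: Preview contact node candidates on the storage cluster
  uri:
    url: "https://{{ scale_remotemount_storage_gui_hostname }}:443/scalemgmt/v2/nodes{{ scale_remotemount_storage_contactnodes_filter | default('?fields=roles.gatewayNode%2Cnetwork.daemonNodeName&filter=roles.gatewayNode%3Dfalse') }}"
    method: GET
    user: "{{ scale_remotemount_storage_gui_username }}"
    password: "{{ scale_remotemount_storage_gui_password }}"
    force_basic_auth: true
    validate_certs: false
    return_content: true
  register: contactnode_candidates

- name: Show the nodes returned by the filter
  debug:
    var: contactnode_candidates.json
```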
### Playbook: Storage Cluster and Client Cluster have GUI You can use localhost, then all RestAPI call will occur over https to Storage and Client Cluster locally from where you run the Ansible playbook - - - hosts: localhost - vars: - scale_remotemount_client_gui_username: admin - scale_remotemount_client_gui_password: Admin@GUI - scale_remotemount_client_gui_hostname: 10.10.10.10 - scale_remotemount_storage_gui_username: admin - scale_remotemount_storage_gui_password: Admin@GUI - scale_remotemount_storage_gui_hostname: 10.10.10.20 - scale_remotemount_filesystem_name: - - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs2", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables - - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } - roles: - - remote_mount +```yaml +- hosts: localhost + vars: + scale_remotemount_client_gui_username: admin + scale_remotemount_client_gui_password: Admin@GUI + scale_remotemount_client_gui_hostname: 10.10.10.10 + scale_remotemount_storage_gui_username: admin + scale_remotemount_storage_gui_password: Admin@GUI + scale_remotemount_storage_gui_hostname: 10.10.10.20 + scale_remotemount_filesystem_name: + - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs2", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables + - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } + roles: + - remote_mount +``` ``ansible-playbook -i hosts remotmount.yml`` @@ -105,20 +148,20 @@ You can use localhost, then all RestAPI call will occur over https to Storage an Following example will connect up to the first host in your ansible host file, and then run the playbook and do API Call to Storage Cluster. So in this case the Client Cluster node needs access on https/443 to Storage Cluster GUI Node. 
- - - hosts: scale-client-cluster-node-1 - gather_facts: false - vars: - scale_remotemount_storage_gui_username: admin - scale_remotemount_storage_gui_password: Admin@GUI - scale_remotemount_storage_gui_hostname: 10.10.10.20 - scale_remotemount_client_no_gui: true - scale_remotemount_filesystem_name: - - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs2", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables - - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } - roles: - - remote_mount - +```yaml +- hosts: scale-client-cluster-node-1 + gather_facts: false + vars: + scale_remotemount_storage_gui_username: admin + scale_remotemount_storage_gui_password: Admin@GUI + scale_remotemount_storage_gui_hostname: 10.10.10.20 + scale_remotemount_client_no_gui: true + scale_remotemount_filesystem_name: + - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs2", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables + - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } + roles: + - remote_mount +``` Firewall recommendations for communication among cluster's -------- @@ -145,7 +188,7 @@ to set the tscCmdPortRange configuration variable: Troubleshooting ------------------------ -- If you get **401 - Unauthorized** - Check that your user is working with a Curl, and that is have the correct Role. +- If you get **401 - Unauthorized** - Check that your user is working with a Curl, and that the user have the correct Role/Permission. ``-k`` will use insecure. diff --git a/roles/smb/README.md b/docs/README.SMB.md similarity index 100% rename from roles/smb/README.md rename to docs/README.SMB.md diff --git a/docs/VARIABLES.md b/docs/VARIABLES.md new file mode 100644 index 00000000..708edaeb --- /dev/null +++ b/docs/VARIABLES.md @@ -0,0 +1,115 @@ +Variables used by Spectrum Scale (GPFS) Ansible project +======================================================= + +- `scale_architecture` + - example: `x86_64` + - default: `{{ ansible_architecture }}` + + Specify the Spectrum Scale architecture that you want to install on your nodes. + +- `scale_daemon_nodename` + - example: `scale01` + - dafault: `{{ ansible_hostname }}` + + Spectrum Scale daemon nodename (defaults to node's hostname). + +- `scale_admin_nodename` + - example: `scale01` + - dafault: `{{ scale_daemon_nodename }}` + + Spectrum Scale admin nodename (defaults to node's hostname). + + +- `scale_state` + - example: `maintenance` + - default: `present` + + Desired state of the Spectrum Scale node. Can be `present`, `maintenance` or `absent`: + - `present` - node will be added to cluster, daemon will be started + - `maintenance` - node will be added to cluster, daemon will not be started + - `absent` - node will be removed from cluster + +- `scale_prepare_disable_selinux` + - example: `true` + - default: `false` + + Whether or not to disable SELinux. 
+ +- `scale_reboot_automatic` + - example: `true` + - default: `false` + + Whether or not to automatically reboot nodes - if set to `false` then only a message is printed. If set to `true` then nodes are automatically rebooted (dangerous!). + +- `scale_prepare_enable_ssh_login` + - example: `true` + - default: `false` + + Whether or not enable SSH root login (PermitRootLogin) and public key authentication (PubkeyAuthentication). + +- `scale_prepare_restrict_ssh_address` + - example: `true` + - default: `false` + + Whether or not to restrict SSH access to the admin nodename (ListenAddress). Requires `scale_prepare_enable_ssh_login` to be enabled, too. + +- `scale_prepare_disable_ssh_hostkeycheck` + - example: `true` + - default: `false` + + Whether or not to disable SSH hostkey checking (StrictHostKeyChecking). + +- `scale_prepare_exchange_keys` + - example: `true` + - default: `false` + + Whether or not to exchange SSH keys between all nodes. + +- `scale_prepare_pubkey_path` + - example: `/root/.ssh/gpfskey.pub` + - default: `/root/.ssh/id_rsa.pub` + + Path to public SSH key - will be generated (if it does not exist) and exchanged between nodes. Requires `scale_prepare_exchange_keys` to be enabled, too. + +- `scale_prepare_disable_firewall` + - example: `true` + - default: `false` + + Whether or not to disable Linux firewalld - if you need to keep firewalld active then change this variable to `false` and apply your custom firewall rules prior to running this role (e.g. as pre_tasks). + +- `scale_install_localpkg_path` + - example: `/root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install` + - default: none + + Specify the path to the self-extracting Spectrum Scale installation archive on the local system (accessible on Ansible control machine) - it will be copied to your nodes. + +- `scale_install_remotepkg_path` + - example: `/root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install` + - default: none + + Specify the path to Spectrum Scale installation package on the remote system (accessible on Ansible managed node). + +- `scale_install_repository_url` + - example: `http://server/gpfs/` + - default: none + + Specify the URL of the (existing) Spectrum Scale YUM repository (copy the contents of /usr/lpp/mmfs/{{ scale_version }}/ to a web server in order to build your repository). + + Note that if this is a URL then a new repository definition will be created. If this variable is set to `existing` then it is assumed that a repository definition already exists and thus will *not* be created. + +- `scale_install_directory_pkg_path` + - example: `/tmp/gpfs/` + - default: none + + Specify the path to the user-provided directory, containing all Spectrum Scale packages. Note that for this installation method all packages need to be kept in a single directory. + +- `scale_version` + - example: `5.0.4.0` + - default: none + + Specify the Spectrum Scale version that you want to install on your nodes. It is mandatory to define this variable for the following installation methods: + - Repository installation method (`scale_install_repository_url`) + - Local archive installation method (`scale_install_localpkg_path`) + - Remote archive installation method (`scale_install_remotepkg_path`) + + The variable is *not* necessary for the directory installation method (`scale_install_directory`), as with this method the version is automatically detected from the installation package at the given path. 
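As a quick illustration of how these variables fit together, here is a minimal, hypothetical `group_vars/all.yml` sketch for a repository-based installation. The values are the examples used above and are placeholders, not tested recommendations:

```yaml
# group_vars/all.yml (hypothetical sketch; values taken from the examples above)
scale_version: '5.0.4.0'                            # mandatory for the repository installation method
scale_install_repository_url: http://server/gpfs/   # existing Spectrum Scale YUM repository
scale_state: present                                 # add nodes to the cluster and start the daemon
scale_prepare_exchange_keys: true                    # exchange SSH keys between all nodes
scale_prepare_disable_firewall: true                 # or keep firewalld and apply custom rules as pre_tasks
scale_reboot_automatic: false                        # only print a message instead of rebooting nodes
```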
diff --git a/galaxy.yml b/galaxy.yml index 50128fed..66d4d537 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -58,7 +58,7 @@ dependencies: {} repository: https://github.com/IBM/ibm-spectrum-scale-install-infra # The URL to any online docs -documentation: https://github.com/IBM/ibm-spectrum-scale-install-infra +documentation: https://github.com/IBM/ibm-spectrum-scale-install-infra#readme # The URL to the homepage of the collection/project homepage: diff --git a/meta/runtime.yml b/meta/runtime.yml new file mode 100644 index 00000000..2ee3c9fa --- /dev/null +++ b/meta/runtime.yml @@ -0,0 +1,2 @@ +--- +requires_ansible: '>=2.9.10' diff --git a/migrate.sh b/migrate.sh new file mode 100755 index 00000000..2a5f3f6a --- /dev/null +++ b/migrate.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +# Replaces old role names with new role names in any text file + +usage () { + echo "Usage: $0 filename" + exit 1 +} + +[ "$#" -eq 1 ] || usage +[ -r "$1" ] || usage + +cp ${1} ${1}.bak + +sed -i ' +s,callhome/cluster,callhome_configure,g +s,callhome/node,callhome_install,g +s,callhome/postcheck,callhome_verify,g +s,callhome/precheck,callhome_prepare,g +s,core/cluster,core_configure,g +s,core/common,core_common,g +s,core/node,core_install,g +s,core/postcheck,core_verify,g +s,core/precheck,core_prepare,g +s,core/upgrade,core_upgrade,g +s,gui/cluster,gui_configure,g +s,gui/node,gui_install,g +s,gui/postcheck,gui_verify,g +s,gui/precheck,gui_prepare,g +s,gui/upgrade,gui_upgrade,g +s,nfs/cluster,nfs_configure,g +s,nfs/common,ces_common,g +s,nfs/node,nfs_install,g +s,nfs/postcheck,nfs_verify,g +s,nfs/precheck,nfs_prepare,g +s,nfs/upgrade,nfs_upgrade,g +s,remote_mount/,remotemount_configure,g +s,scale_auth/upgrade,auth_upgrade,g +s,scale_ece/cluster,ece_configure,g +s,scale_ece/node,ece_install,g +s,scale_ece/precheck,ece_prepare,g +s,scale_ece/upgrade,ece_upgrade,g +s,scale_fileauditlogging/cluster,fal_configure,g +s,scale_fileauditlogging/node,fal_install,g +s,scale_fileauditlogging/postcheck,fal_verify,g +s,scale_fileauditlogging/precheck,fal_prepare,g +s,scale_fileauditlogging/upgrade,fal_upgrade,g +s,scale_hdfs/cluster,hdfs_configure,g +s,scale_hdfs/node,hdfs_install,g +s,scale_hdfs/postcheck,hdfs_verify,g +s,scale_hdfs/precheck,hdfs_prepare,g +s,scale_hdfs/upgrade,hdfs_upgrade,g +s,scale_hpt/node,afm_cos_install,g +s,scale_hpt/postcheck,afm_cos_verify,g +s,scale_hpt/precheck,afm_cos_prepare,g +s,scale_hpt/upgrade,afm_cos_upgrade,g +s,scale_object/cluster,obj_configure,g +s,scale_object/node,obj_install,g +s,scale_object/postcheck,obj_verify,g +s,scale_object/precheck,obj_prepare,g +s,scale_object/upgrade,obj_upgrade,g +s,smb/cluster,smb_configure,g +s,smb/node,smb_install,g +s,smb/postcheck,smb_verify,g +s,smb/precheck,smb_prepare,g +s,smb/upgrade,smb_upgrade,g +s,zimon/cluster,perfmon_configure,g +s,zimon/node,perfmon_install,g +s,zimon/postcheck,perfmon_verify,g +s,zimon/precheck,perfmon_prepare,g +s,zimon/upgrade,perfmon_upgrade,g +' $1 + +exit 0 diff --git a/roles/afm_cos_install/README.md b/roles/afm_cos_install/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/afm_cos_install/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_hpt/node/defaults/main.yml b/roles/afm_cos_install/defaults/main.yml similarity index 100% rename from roles/scale_hpt/node/defaults/main.yml rename to roles/afm_cos_install/defaults/main.yml diff --git a/roles/scale_hpt/node/meta/main.yml b/roles/afm_cos_install/meta/main.yml similarity index 65% rename from 
roles/scale_hpt/node/meta/main.yml rename to roles/afm_cos_install/meta/main.yml index 51f61bcc..ed914ccc 100644 --- a/roles/scale_hpt/node/meta/main.yml +++ b/roles/afm_cos_install/meta/main.yml @@ -1,10 +1,11 @@ --- galaxy_info: - role_name: scale_hpt author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS package company: IBM + license: Apache-2.0 + min_ansible_version: 2.9 platforms: @@ -13,14 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common diff --git a/roles/scale_hpt/node/tasks/apt/install.yml b/roles/afm_cos_install/tasks/apt/install.yml similarity index 100% rename from roles/scale_hpt/node/tasks/apt/install.yml rename to roles/afm_cos_install/tasks/apt/install.yml diff --git a/roles/scale_hpt/node/tasks/install.yml b/roles/afm_cos_install/tasks/install.yml similarity index 100% rename from roles/scale_hpt/node/tasks/install.yml rename to roles/afm_cos_install/tasks/install.yml diff --git a/roles/scale_hpt/node/tasks/install_dir_pkg.yml b/roles/afm_cos_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_hpt/node/tasks/install_dir_pkg.yml rename to roles/afm_cos_install/tasks/install_dir_pkg.yml diff --git a/roles/scale_hpt/node/tasks/install_local_pkg.yml b/roles/afm_cos_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_hpt/node/tasks/install_local_pkg.yml rename to roles/afm_cos_install/tasks/install_local_pkg.yml diff --git a/roles/scale_hpt/node/tasks/install_repository.yml b/roles/afm_cos_install/tasks/install_repository.yml similarity index 91% rename from roles/scale_hpt/node/tasks/install_repository.yml rename to roles/afm_cos_install/tasks/install_repository.yml index de6e4399..069bbf1b 100644 --- a/roles/scale_hpt/node/tasks/install_repository.yml +++ b/roles/afm_cos_install/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure HPT APT repository @@ -29,6 +30,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure HPT repository @@ -41,6 +43,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/scale_hpt/node/tasks/main.yml b/roles/afm_cos_install/tasks/main.yml similarity index 100% rename from roles/scale_hpt/node/tasks/main.yml rename to roles/afm_cos_install/tasks/main.yml diff --git a/roles/scale_hpt/node/tasks/yum/install.yml b/roles/afm_cos_install/tasks/yum/install.yml similarity index 100% rename from roles/scale_hpt/node/tasks/yum/install.yml rename to roles/afm_cos_install/tasks/yum/install.yml diff --git a/roles/scale_hpt/node/tasks/zypper/install.yml b/roles/afm_cos_install/tasks/zypper/install.yml similarity index 100% rename from roles/scale_hpt/node/tasks/zypper/install.yml rename to roles/afm_cos_install/tasks/zypper/install.yml diff --git a/roles/scale_hpt/node/vars/main.yml b/roles/afm_cos_install/vars/main.yml similarity index 100% rename from roles/scale_hpt/node/vars/main.yml rename to roles/afm_cos_install/vars/main.yml diff --git a/roles/afm_cos_prepare/README.md 
b/roles/afm_cos_prepare/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/afm_cos_prepare/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_hpt/postcheck/defaults/main.yml b/roles/afm_cos_prepare/defaults/main.yml similarity index 100% rename from roles/scale_hpt/postcheck/defaults/main.yml rename to roles/afm_cos_prepare/defaults/main.yml diff --git a/roles/scale_hpt/precheck/meta/main.yml b/roles/afm_cos_prepare/meta/main.yml similarity index 67% rename from roles/scale_hpt/precheck/meta/main.yml rename to roles/afm_cos_prepare/meta/main.yml index 496df107..cd2e0509 100644 --- a/roles/scale_hpt/precheck/meta/main.yml +++ b/roles/afm_cos_prepare/meta/main.yml @@ -1,10 +1,11 @@ --- galaxy_info: - role_name: hpt_preheck author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS (HPT) company: IBM + license: Apache-2.0 + min_ansible_version: 2.9 platforms: @@ -13,13 +14,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_hpt/postcheck/tasks/main.yml b/roles/afm_cos_prepare/tasks/main.yml similarity index 100% rename from roles/scale_hpt/postcheck/tasks/main.yml rename to roles/afm_cos_prepare/tasks/main.yml diff --git a/roles/afm_cos_upgrade/README.md b/roles/afm_cos_upgrade/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/afm_cos_upgrade/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_hpt/upgrade/defaults/main.yml b/roles/afm_cos_upgrade/defaults/main.yml similarity index 100% rename from roles/scale_hpt/upgrade/defaults/main.yml rename to roles/afm_cos_upgrade/defaults/main.yml diff --git a/roles/scale_hpt/upgrade/meta/main.yml b/roles/afm_cos_upgrade/meta/main.yml similarity index 65% rename from roles/scale_hpt/upgrade/meta/main.yml rename to roles/afm_cos_upgrade/meta/main.yml index 51f61bcc..ed914ccc 100644 --- a/roles/scale_hpt/upgrade/meta/main.yml +++ b/roles/afm_cos_upgrade/meta/main.yml @@ -1,10 +1,11 @@ --- galaxy_info: - role_name: scale_hpt author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS package company: IBM + license: Apache-2.0 + min_ansible_version: 2.9 platforms: @@ -13,14 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common diff --git a/roles/scale_hpt/upgrade/tasks/apt/install.yml b/roles/afm_cos_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/apt/install.yml rename to roles/afm_cos_upgrade/tasks/apt/install.yml diff --git a/roles/scale_hpt/upgrade/tasks/install.yml b/roles/afm_cos_upgrade/tasks/install.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/install.yml rename to roles/afm_cos_upgrade/tasks/install.yml diff --git a/roles/scale_hpt/upgrade/tasks/install_dir_pkg.yml b/roles/afm_cos_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/install_dir_pkg.yml rename to roles/afm_cos_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/scale_hpt/upgrade/tasks/install_local_pkg.yml b/roles/afm_cos_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/install_local_pkg.yml rename to 
roles/afm_cos_upgrade/tasks/install_local_pkg.yml diff --git a/roles/scale_hpt/upgrade/tasks/install_repository.yml b/roles/afm_cos_upgrade/tasks/install_repository.yml similarity index 91% rename from roles/scale_hpt/upgrade/tasks/install_repository.yml rename to roles/afm_cos_upgrade/tasks/install_repository.yml index d01cad31..1b732d47 100644 --- a/roles/scale_hpt/upgrade/tasks/install_repository.yml +++ b/roles/afm_cos_upgrade/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure HPT APT repository @@ -29,6 +30,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure HPT repository @@ -41,6 +43,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/scale_hpt/upgrade/tasks/main.yml b/roles/afm_cos_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/main.yml rename to roles/afm_cos_upgrade/tasks/main.yml diff --git a/roles/scale_hpt/upgrade/tasks/yum/install.yml b/roles/afm_cos_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/yum/install.yml rename to roles/afm_cos_upgrade/tasks/yum/install.yml diff --git a/roles/scale_hpt/upgrade/tasks/zypper/install.yml b/roles/afm_cos_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/scale_hpt/upgrade/tasks/zypper/install.yml rename to roles/afm_cos_upgrade/tasks/zypper/install.yml diff --git a/roles/scale_hpt/upgrade/vars/main.yml b/roles/afm_cos_upgrade/vars/main.yml similarity index 100% rename from roles/scale_hpt/upgrade/vars/main.yml rename to roles/afm_cos_upgrade/vars/main.yml diff --git a/roles/afm_cos_verify/README.md b/roles/afm_cos_verify/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/afm_cos_verify/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_hpt/precheck/defaults/main.yml b/roles/afm_cos_verify/defaults/main.yml similarity index 100% rename from roles/scale_hpt/precheck/defaults/main.yml rename to roles/afm_cos_verify/defaults/main.yml diff --git a/roles/scale_hpt/postcheck/meta/main.yml b/roles/afm_cos_verify/meta/main.yml similarity index 67% rename from roles/scale_hpt/postcheck/meta/main.yml rename to roles/afm_cos_verify/meta/main.yml index 12cc5d17..cd2e0509 100644 --- a/roles/scale_hpt/postcheck/meta/main.yml +++ b/roles/afm_cos_verify/meta/main.yml @@ -1,10 +1,11 @@ --- galaxy_info: - role_name: hpt_postcheck author: IBM Corporation description: Role for installing IBM Spectrum Scale (GPFS) AFM COS (HPT) company: IBM + license: Apache-2.0 + min_ansible_version: 2.9 platforms: @@ -13,13 +14,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_hpt/precheck/tasks/main.yml b/roles/afm_cos_verify/tasks/main.yml similarity index 100% rename from roles/scale_hpt/precheck/tasks/main.yml rename to roles/afm_cos_verify/tasks/main.yml diff --git a/roles/auth_upgrade/README.md b/roles/auth_upgrade/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/auth_upgrade/README.md @@ -0,0 +1 @@ 
+../../README.md \ No newline at end of file diff --git a/roles/scale_auth/upgrade/defaults/main.yml b/roles/auth_upgrade/defaults/main.yml similarity index 100% rename from roles/scale_auth/upgrade/defaults/main.yml rename to roles/auth_upgrade/defaults/main.yml diff --git a/roles/smb/cluster/meta/main.yml b/roles/auth_upgrade/meta/main.yml similarity index 67% rename from roles/smb/cluster/meta/main.yml rename to roles/auth_upgrade/meta/main.yml index 295fb1dd..d32d632b 100644 --- a/roles/smb/cluster/meta/main.yml +++ b/roles/auth_upgrade/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: nfs_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,11 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - - nfs/common + - ibm.spectrum_scale.core_common diff --git a/roles/scale_auth/upgrade/tasks/auth.yml b/roles/auth_upgrade/tasks/auth.yml similarity index 100% rename from roles/scale_auth/upgrade/tasks/auth.yml rename to roles/auth_upgrade/tasks/auth.yml diff --git a/roles/scale_auth/upgrade/tasks/main.yml b/roles/auth_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_auth/upgrade/tasks/main.yml rename to roles/auth_upgrade/tasks/main.yml diff --git a/roles/scale_auth/upgrade/tasks/parseFile.yml b/roles/auth_upgrade/tasks/parseFile.yml similarity index 100% rename from roles/scale_auth/upgrade/tasks/parseFile.yml rename to roles/auth_upgrade/tasks/parseFile.yml diff --git a/roles/scale_auth/upgrade/vars/main.yml b/roles/auth_upgrade/vars/main.yml similarity index 100% rename from roles/scale_auth/upgrade/vars/main.yml rename to roles/auth_upgrade/vars/main.yml diff --git a/roles/callhome/cluster/meta/main.yml b/roles/callhome/cluster/meta/main.yml deleted file mode 100755 index 1494a57c..00000000 --- a/roles/callhome/cluster/meta/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -galaxy_info: - role_name: callhome_cluster - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - precheck diff --git a/roles/callhome/node/meta/main.yml b/roles/callhome/node/meta/main.yml deleted file mode 100755 index 5332470d..00000000 --- a/roles/callhome/node/meta/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -galaxy_info: - role_name: callhome_node - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - core/common diff --git a/roles/callhome/postcheck/meta/main.yml b/roles/callhome/postcheck/meta/main.yml deleted file mode 100755 index 97d8df54..00000000 --- a/roles/callhome/postcheck/meta/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -galaxy_info: - role_name: callhome_postcheck - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL 
- versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: [] -# - common diff --git a/roles/callhome/precheck/meta/main.yml b/roles/callhome/precheck/meta/main.yml deleted file mode 100755 index 8e1f498c..00000000 --- a/roles/callhome/precheck/meta/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -galaxy_info: - role_name: callhome_precheck - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: [] diff --git a/roles/callhome_configure/README.md b/roles/callhome_configure/README.md new file mode 120000 index 00000000..72405a1e --- /dev/null +++ b/roles/callhome_configure/README.md @@ -0,0 +1 @@ +../../docs/README.CALLHOME.md \ No newline at end of file diff --git a/roles/callhome/cluster/defaults/main.yml b/roles/callhome_configure/defaults/main.yml similarity index 100% rename from roles/callhome/cluster/defaults/main.yml rename to roles/callhome_configure/defaults/main.yml diff --git a/roles/callhome/cluster/handlers/main.yml b/roles/callhome_configure/handlers/main.yml similarity index 100% rename from roles/callhome/cluster/handlers/main.yml rename to roles/callhome_configure/handlers/main.yml diff --git a/roles/nfs/common/meta/main.yml b/roles/callhome_configure/meta/main.yml old mode 100644 new mode 100755 similarity index 67% rename from roles/nfs/common/meta/main.yml rename to roles/callhome_configure/meta/main.yml index 7cc89b22..1b528927 --- a/roles/nfs/common/meta/main.yml +++ b/roles/callhome_configure/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: ces_common author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,11 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: -- core/common + - ibm.spectrum_scale.callhome_prepare diff --git a/roles/callhome/cluster/tasks/configure.yml b/roles/callhome_configure/tasks/configure.yml similarity index 100% rename from roles/callhome/cluster/tasks/configure.yml rename to roles/callhome_configure/tasks/configure.yml diff --git a/roles/callhome/cluster/tasks/main.yml b/roles/callhome_configure/tasks/main.yml similarity index 100% rename from roles/callhome/cluster/tasks/main.yml rename to roles/callhome_configure/tasks/main.yml diff --git a/roles/callhome/cluster/vars/main.yml b/roles/callhome_configure/vars/main.yml similarity index 100% rename from roles/callhome/cluster/vars/main.yml rename to roles/callhome_configure/vars/main.yml diff --git a/roles/callhome_install/README.md b/roles/callhome_install/README.md new file mode 120000 index 00000000..72405a1e --- /dev/null +++ b/roles/callhome_install/README.md @@ -0,0 +1 @@ +../../docs/README.CALLHOME.md \ No newline at end of file diff --git a/roles/callhome/node/defaults/main.yml b/roles/callhome_install/defaults/main.yml similarity index 100% rename from roles/callhome/node/defaults/main.yml rename to roles/callhome_install/defaults/main.yml diff --git a/roles/callhome/node/handlers/main.yml b/roles/callhome_install/handlers/main.yml similarity index 100% rename from roles/callhome/node/handlers/main.yml rename to 
roles/callhome_install/handlers/main.yml diff --git a/roles/core/node/meta/main.yml b/roles/callhome_install/meta/main.yml old mode 100644 new mode 100755 similarity index 67% rename from roles/core/node/meta/main.yml rename to roles/callhome_install/meta/main.yml index a3fcd724..d32d632b --- a/roles/core/node/meta/main.yml +++ b/roles/callhome_install/meta/main.yml @@ -1,22 +1,20 @@ --- galaxy_info: - role_name: core_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL versions: - 7 + - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - - common + - ibm.spectrum_scale.core_common diff --git a/roles/callhome/node/tasks/apt/install.yml b/roles/callhome_install/tasks/apt/install.yml similarity index 100% rename from roles/callhome/node/tasks/apt/install.yml rename to roles/callhome_install/tasks/apt/install.yml diff --git a/roles/callhome/node/tasks/install.yml b/roles/callhome_install/tasks/install.yml similarity index 100% rename from roles/callhome/node/tasks/install.yml rename to roles/callhome_install/tasks/install.yml diff --git a/roles/callhome/node/tasks/install_local_pkg.yml b/roles/callhome_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/callhome/node/tasks/install_local_pkg.yml rename to roles/callhome_install/tasks/install_local_pkg.yml diff --git a/roles/callhome/node/tasks/install_remote_pkg.yml b/roles/callhome_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/callhome/node/tasks/install_remote_pkg.yml rename to roles/callhome_install/tasks/install_remote_pkg.yml diff --git a/roles/callhome/node/tasks/install_repository.yml b/roles/callhome_install/tasks/install_repository.yml similarity index 91% rename from roles/callhome/node/tasks/install_repository.yml rename to roles/callhome_install/tasks/install_repository.yml index 6e330361..66e31f80 100755 --- a/roles/callhome/node/tasks/install_repository.yml +++ b/roles/callhome_install/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure Callhome APT repository @@ -29,6 +30,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure Callhome zypper repository @@ -40,6 +42,7 @@ disable_gpg_check: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/callhome/node/tasks/main.yml b/roles/callhome_install/tasks/main.yml similarity index 100% rename from roles/callhome/node/tasks/main.yml rename to roles/callhome_install/tasks/main.yml diff --git a/roles/callhome/node/tasks/yum/install.yml b/roles/callhome_install/tasks/yum/install.yml similarity index 100% rename from roles/callhome/node/tasks/yum/install.yml rename to roles/callhome_install/tasks/yum/install.yml diff --git a/roles/callhome/node/tasks/zypper/install.yml b/roles/callhome_install/tasks/zypper/install.yml similarity index 100% rename from roles/callhome/node/tasks/zypper/install.yml rename to roles/callhome_install/tasks/zypper/install.yml diff --git a/roles/callhome/node/vars/main.yml 
b/roles/callhome_install/vars/main.yml similarity index 100% rename from roles/callhome/node/vars/main.yml rename to roles/callhome_install/vars/main.yml diff --git a/roles/callhome_prepare/README.md b/roles/callhome_prepare/README.md new file mode 120000 index 00000000..72405a1e --- /dev/null +++ b/roles/callhome_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.CALLHOME.md \ No newline at end of file diff --git a/roles/callhome/precheck/defaults/main.yml b/roles/callhome_prepare/defaults/main.yml similarity index 100% rename from roles/callhome/precheck/defaults/main.yml rename to roles/callhome_prepare/defaults/main.yml diff --git a/roles/callhome/precheck/handlers/main.yml b/roles/callhome_prepare/handlers/main.yml similarity index 100% rename from roles/callhome/precheck/handlers/main.yml rename to roles/callhome_prepare/handlers/main.yml diff --git a/roles/smb/precheck/meta/main.yml b/roles/callhome_prepare/meta/main.yml old mode 100644 new mode 100755 similarity index 71% rename from roles/smb/precheck/meta/main.yml rename to roles/callhome_prepare/meta/main.yml index 3770ad52..dab8063f --- a/roles/smb/precheck/meta/main.yml +++ b/roles/callhome_prepare/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: smb author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,10 +14,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/callhome/precheck/tasks/check.yml b/roles/callhome_prepare/tasks/check.yml similarity index 100% rename from roles/callhome/precheck/tasks/check.yml rename to roles/callhome_prepare/tasks/check.yml diff --git a/roles/callhome/precheck/tasks/main.yml b/roles/callhome_prepare/tasks/main.yml similarity index 100% rename from roles/callhome/precheck/tasks/main.yml rename to roles/callhome_prepare/tasks/main.yml diff --git a/roles/callhome/postcheck/vars/main.yml b/roles/callhome_prepare/vars/main.yml similarity index 100% rename from roles/callhome/postcheck/vars/main.yml rename to roles/callhome_prepare/vars/main.yml diff --git a/roles/callhome_verify/README.md b/roles/callhome_verify/README.md new file mode 120000 index 00000000..72405a1e --- /dev/null +++ b/roles/callhome_verify/README.md @@ -0,0 +1 @@ +../../docs/README.CALLHOME.md \ No newline at end of file diff --git a/roles/callhome/postcheck/defaults/main.yml b/roles/callhome_verify/defaults/main.yml similarity index 100% rename from roles/callhome/postcheck/defaults/main.yml rename to roles/callhome_verify/defaults/main.yml diff --git a/roles/callhome/postcheck/handlers/main.yml b/roles/callhome_verify/handlers/main.yml similarity index 100% rename from roles/callhome/postcheck/handlers/main.yml rename to roles/callhome_verify/handlers/main.yml diff --git a/roles/smb/postcheck/meta/main.yml b/roles/callhome_verify/meta/main.yml old mode 100644 new mode 100755 similarity index 70% rename from roles/smb/postcheck/meta/main.yml rename to roles/callhome_verify/meta/main.yml index 16770b46..760f6876 --- a/roles/smb/postcheck/meta/main.yml +++ b/roles/callhome_verify/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: nfs_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + 
min_ansible_version: 2.9 platforms: - name: EL @@ -13,10 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] +# - ibm.spectrum_scale.core_common diff --git a/roles/callhome/postcheck/tasks/check.yml b/roles/callhome_verify/tasks/check.yml similarity index 100% rename from roles/callhome/postcheck/tasks/check.yml rename to roles/callhome_verify/tasks/check.yml diff --git a/roles/callhome/postcheck/tasks/main.yml b/roles/callhome_verify/tasks/main.yml similarity index 100% rename from roles/callhome/postcheck/tasks/main.yml rename to roles/callhome_verify/tasks/main.yml diff --git a/roles/callhome/precheck/vars/main.yml b/roles/callhome_verify/vars/main.yml similarity index 100% rename from roles/callhome/precheck/vars/main.yml rename to roles/callhome_verify/vars/main.yml diff --git a/roles/ces_common/README.md b/roles/ces_common/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/ces_common/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/nfs/common/defaults/main.yml b/roles/ces_common/defaults/main.yml similarity index 77% rename from roles/nfs/common/defaults/main.yml rename to roles/ces_common/defaults/main.yml index 23fd6038..d3225d76 100644 --- a/roles/nfs/common/defaults/main.yml +++ b/roles/ces_common/defaults/main.yml @@ -1,4 +1,10 @@ --- +## Spectrum Scale daemon nodename (defaults to node's hostname) +scale_daemon_nodename: "{{ ansible_hostname }}" + +## Spectrum Scale admin nodename (defaults to node's hostname) +scale_admin_nodename: "{{ scale_daemon_nodename }}" + # Default variables for the IBM Spectrum Scale (NFS) role - # either edit this file or define your own variables to override the defaults # If ces groups is defined, scale_protocols in scale_clusterdefinition.json will look like below diff --git a/roles/core/upgrade/meta/main.yml b/roles/ces_common/meta/main.yml similarity index 67% rename from roles/core/upgrade/meta/main.yml rename to roles/ces_common/meta/main.yml index a3fcd724..d32d632b 100644 --- a/roles/core/upgrade/meta/main.yml +++ b/roles/ces_common/meta/main.yml @@ -1,22 +1,20 @@ --- galaxy_info: - role_name: core_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL versions: - 7 + - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - - common + - ibm.spectrum_scale.core_common diff --git a/roles/nfs/common/tasks/check.yml b/roles/ces_common/tasks/check.yml similarity index 93% rename from roles/nfs/common/tasks/check.yml rename to roles/ces_common/tasks/check.yml index 9e571685..866fddd4 100644 --- a/roles/nfs/common/tasks/check.yml +++ b/roles/ces_common/tasks/check.yml @@ -16,8 +16,8 @@ - name: check | Collect all protocol nodes set_fact: - scale_protocol_node_list: "{{ scale_protocol_node_list + [hostvars[item]['scale_daemon_nodename']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + scale_protocol_node_list: "{{ scale_protocol_node_list + [hostvars[item]['inventory_hostname']] }}" + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost @@ -34,7 +34,7 @@ - name: check | Collect all protocol node OS set_fact: scale_os_list: "{{ 
scale_os_list + [hostvars[item]['ansible_distribution']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost @@ -50,7 +50,7 @@ - name: check | Collect all protocol node architecture set_fact: scale_arch_list: "{{ scale_arch_list + [hostvars[item]['ansible_architecture']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost @@ -155,21 +155,21 @@ Please define the CES shared root file system mount point in the inventory." - name: check | Prepare CES ip list set_fact: scale_export_ip: "{{ scale_ces_export_ip|flatten }}" - + - name: check | Prepare IPv6 export ip list set_fact: scale_ces_ipv6_list: "{{ scale_ces_ipv6_list + [ item ]}}" when: item is regex ( scale_ipv6_regex ) with_items: - "{{ scale_export_ip }}" - + - name: check | Prepare IPv4 export ip list set_fact: scale_ces_ipv4_list: "{{ scale_ces_ipv4_list + [ item ]}}" when: item is regex ( scale_ipv4_regex ) with_items: - "{{ scale_export_ip }}" - + - name: check | Check if interface is defined assert: that: @@ -181,7 +181,7 @@ Please define the CES shared root file system mount point in the inventory." msg: "Mixed IPs detected. All CES export IPs can be either IPv4 or IPv6." when: scale_ces_ipv4_list|length >0 and scale_ces_ipv6_list|length > 0 failed_when: scale_ces_ipv4_list|length >0 and scale_ces_ipv6_list|length > 0 - + when: scale_protocols.scale_ces_groups is defined and scale_protocols.scale_ces_groups|length > 0 - block: @@ -191,7 +191,7 @@ Please define the CES shared root file system mount point in the inventory." when: item is regex ( scale_ipv6_regex ) with_items: - "{{ scale_protocols.export_ip_pool }}" - + - name: check | Prepare IPv4 export ip list set_fact: scale_ces_ipv4_list: "{{ scale_ces_ipv4_list + [ item ]}}" @@ -207,9 +207,8 @@ Please define the CES shared root file system mount point in the inventory." - name: check | Check if all ces ips are either IPv4 or IPv6 debug: - msg: "Mixed IPs detected. All CES export IPs can be either IPv4 or IPv6." + msg: "Mixed IPs detected. All CES export IPs can be either IPv4 or IPv6." 
when: scale_ces_ipv4_list|length >0 and scale_ces_ipv6_list|length > 0 failed_when: scale_ces_ipv4_list|length >0 and scale_ces_ipv6_list|length > 0 - - when: scale_protocols.scale_ces_groups is not defined + when: scale_protocols.scale_ces_groups is not defined diff --git a/roles/nfs/common/tasks/configure.yml b/roles/ces_common/tasks/configure.yml similarity index 76% rename from roles/nfs/common/tasks/configure.yml rename to roles/ces_common/tasks/configure.yml index c8a2c1de..8590adb8 100644 --- a/roles/nfs/common/tasks/configure.yml +++ b/roles/ces_common/tasks/configure.yml @@ -7,6 +7,16 @@ scale_service_list: [] scale_ces_nodes: "" +- name: check | Set default daemon nodename + set_fact: + scale_daemon_nodename: "{{ scale_daemon_nodename }}" + when: hostvars[inventory_hostname].scale_daemon_nodename is undefined + +- name: check | Set default admin nodename + set_fact: + scale_admin_nodename: "{{ scale_admin_nodename }}" + when: hostvars[inventory_hostname].scale_admin_nodename is undefined + - name: configure | Collect status of cesSharedRoot command: "{{ scale_command_path }}mmlsconfig cesSharedRoot" register: scale_ces_status @@ -21,9 +31,9 @@ - name: configure | Prepare server nodes string set_fact: - scale_server_nodes: "{{ scale_server_nodes + ',' + item|string }}" + scale_server_nodes: "{{ scale_server_nodes + ',' + hostvars[item]['scale_daemon_nodename'] | string }}" with_items: - - "{{ scale_protocol_node_list }}" + - "{{ scale_protocol_node_list }}" - name: configure | Setting server licenses on protocol nodes command: "{{ scale_command_path }}mmchlicense server --accept -N {{ scale_server_nodes[1:] }}" @@ -42,8 +52,8 @@ - name: configure | Collect all nodes on which ces is not enabled set_fact: - scale_ces_disabled_nodes: "{{ scale_ces_disabled_nodes + [ item ]}}" - when: not scale_ces_enable_status.stdout_lines is search(item) + scale_ces_disabled_nodes: "{{ scale_ces_disabled_nodes + [hostvars[item]['scale_daemon_nodename']] }}" + when: not scale_ces_enable_status.stdout_lines is search(hostvars[item]['scale_daemon_nodename']) with_items: - "{{ scale_protocol_node_list }}" @@ -52,14 +62,13 @@ name: iputils-arping state: present when: (ansible_distribution in scale_ubuntu_distribution) and - (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) + (inventory_hostname in scale_protocol_node_list) - name: configure | Check if SMB is running shell: cmd: "{{ scale_command_path }}mmces service list|grep SMB" register: scale_service_status - when: (ansible_fqdn in scale_protocol_node_list) or - (inventory_hostname in scale_protocol_node_list) + when: inventory_hostname in scale_protocol_node_list ignore_errors: true failed_when: false run_once: true @@ -67,14 +76,14 @@ - name: configure | Add SMB service to list set_fact: scale_service_list: "{{ scale_service_list + [scale_service_status.stderr|regex_search('SMB')] }}" - when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and + when: (inventory_hostname in scale_protocol_node_list) and ( scale_service_status.rc > 0 ) run_once: true - name: configure | Add SMB service to list set_fact: scale_service_list: "{{ scale_service_list + ['SMB'] }}" - when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and + when: (inventory_hostname in scale_protocol_node_list) and ( scale_service_status.rc == 0 ) run_once: true @@ -82,8 +91,7 @@ shell: cmd: "{{ scale_command_path }}mmces service list|grep NFS" register: 
scale_service_status - when: (ansible_fqdn in scale_protocol_node_list) or - (inventory_hostname in scale_protocol_node_list) + when: inventory_hostname in scale_protocol_node_list ignore_errors: true failed_when: false run_once: true @@ -91,25 +99,53 @@ - name: configure | Add NFS service to the list set_fact: scale_service_list: "{{ scale_service_list + [scale_service_status.stderr|regex_search('NFS')] }}" - when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and + when: (inventory_hostname in scale_protocol_node_list) and ( scale_service_status.rc > 0 ) run_once: true - name: configure | Add NFS service to the list set_fact: scale_service_list: "{{ scale_service_list + ['NFS'] }}" + when: (inventory_hostname in scale_protocol_node_list) and + ( scale_service_status.rc == 0 ) + run_once: true + +- name: configure | Check if OBJ is running + shell: + cmd: "{{ scale_command_path }}mmces service list|grep OBJ" + register: scale_service_status + when: (ansible_fqdn in scale_protocol_node_list) or + (inventory_hostname in scale_protocol_node_list) + ignore_errors: true + failed_when: false + run_once: true + +- name: configure | Add OBJ service to the list + set_fact: + scale_service_list: "{{ scale_service_list + [scale_service_status.stderr|regex_search('OBJ')] }}" + when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and + ( scale_service_status.rc > 0 ) + run_once: true + +- name: configure | Add OBJ service to the list + set_fact: + scale_service_list: "{{ scale_service_list + ['OBJ'] }}" when: (ansible_fqdn in scale_protocol_node_list or inventory_hostname in scale_protocol_node_list) and ( scale_service_status.rc == 0 ) run_once: true - import_role: - name: nfs/node + name: ibm.spectrum_scale.nfs_install when: scale_ces_disabled_nodes|length > 0 and 'NFS' in scale_service_list - import_role: - name: smb/node + name: ibm.spectrum_scale.smb_install when: scale_ces_disabled_nodes|length > 0 and 'SMB' in scale_service_list +- import_role: + name: ibm.spectrum_scale.obj_install + when: scale_ces_disabled_nodes|length > 0 and 'OBJ' in scale_service_list + - name: configure | Prepare ces nodes string set_fact: scale_ces_nodes: "{{ scale_ces_nodes + ',' + item|string }}" @@ -128,10 +164,10 @@ #- name: configure | Collect status of ces nodes #shell: - # cmd: "{{ scale_command_path }}mmces node list|grep {{ item }}" + # cmd: "{{ scale_command_path }}mmces node list|grep {{ hostvars[item]['scale_daemon_nodename'] }}" #register: scale_ces_enable_status #with_items: - #- "{{ scale_protocol_node_list }}" + # - "{{ scale_protocol_node_list }}" #delegate_to: "{{ scale_protocol_node_list.0 }}" #- name: configure | Check CES enabled on all nodes @@ -157,7 +193,7 @@ - name: configure| Check CES enabled on all nodes assert: that: - - "item in scale_ces_enable_status.stdout" + - hostvars[item]['scale_daemon_nodename'] in scale_ces_enable_status.stdout fail_msg: "CES is not enabled on {{ item }} protocol node" success_msg: "Successful enabling of CES on protocol node {{ item }}" with_items: @@ -192,8 +228,7 @@ - name: configure | Assign export ips as pool for CES groups command: "{{ scale_command_path }}mmces address add --ces-ip {{ item.export_ip_pool|join(',') }} --ces-group {{ item.group_name}}" - when: (ansible_fqdn in scale_protocol_node_list) or - (inventory_hostname in scale_protocol_node_list) + when: inventory_hostname in scale_protocol_node_list delegate_to: "{{ scale_protocol_node_list.0 }}" with_items: 
- "{{ scale_protocols.scale_ces_groups }}" @@ -218,7 +253,7 @@ - name: configure | Rebalance CES IPs command: "{{ scale_command_path }}mmces address move --rebalance" - #when: ansible_fqdn in scale_protocol_node_list + #when: inventory_hostname in scale_protocol_node_list delegate_to: "{{ scale_protocol_node_list.0 }}" run_once: true diff --git a/roles/nfs/common/tasks/main.yml b/roles/ces_common/tasks/main.yml similarity index 100% rename from roles/nfs/common/tasks/main.yml rename to roles/ces_common/tasks/main.yml diff --git a/roles/nfs/common/vars/main.yml b/roles/ces_common/vars/main.yml similarity index 100% rename from roles/nfs/common/vars/main.yml rename to roles/ces_common/vars/main.yml diff --git a/roles/core/cluster/meta/main.yml b/roles/core/cluster/meta/main.yml deleted file mode 100644 index 26132da7..00000000 --- a/roles/core/cluster/meta/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -galaxy_info: - role_name: core_cluster - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - custom_module diff --git a/roles/core_common/README.md b/roles/core_common/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_common/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core/common/defaults/main.yml b/roles/core_common/defaults/main.yml similarity index 95% rename from roles/core/common/defaults/main.yml rename to roles/core_common/defaults/main.yml index 8289e317..425e0efa 100644 --- a/roles/core/common/defaults/main.yml +++ b/roles/core_common/defaults/main.yml @@ -35,3 +35,5 @@ scale_install_localpkg_tmpdir_path: /tmp ## Enable/disable gpg key flag scale_enable_gpg_check: true +## Storage Scale GPG key filename +scale_gpg_key_name: "SpectrumScale_public_key.pgp" diff --git a/roles/core/common/handlers/main.yml b/roles/core_common/handlers/main.yml similarity index 100% rename from roles/core/common/handlers/main.yml rename to roles/core_common/handlers/main.yml diff --git a/roles/core/common/meta/main.yml b/roles/core_common/meta/main.yml similarity index 69% rename from roles/core/common/meta/main.yml rename to roles/core_common/meta/main.yml index 88542913..9cd697a4 100644 --- a/roles/core/common/meta/main.yml +++ b/roles/core_common/meta/main.yml @@ -1,21 +1,18 @@ --- galaxy_info: - role_name: core_common author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/core/common/tasks/apt/set_vars.yml b/roles/core_common/tasks/apt/set_vars.yml similarity index 100% rename from roles/core/common/tasks/apt/set_vars.yml rename to roles/core_common/tasks/apt/set_vars.yml diff --git a/roles/core/common/tasks/check.yml b/roles/core_common/tasks/check.yml similarity index 92% rename from roles/core/common/tasks/check.yml rename to roles/core_common/tasks/check.yml index 15b96601..2ea6b6b6 100644 --- a/roles/core/common/tasks/check.yml +++ b/roles/core_common/tasks/check.yml @@ -91,17 +91,22 @@ # set dynamic variable based on supported OS - name: check | Set 
variables based on yum/dnf based OS - include: yum/set_vars.yml + include_tasks: yum/set_vars.yml when: ansible_distribution in scale_rhel_distribution - name: check | Set variables based on apt based os - include: apt/set_vars.yml + include_tasks: apt/set_vars.yml when: ansible_distribution in scale_ubuntu_distribution - name: check | Set variables based on zypper based OS - include: zypper/set_vars.yml + include_tasks: zypper/set_vars.yml when: ansible_distribution in scale_sles_distribution +- name: check | Storage Scale GPG key + set_fact: + scale_gpg_key_name: "Storage_Scale_public_key.pgp" + when: scale_version is defined and scale_version >= "5.1.8.0" + # Copy and import gpg key on RHEL and SLES if gpfs version >= 5.0.5.0 - block: - name: check | Copy key @@ -112,7 +117,7 @@ - rpm_key: state: present - key: "{{ scale_gpgKey_dest }}/SpectrumScale_public_key.pgp" + key: "{{ scale_gpgKey_dest }}/{{ scale_gpg_key_name }}" when: ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) and scale_enable_gpg_check and scale_version >= "5.0.5.0" and scale_install_repository_url is defined) diff --git a/roles/core/common/tasks/main.yml b/roles/core_common/tasks/main.yml similarity index 100% rename from roles/core/common/tasks/main.yml rename to roles/core_common/tasks/main.yml diff --git a/roles/core/common/tasks/yum/set_vars.yml b/roles/core_common/tasks/yum/set_vars.yml similarity index 100% rename from roles/core/common/tasks/yum/set_vars.yml rename to roles/core_common/tasks/yum/set_vars.yml diff --git a/roles/core/common/tasks/zypper/set_vars.yml b/roles/core_common/tasks/zypper/set_vars.yml similarity index 100% rename from roles/core/common/tasks/zypper/set_vars.yml rename to roles/core_common/tasks/zypper/set_vars.yml diff --git a/roles/core/cluster/tests/inventory b/roles/core_common/tests/inventory similarity index 100% rename from roles/core/cluster/tests/inventory rename to roles/core_common/tests/inventory diff --git a/roles/core/cluster/tests/test.yml b/roles/core_common/tests/test.yml similarity index 100% rename from roles/core/cluster/tests/test.yml rename to roles/core_common/tests/test.yml diff --git a/roles/core/common/vars/main.yml b/roles/core_common/vars/main.yml similarity index 94% rename from roles/core/common/vars/main.yml rename to roles/core_common/vars/main.yml index 0b5e9ad8..54424c53 100644 --- a/roles/core/common/vars/main.yml +++ b/roles/core_common/vars/main.yml @@ -40,8 +40,8 @@ scale_sles_distribution: ## Specify package extraction path and gpg key path scale_extracted_default_path: "/usr/lpp/mmfs" scale_extracted_path: "{{ scale_extracted_default_path }}/{{ scale_version }}" -scale_gpgKey_src: "/usr/lpp/mmfs/{{ scale_version }}/Public_Keys/SpectrumScale_public_key.pgp" -scale_gpgKey_repository_src: "{{ scale_install_repository_url }}Public_Keys/SpectrumScale_public_key.pgp" +scale_gpgKey_src: "/usr/lpp/mmfs/{{ scale_version }}/Public_Keys/{{ scale_gpg_key_name }}" +scale_gpgKey_repository_src: "{{ scale_install_repository_url }}Public_Keys/{{ scale_gpg_key_name }}" scale_gpgKey_dest: "/root/" scale_install_gpgcheck: "yes" scale_disable_gpgcheck: "no" diff --git a/roles/core_configure/README.md b/roles/core_configure/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_configure/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core/cluster/defaults/main.yml b/roles/core_configure/defaults/main.yml similarity index 99% rename from 
roles/core/cluster/defaults/main.yml rename to roles/core_configure/defaults/main.yml index c7a23844..7769cc72 100644 --- a/roles/core/cluster/defaults/main.yml +++ b/roles/core_configure/defaults/main.yml @@ -57,7 +57,7 @@ scale_storage_filesystem_defaults: defaultDataReplicas: 1 numNodes: 32 automaticMountOption: true - + ## defaultMountPoint will be this prefix, followed by the filesystem name defaultMountPoint_prefix: /mnt/ ## Overwrite existing NSDs - if set to 'true' then disks will *not* be checked @@ -83,4 +83,4 @@ scale_node_role_change: true scale_node_update_check: true ## admin node flag -is_admin_node: false +scale_admin_node: false diff --git a/roles/core/cluster/handlers/main.yml b/roles/core_configure/handlers/main.yml similarity index 100% rename from roles/core/cluster/handlers/main.yml rename to roles/core_configure/handlers/main.yml diff --git a/roles/core/postcheck/meta/main.yml b/roles/core_configure/meta/main.yml similarity index 68% rename from roles/core/postcheck/meta/main.yml rename to roles/core_configure/meta/main.yml index f4128a33..9cd697a4 100644 --- a/roles/core/postcheck/meta/main.yml +++ b/roles/core_configure/meta/main.yml @@ -1,21 +1,18 @@ --- galaxy_info: - role_name: core_postcheck author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/core/cluster/tasks/check.yml b/roles/core_configure/tasks/check.yml similarity index 82% rename from roles/core/cluster/tasks/check.yml rename to roles/core_configure/tasks/check.yml index d87e881c..b7a2af3f 100644 --- a/roles/core/cluster/tasks/check.yml +++ b/roles/core_configure/tasks/check.yml @@ -22,13 +22,14 @@ scale_admin_nodename: "{{ scale_admin_nodename }}" when: hostvars[inventory_hostname].scale_admin_nodename is undefined -- set_fact: is_admin_node=false - when: hostvars[inventory_hostname].is_admin_node is undefined +- set_fact: + scale_admin_node: false + when: hostvars[inventory_hostname].scale_admin_node is undefined - name: check | Assign default admin nodes set_fact: - is_admin_node: true - when: true not in ansible_play_hosts | map('extract', hostvars, 'is_admin_node') | map('bool') | list + scale_admin_node: true + when: true not in ansible_play_hosts | map('extract', hostvars, 'scale_admin_node') | map('bool') | list with_sequence: start=1 end={{ [ ansible_play_hosts | length, 1 ] | min }} run_once: true delegate_to: "{{ ansible_play_hosts[item | int - 1] }}" @@ -38,7 +39,7 @@ add_host: name: "{{ item }}" groups: scale_cluster_admin_nodes - when: hostvars[item].is_admin_node is defined and hostvars[item].is_admin_node | bool + when: hostvars[item].scale_admin_node is defined and hostvars[item].scale_admin_node | bool with_items: "{{ ansible_play_hosts }}" changed_when: false diff --git a/roles/core/cluster/tasks/cluster.yml b/roles/core_configure/tasks/cluster.yml similarity index 93% rename from roles/core/cluster/tasks/cluster.yml rename to roles/core_configure/tasks/cluster.yml index 624211ba..d5dfb2d7 100644 --- a/roles/core/cluster/tasks/cluster.yml +++ b/roles/core_configure/tasks/cluster.yml @@ -79,7 +79,7 @@ delegate_to: localhost when: - scale_cluster_profile_name is defined and scale_cluster_profile_name != 'None' - - scale_cluster_profile_name not in gpfs_cluster_system_profile + - 
scale_cluster_profile_name not in scale_cluster_system_profile - block: - name: cluster | cluster profile name validation @@ -98,7 +98,7 @@ delegate_to: localhost when: - scale_cluster_profile_name is defined and scale_cluster_profile_name != 'None' - - scale_cluster_profile_name not in gpfs_cluster_system_profile + - scale_cluster_profile_name not in scale_cluster_system_profile - block: - name: cluster | Copy user defined profile @@ -108,7 +108,7 @@ mode: '0444' when: - scale_cluster_profile_name is defined and scale_cluster_profile_name != 'None' - - scale_cluster_profile_name not in gpfs_cluster_system_profile + - scale_cluster_profile_name not in scale_cluster_system_profile # # Create new cluster @@ -149,6 +149,12 @@ when: - scale_cluster_config is defined and scale_cluster_config.remote_file_copy is defined + - name: cluster | Set gpfs cluster user defined port if it is defined + set_fact: + extra_option: "{{ extra_option }} --port {{ scale_cluster_config.scale_port_number }}" + when: + - scale_cluster_config is defined and scale_cluster_config.scale_port_number is defined + - name: cluster | Create new cluster command: /usr/lpp/mmfs/bin/mmcrcluster -N /var/mmfs/tmp/NodeFile -C {{ scale_cluster_clustername }} {{ profile_type }} {{ extra_option }} notify: accept-licenses diff --git a/roles/core/cluster/tasks/cluster_start.yml b/roles/core_configure/tasks/cluster_start.yml similarity index 100% rename from roles/core/cluster/tasks/cluster_start.yml rename to roles/core_configure/tasks/cluster_start.yml diff --git a/roles/core/cluster/tasks/config.yml b/roles/core_configure/tasks/config.yml similarity index 100% rename from roles/core/cluster/tasks/config.yml rename to roles/core_configure/tasks/config.yml diff --git a/roles/core/cluster/tasks/finalize.yml b/roles/core_configure/tasks/finalize.yml similarity index 100% rename from roles/core/cluster/tasks/finalize.yml rename to roles/core_configure/tasks/finalize.yml diff --git a/roles/core/cluster/tasks/install_gplbin.yml b/roles/core_configure/tasks/install_gplbin.yml similarity index 88% rename from roles/core/cluster/tasks/install_gplbin.yml rename to roles/core_configure/tasks/install_gplbin.yml index 2f149f21..a750e9cc 100644 --- a/roles/core/cluster/tasks/install_gplbin.yml +++ b/roles/core_configure/tasks/install_gplbin.yml @@ -13,8 +13,9 @@ state: present notify: yum-clean-metadata when: - - ansible_pkg_mgr == 'yum' + - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' # # Add kernel extension prereqs diff --git a/roles/core/cluster/tasks/main.yml b/roles/core_configure/tasks/main.yml similarity index 100% rename from roles/core/cluster/tasks/main.yml rename to roles/core_configure/tasks/main.yml diff --git a/roles/core/cluster/tasks/removenode.yml b/roles/core_configure/tasks/removenode.yml similarity index 94% rename from roles/core/cluster/tasks/removenode.yml rename to roles/core_configure/tasks/removenode.yml index a6aafbf6..5df9c6d7 100644 --- a/roles/core/cluster/tasks/removenode.yml +++ b/roles/core_configure/tasks/removenode.yml @@ -22,7 +22,7 @@ groups: scale_cluster_members when: - hostvars[item].scale_state is defined and hostvars[item].scale_state == 'present' - - hostvars[item].is_admin_node is defined and hostvars[item].is_admin_node|bool + - hostvars[item].scale_admin_node is defined and hostvars[item].scale_admin_node|bool - hostvars[item].scale_cluster_clusterId.stdout with_items: "{{ ansible_play_hosts 
}}" changed_when: false diff --git a/roles/core/cluster/tasks/storage.yml b/roles/core_configure/tasks/storage.yml similarity index 95% rename from roles/core/cluster/tasks/storage.yml rename to roles/core_configure/tasks/storage.yml index ade649e0..db1f6238 100644 --- a/roles/core/cluster/tasks/storage.yml +++ b/roles/core_configure/tasks/storage.yml @@ -100,9 +100,24 @@ - item.size > 1 with_items: "{{ scale_storage_stanzafile_new.results }}" - - name: storage | Wait for NSD configuration to be synced across cluster - wait_for: - timeout: 30 + - block: + - debug: + msg: Wait for 240 second for NSD configuration to be synced across cluster. Please be patient... + + - name: storage | Wait for NSD configuration to be synced across cluster + wait_for: + timeout: 240 + + - name: storage | wait-nsd-active + shell: /usr/lpp/mmfs/bin/mmlsnsd -a -Y | grep -v HEADER | cut -d ':' -f 8 + register: scale_existig_nsd_list + until: + - ((scale_existig_nsd_list.stdout_lines) | length) >= (scale_storage_nsddefs | unique | length) + retries: 12 + delay: 20 + changed_when: false + when: scale_storage_nsddefs | length > 0 + run_once: true # # Create new filesystems diff --git a/roles/core/cluster/tasks/storage_disk.yml b/roles/core_configure/tasks/storage_disk.yml similarity index 100% rename from roles/core/cluster/tasks/storage_disk.yml rename to roles/core_configure/tasks/storage_disk.yml diff --git a/roles/core/cluster/tasks/storage_fs.yml b/roles/core_configure/tasks/storage_fs.yml similarity index 100% rename from roles/core/cluster/tasks/storage_fs.yml rename to roles/core_configure/tasks/storage_fs.yml diff --git a/roles/core/cluster/templates/AddNodeFile.j2 b/roles/core_configure/templates/AddNodeFile.j2 similarity index 100% rename from roles/core/cluster/templates/AddNodeFile.j2 rename to roles/core_configure/templates/AddNodeFile.j2 diff --git a/roles/core/cluster/templates/ChangeFile.j2 b/roles/core_configure/templates/ChangeFile.j2 similarity index 100% rename from roles/core/cluster/templates/ChangeFile.j2 rename to roles/core_configure/templates/ChangeFile.j2 diff --git a/roles/core/cluster/templates/NewNodeFile.j2 b/roles/core_configure/templates/NewNodeFile.j2 similarity index 100% rename from roles/core/cluster/templates/NewNodeFile.j2 rename to roles/core_configure/templates/NewNodeFile.j2 diff --git a/roles/core/cluster/templates/NodeClass.j2 b/roles/core_configure/templates/NodeClass.j2 similarity index 100% rename from roles/core/cluster/templates/NodeClass.j2 rename to roles/core_configure/templates/NodeClass.j2 diff --git a/roles/core/cluster/templates/StanzaFile.j2 b/roles/core_configure/templates/StanzaFile.j2 similarity index 100% rename from roles/core/cluster/templates/StanzaFile.j2 rename to roles/core_configure/templates/StanzaFile.j2 diff --git a/roles/core/cluster/templates/StanzaFile_fs.j2 b/roles/core_configure/templates/StanzaFile_fs.j2 similarity index 100% rename from roles/core/cluster/templates/StanzaFile_fs.j2 rename to roles/core_configure/templates/StanzaFile_fs.j2 diff --git a/roles/core/cluster/templates/StanzaFile_nsd.j2 b/roles/core_configure/templates/StanzaFile_nsd.j2 similarity index 100% rename from roles/core/cluster/templates/StanzaFile_nsd.j2 rename to roles/core_configure/templates/StanzaFile_nsd.j2 diff --git a/roles/core/common/tests/inventory b/roles/core_configure/tests/inventory similarity index 100% rename from roles/core/common/tests/inventory rename to roles/core_configure/tests/inventory diff --git a/roles/core/common/tests/test.yml 
b/roles/core_configure/tests/test.yml similarity index 100% rename from roles/core/common/tests/test.yml rename to roles/core_configure/tests/test.yml diff --git a/roles/core/cluster/vars/main.yml b/roles/core_configure/vars/main.yml similarity index 96% rename from roles/core/cluster/vars/main.yml rename to roles/core_configure/vars/main.yml index 7abdb4cc..aaef8ee8 100644 --- a/roles/core/cluster/vars/main.yml +++ b/roles/core_configure/vars/main.yml @@ -25,7 +25,7 @@ scale_active_states: - active # scale supported profile -gpfs_cluster_system_profile: +scale_cluster_system_profile: - gpfsprotocoldefaults - gpfsprotocolrandomio diff --git a/roles/core_install/README.md b/roles/core_install/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_install/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core/node/defaults/main.yml b/roles/core_install/defaults/main.yml similarity index 100% rename from roles/core/node/defaults/main.yml rename to roles/core_install/defaults/main.yml diff --git a/roles/core/node/handlers/main.yml b/roles/core_install/handlers/main.yml similarity index 100% rename from roles/core/node/handlers/main.yml rename to roles/core_install/handlers/main.yml diff --git a/roles/custom_module/meta/main.yml b/roles/core_install/meta/main.yml similarity index 67% rename from roles/custom_module/meta/main.yml rename to roles/core_install/meta/main.yml index e3f01801..017c7c5f 100644 --- a/roles/custom_module/meta/main.yml +++ b/roles/core_install/meta/main.yml @@ -1,19 +1,19 @@ --- galaxy_info: - role_name: core_cluster author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.core_common diff --git a/roles/core/node/tasks/apt/install.yml b/roles/core_install/tasks/apt/install.yml similarity index 100% rename from roles/core/node/tasks/apt/install.yml rename to roles/core_install/tasks/apt/install.yml diff --git a/roles/core/node/tasks/build.yml b/roles/core_install/tasks/build.yml similarity index 100% rename from roles/core/node/tasks/build.yml rename to roles/core_install/tasks/build.yml diff --git a/roles/core/node/tasks/finalize.yml b/roles/core_install/tasks/finalize.yml similarity index 100% rename from roles/core/node/tasks/finalize.yml rename to roles/core_install/tasks/finalize.yml diff --git a/roles/core/node/tasks/install.yml b/roles/core_install/tasks/install.yml similarity index 100% rename from roles/core/node/tasks/install.yml rename to roles/core_install/tasks/install.yml diff --git a/roles/core/node/tasks/install_dir_pkg.yml b/roles/core_install/tasks/install_dir_pkg.yml similarity index 91% rename from roles/core/node/tasks/install_dir_pkg.yml rename to roles/core_install/tasks/install_dir_pkg.yml index 2269ff51..f4fc360e 100644 --- a/roles/core/node/tasks/install_dir_pkg.yml +++ b/roles/core_install/tasks/install_dir_pkg.yml @@ -35,10 +35,10 @@ - block: - name: install | Copy installation package to node - copy: + synchronize: src: "{{ scale_install_directory_pkg_path }}" dest: "{{ scale_extracted_path }}" - mode: a+x + use_ssh_args: yes - name: install | Set installation package path set_fact: @@ -55,9 +55,21 @@ - name: Import a gpg key from a file rpm_key: state: present - key: "{{ 
dir_path }}/SpectrumScale_public_key.pgp" - when: ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) + key: "{{ dir_path }}/{{ scale_gpg_key_name }}" + when: + - ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) and scale_enable_gpg_check and scale_version >= "5.0.5.0") + - scale_gpgkey_path is undefined + +- name: Import a gpg key from a file + rpm_key: + state: present + key: "{{ scale_gpgkey_path }}/{{ scale_gpg_key_name }}" + when: + - ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) + and scale_enable_gpg_check and scale_version >= "5.0.5.0") + - scale_gpgkey_path is defined + # # Find GPFS BASE diff --git a/roles/core/node/tasks/install_gplbin.yml b/roles/core_install/tasks/install_gplbin.yml similarity index 74% rename from roles/core/node/tasks/install_gplbin.yml rename to roles/core_install/tasks/install_gplbin.yml index a3a5bfbf..df2468b5 100644 --- a/roles/core/node/tasks/install_gplbin.yml +++ b/roles/core_install/tasks/install_gplbin.yml @@ -15,28 +15,34 @@ when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' - name: install | Configure GPL module repository apt_repository: - name: spectrum-scale-gplbin - description: IBM Spectrum Scale (GPFS) GPL module - baseurl: "{{ scale_install_gplbin_repository_url }}" - gpgcheck: false + filename: spectrum-scale-gplbin + repo: "{{ scale_install_gplbin_repository_url }}" + validate_certs: no state: present + update_cache: yes + codename: IBM Spectrum Scale (GPFS) GPL module + mode: 0777 when: - ansible_pkg_mgr == 'apt' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' - name: install | Configure GPL module repository zypper_repository: name: spectrum-scale-gplbin description: IBM Spectrum Scale (GPFS) GPL module - baseurl: "{{ scale_install_gplbin_repository_url }}" - gpgcheck: false + repo: "{{ scale_install_gplbin_repository_url }}" + disable_gpg_check: yes state: present + overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' # # Add kernel extension prereqs # diff --git a/roles/core/node/tasks/install_license_pkg.yml b/roles/core_install/tasks/install_license_pkg.yml similarity index 100% rename from roles/core/node/tasks/install_license_pkg.yml rename to roles/core_install/tasks/install_license_pkg.yml diff --git a/roles/core/node/tasks/install_license_repository.yml b/roles/core_install/tasks/install_license_repository.yml similarity index 100% rename from roles/core/node/tasks/install_license_repository.yml rename to roles/core_install/tasks/install_license_repository.yml diff --git a/roles/core/node/tasks/install_local_pkg.yml b/roles/core_install/tasks/install_local_pkg.yml similarity index 99% rename from roles/core/node/tasks/install_local_pkg.yml rename to roles/core_install/tasks/install_local_pkg.yml index 8c621053..dd93e952 100644 --- a/roles/core/node/tasks/install_local_pkg.yml +++ b/roles/core_install/tasks/install_local_pkg.yml @@ -110,7 +110,7 @@ - rpm_key: state: present - key: "{{ scale_gpgKey_dest }}SpectrumScale_public_key.pgp" + key: "{{ scale_gpgKey_dest }}{{ scale_gpg_key_name }}" when: ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) and 
scale_enable_gpg_check and scale_version >= "5.0.5.0") diff --git a/roles/core/node/tasks/install_remote_pkg.yml b/roles/core_install/tasks/install_remote_pkg.yml similarity index 99% rename from roles/core/node/tasks/install_remote_pkg.yml rename to roles/core_install/tasks/install_remote_pkg.yml index f7885fd1..fb0274a4 100644 --- a/roles/core/node/tasks/install_remote_pkg.yml +++ b/roles/core_install/tasks/install_remote_pkg.yml @@ -81,7 +81,7 @@ - rpm_key: state: present - key: "{{ scale_gpgKey_dest }}SpectrumScale_public_key.pgp" + key: "{{ scale_gpgKey_dest }}{{ scale_gpg_key_name }}" when: ((ansible_distribution in scale_sles_distribution or ansible_distribution in scale_rhel_distribution) and scale_enable_gpg_check and scale_version >= "5.0.5.0") diff --git a/roles/core/node/tasks/install_repository.yml b/roles/core_install/tasks/install_repository.yml similarity index 94% rename from roles/core/node/tasks/install_repository.yml rename to roles/core_install/tasks/install_repository.yml index bcb0311d..f04f0865 100644 --- a/roles/core/node/tasks/install_repository.yml +++ b/roles/core_install/tasks/install_repository.yml @@ -17,6 +17,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure apt repository @@ -32,6 +33,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure zypper repository @@ -46,6 +48,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/core/node/tasks/main.yml b/roles/core_install/tasks/main.yml similarity index 100% rename from roles/core/node/tasks/main.yml rename to roles/core_install/tasks/main.yml diff --git a/roles/core/node/tasks/update.yml b/roles/core_install/tasks/update.yml similarity index 100% rename from roles/core/node/tasks/update.yml rename to roles/core_install/tasks/update.yml diff --git a/roles/core/node/tasks/upgrade.yml b/roles/core_install/tasks/upgrade.yml similarity index 91% rename from roles/core/node/tasks/upgrade.yml rename to roles/core_install/tasks/upgrade.yml index f19dc469..40e502c0 100644 --- a/roles/core/node/tasks/upgrade.yml +++ b/roles/core_install/tasks/upgrade.yml @@ -54,6 +54,11 @@ scale_repo_gpfsversion: "{{ package_gpfs_version }}" when: scale_install_repository_url is defined +- set_fact: + scale_vars_update: "{{ ansible_play_hosts| + map('extract', hostvars, 'scale_install_needsupdate')| + list }}" + run_once: true - block: ## run_once: true - name: update | Check if any running node needs to be updated @@ -67,6 +72,6 @@ ###################################################################### assert: that: - - true not in ansible_play_hosts | map('extract', hostvars, 'scale_install_needsupdate') | list + - "{{ 'True' not in scale_vars_update }}" msg: "{{ msg.split('\n') }}" run_once: true diff --git a/roles/core/node/tasks/yum/install.yml b/roles/core_install/tasks/yum/install.yml similarity index 100% rename from roles/core/node/tasks/yum/install.yml rename to roles/core_install/tasks/yum/install.yml diff --git a/roles/core/node/tasks/zypper/install.yml b/roles/core_install/tasks/zypper/install.yml similarity index 100% rename from roles/core/node/tasks/zypper/install.yml rename to roles/core_install/tasks/zypper/install.yml diff --git 
a/roles/core/node/templates/AddNodeFile.j2 b/roles/core_install/templates/AddNodeFile.j2 similarity index 100% rename from roles/core/node/templates/AddNodeFile.j2 rename to roles/core_install/templates/AddNodeFile.j2 diff --git a/roles/core/node/templates/ChangeFile.j2 b/roles/core_install/templates/ChangeFile.j2 similarity index 100% rename from roles/core/node/templates/ChangeFile.j2 rename to roles/core_install/templates/ChangeFile.j2 diff --git a/roles/core/node/templates/NewNodeFile.j2 b/roles/core_install/templates/NewNodeFile.j2 similarity index 100% rename from roles/core/node/templates/NewNodeFile.j2 rename to roles/core_install/templates/NewNodeFile.j2 diff --git a/roles/core/node/templates/NodeClass.j2 b/roles/core_install/templates/NodeClass.j2 similarity index 100% rename from roles/core/node/templates/NodeClass.j2 rename to roles/core_install/templates/NodeClass.j2 diff --git a/roles/core/node/templates/StanzaFile.j2 b/roles/core_install/templates/StanzaFile.j2 similarity index 100% rename from roles/core/node/templates/StanzaFile.j2 rename to roles/core_install/templates/StanzaFile.j2 diff --git a/roles/core/node/tests/inventory b/roles/core_install/tests/inventory similarity index 100% rename from roles/core/node/tests/inventory rename to roles/core_install/tests/inventory diff --git a/roles/core/node/tests/test.yml b/roles/core_install/tests/test.yml similarity index 100% rename from roles/core/node/tests/test.yml rename to roles/core_install/tests/test.yml diff --git a/roles/core/node/vars/main.yml b/roles/core_install/vars/main.yml similarity index 100% rename from roles/core/node/vars/main.yml rename to roles/core_install/vars/main.yml diff --git a/roles/core_prepare/README.md b/roles/core_prepare/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_prepare/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core/precheck/defaults/main.yml b/roles/core_prepare/defaults/main.yml similarity index 96% rename from roles/core/precheck/defaults/main.yml rename to roles/core_prepare/defaults/main.yml index 336d2e10..05a3dbbe 100644 --- a/roles/core/precheck/defaults/main.yml +++ b/roles/core_prepare/defaults/main.yml @@ -96,6 +96,12 @@ scale_build_gplsrc_prereqs: - kernel-devel - make +## List of optional prereq package to install +scale_prereqs_package: + - numactl + +## List of optional prereq package to install flag +scale_install_prereqs_packages: false ## Default cluster name scale_cluster_clustername: gpfs1.local diff --git a/roles/core/precheck/handlers/main.yml b/roles/core_prepare/handlers/main.yml similarity index 81% rename from roles/core/precheck/handlers/main.yml rename to roles/core_prepare/handlers/main.yml index 4b4ca3a7..8330d9a0 100644 --- a/roles/core/precheck/handlers/main.yml +++ b/roles/core_prepare/handlers/main.yml @@ -23,11 +23,7 @@ ignore_errors: true - name: wait-for-server - wait_for: - host: "{{ ansible_default_ipv4.address }}" - port: 22 - state: started + wait_for_connection: delay: 45 timeout: 300 delegate_to: localhost -# handlers file for precheck diff --git a/roles/core/precheck/meta/main.yml b/roles/core_prepare/meta/main.yml similarity index 67% rename from roles/core/precheck/meta/main.yml rename to roles/core_prepare/meta/main.yml index 65065ffd..017c7c5f 100644 --- a/roles/core/precheck/meta/main.yml +++ b/roles/core_prepare/meta/main.yml @@ -1,22 +1,19 @@ --- galaxy_info: - role_name: core_precheck author: IBM Corporation description: Highly-customizable Ansible 
role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - - common + - ibm.spectrum_scale.core_common diff --git a/roles/core/precheck/tasks/main.yml b/roles/core_prepare/tasks/main.yml similarity index 100% rename from roles/core/precheck/tasks/main.yml rename to roles/core_prepare/tasks/main.yml diff --git a/roles/core/precheck/tasks/prepare.yml b/roles/core_prepare/tasks/prepare.yml similarity index 75% rename from roles/core/precheck/tasks/prepare.yml rename to roles/core_prepare/tasks/prepare.yml index 9fd9a321..dbefa4e5 100644 --- a/roles/core/precheck/tasks/prepare.yml +++ b/roles/core_prepare/tasks/prepare.yml @@ -119,3 +119,39 @@ name: yum-utils state: present when: ansible_pkg_mgr == 'yum' + +- block: ## when: scale_install_prereqs_packages is defined + - name: prepare | Install prerequisite packages + yum: + name: "{{ scale_prereqs_package }}" + state: present + disable_excludes: all + when: ansible_pkg_mgr == 'yum' + register: scale_gpl_yum_result + retries: 10 + until: scale_gpl_yum_result is success + delay: 20 + + - name: prepare | Install prerequisite packages + dnf: + name: "{{ scale_prereqs_package }}" + state: present + disable_excludes: all + when: ansible_pkg_mgr == 'dnf' + register: scale_gpl_dnf_result + retries: 10 + until: scale_gpl_dnf_result is success + delay: 20 + + - name: prepare | Install prerequisite packages + apt: + name: "{{ scale_prereqs_package }}" + state: present + when: ansible_pkg_mgr == 'apt' + + - name: prepare | Install prerequisite packages + zypper: + name: "{{ scale_prereqs_package }}" + state: present + when: ansible_pkg_mgr == 'zypper' + when: scale_install_prereqs_packages | bool diff --git a/roles/core/postcheck/tests/inventory b/roles/core_prepare/tests/inventory similarity index 100% rename from roles/core/postcheck/tests/inventory rename to roles/core_prepare/tests/inventory diff --git a/roles/core/postcheck/tests/test.yml b/roles/core_prepare/tests/test.yml similarity index 100% rename from roles/core/postcheck/tests/test.yml rename to roles/core_prepare/tests/test.yml diff --git a/roles/core/precheck/vars/main.yml b/roles/core_prepare/vars/main.yml similarity index 100% rename from roles/core/precheck/vars/main.yml rename to roles/core_prepare/vars/main.yml diff --git a/roles/core_upgrade/README.md b/roles/core_upgrade/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_upgrade/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core/upgrade/defaults/main.yml b/roles/core_upgrade/defaults/main.yml similarity index 100% rename from roles/core/upgrade/defaults/main.yml rename to roles/core_upgrade/defaults/main.yml diff --git a/roles/core/upgrade/handlers/main.yml b/roles/core_upgrade/handlers/main.yml similarity index 100% rename from roles/core/upgrade/handlers/main.yml rename to roles/core_upgrade/handlers/main.yml diff --git a/roles/core_upgrade/meta/main.yml b/roles/core_upgrade/meta/main.yml new file mode 100644 index 00000000..017c7c5f --- /dev/null +++ b/roles/core_upgrade/meta/main.yml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: 
EL + versions: + - 7 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.core_common diff --git a/roles/core/upgrade/tasks/apt/install.yml b/roles/core_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/core/upgrade/tasks/apt/install.yml rename to roles/core_upgrade/tasks/apt/install.yml diff --git a/roles/core/upgrade/tasks/build.yml b/roles/core_upgrade/tasks/build.yml similarity index 94% rename from roles/core/upgrade/tasks/build.yml rename to roles/core_upgrade/tasks/build.yml index 1583cec5..b51ab046 100644 --- a/roles/core/upgrade/tasks/build.yml +++ b/roles/core_upgrade/tasks/build.yml @@ -29,8 +29,7 @@ # - name: build | Compile GPL module shell: export LINUX_DISTRIBUTION={{ scale_build_distribution }} ; /usr/lpp/mmfs/bin/mmbuildgpl --quiet - args: - creates: /lib/modules/{{ ansible_kernel }}/extra/mmfs26.ko + register: scale_build_gpl - name: build | Stat GPL module stat: diff --git a/roles/core/upgrade/tasks/finalize.yml b/roles/core_upgrade/tasks/finalize.yml similarity index 100% rename from roles/core/upgrade/tasks/finalize.yml rename to roles/core_upgrade/tasks/finalize.yml diff --git a/roles/core/upgrade/tasks/install.yml b/roles/core_upgrade/tasks/install.yml similarity index 100% rename from roles/core/upgrade/tasks/install.yml rename to roles/core_upgrade/tasks/install.yml diff --git a/roles/core/upgrade/tasks/install_dir_pkg.yml b/roles/core_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/core/upgrade/tasks/install_dir_pkg.yml rename to roles/core_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/core/upgrade/tasks/install_gplbin.yml b/roles/core_upgrade/tasks/install_gplbin.yml similarity index 90% rename from roles/core/upgrade/tasks/install_gplbin.yml rename to roles/core_upgrade/tasks/install_gplbin.yml index a9660c46..7b43d86c 100644 --- a/roles/core/upgrade/tasks/install_gplbin.yml +++ b/roles/core_upgrade/tasks/install_gplbin.yml @@ -15,6 +15,7 @@ when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' - name: upgrade | Configure GPL module repository apt_repository: @@ -26,6 +27,7 @@ when: - ansible_pkg_mgr == 'apt' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' - name: upgrade | Configure GPL module repository zypper_repository: @@ -37,6 +39,7 @@ when: - ansible_pkg_mgr == 'zypper' - scale_install_gplbin_repository_url is defined + - scale_install_gplbin_repository_url != 'existing' # # Add kernel extension prereqs # diff --git a/roles/core/upgrade/tasks/install_license_pkg.yml b/roles/core_upgrade/tasks/install_license_pkg.yml similarity index 100% rename from roles/core/upgrade/tasks/install_license_pkg.yml rename to roles/core_upgrade/tasks/install_license_pkg.yml diff --git a/roles/core/upgrade/tasks/install_license_repository.yml b/roles/core_upgrade/tasks/install_license_repository.yml similarity index 100% rename from roles/core/upgrade/tasks/install_license_repository.yml rename to roles/core_upgrade/tasks/install_license_repository.yml diff --git a/roles/core/upgrade/tasks/install_local_pkg.yml b/roles/core_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/core/upgrade/tasks/install_local_pkg.yml rename to roles/core_upgrade/tasks/install_local_pkg.yml diff --git a/roles/core/upgrade/tasks/install_remote_pkg.yml b/roles/core_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from 
roles/core/upgrade/tasks/install_remote_pkg.yml rename to roles/core_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/core/upgrade/tasks/install_repository.yml b/roles/core_upgrade/tasks/install_repository.yml similarity index 96% rename from roles/core/upgrade/tasks/install_repository.yml rename to roles/core_upgrade/tasks/install_repository.yml index 8cc53aed..131dc5c0 100644 --- a/roles/core/upgrade/tasks/install_repository.yml +++ b/roles/core_upgrade/tasks/install_repository.yml @@ -20,6 +20,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure apt repository @@ -35,6 +36,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure zypper repository @@ -49,6 +51,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/core/upgrade/tasks/main.yml b/roles/core_upgrade/tasks/main.yml similarity index 100% rename from roles/core/upgrade/tasks/main.yml rename to roles/core_upgrade/tasks/main.yml diff --git a/roles/core/upgrade/tasks/yum/install.yml b/roles/core_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/core/upgrade/tasks/yum/install.yml rename to roles/core_upgrade/tasks/yum/install.yml diff --git a/roles/core/upgrade/tasks/zypper/install.yml b/roles/core_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/core/upgrade/tasks/zypper/install.yml rename to roles/core_upgrade/tasks/zypper/install.yml diff --git a/roles/core/precheck/tests/inventory b/roles/core_upgrade/tests/inventory similarity index 100% rename from roles/core/precheck/tests/inventory rename to roles/core_upgrade/tests/inventory diff --git a/roles/core/precheck/tests/test.yml b/roles/core_upgrade/tests/test.yml similarity index 100% rename from roles/core/precheck/tests/test.yml rename to roles/core_upgrade/tests/test.yml diff --git a/roles/core/upgrade/vars/main.yml b/roles/core_upgrade/vars/main.yml similarity index 100% rename from roles/core/upgrade/vars/main.yml rename to roles/core_upgrade/vars/main.yml diff --git a/roles/core_verify/README.md b/roles/core_verify/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/core_verify/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/core/postcheck/defaults/main.yml b/roles/core_verify/defaults/main.yml similarity index 100% rename from roles/core/postcheck/defaults/main.yml rename to roles/core_verify/defaults/main.yml diff --git a/roles/core/postcheck/handlers/main.yml b/roles/core_verify/handlers/main.yml similarity index 100% rename from roles/core/postcheck/handlers/main.yml rename to roles/core_verify/handlers/main.yml diff --git a/roles/core_verify/meta/main.yml b/roles/core_verify/meta/main.yml new file mode 100644 index 00000000..9cd697a4 --- /dev/null +++ b/roles/core_verify/meta/main.yml @@ -0,0 +1,18 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + + galaxy_tags: [] + +dependencies: [] diff --git a/roles/core/postcheck/tasks/main.yml b/roles/core_verify/tasks/main.yml similarity index 
100% rename from roles/core/postcheck/tasks/main.yml rename to roles/core_verify/tasks/main.yml diff --git a/roles/core/upgrade/tests/inventory b/roles/core_verify/tests/inventory similarity index 100% rename from roles/core/upgrade/tests/inventory rename to roles/core_verify/tests/inventory diff --git a/roles/core/upgrade/tests/test.yml b/roles/core_verify/tests/test.yml similarity index 100% rename from roles/core/upgrade/tests/test.yml rename to roles/core_verify/tests/test.yml diff --git a/roles/core/postcheck/vars/main.yml b/roles/core_verify/vars/main.yml similarity index 100% rename from roles/core/postcheck/vars/main.yml rename to roles/core_verify/vars/main.yml diff --git a/roles/custom_module/defaults/main.yml b/roles/custom_module/defaults/main.yml deleted file mode 100644 index 1ca33279..00000000 --- a/roles/custom_module/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# Default variables for the IBM Spectrum Scale (GPFS) custom module - diff --git a/roles/custom_module/inventory/hosts b/roles/custom_module/inventory/hosts deleted file mode 100644 index 74d91ba1..00000000 --- a/roles/custom_module/inventory/hosts +++ /dev/null @@ -1,28 +0,0 @@ -[scale_cluster] -node1.domain.com -node2.domain.com -node3.doamin.com -node4.domain.com - -[controller] -node1.domain.com - -[quorum_nodes] -node1.domain.com -node2.domain.com -node3.doamin.com - -[manager_nodes] -node1.domain.com -node2.domain.com - -[test_remove_storage_nodes] -node3.doamin.com filesystem="FS1" nsds="nsd3;nsd7" -node4.domain.com filesystem="FS1" nsds="nsd4;nsd8" - -[test_remove_nodes] -node3.doamin.com -node4.domain.com - -[test_add_nodes] -node3.doamin.com designation=client diff --git a/roles/custom_module/library/__init.py__ b/roles/custom_module/library/__init.py__ deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/custom_module/library/ibm_spectrumscale_cluster.py b/roles/custom_module/library/ibm_spectrumscale_cluster.py deleted file mode 100644 index 72f0b50b..00000000 --- a/roles/custom_module/library/ibm_spectrumscale_cluster.py +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -ANSIBLE_METADATA = { - 'status': ['preview'], - 'supported_by': 'IBM', - 'metadata_version': '1.0' - } - - -DOCUMENTATION = ''' ---- -module: ibm_spectrumscale_cluster -short_description: IBM Spectrum Scale Cluster Management -version_added: "0.0" - -description: - - This module can be used to create or delete an IBM Spectrum Scale - Cluster or retrieve information about the cluster. - -options: - op: - description: - - An operation to execute on the IBM Spectrum Scale Cluster. - Mutually exclusive with the state operand. - required: false - state: - description: - - The desired state of the cluster. 
- required: false - default: "present" - choices: [ "present", "absent" ] - stanza: - description: - - Cluster blueprint that defines membership and node attributes - required: false - name: - description: - - The name of the cluster to be created, deleted or whose - information is to be retrieved - required: false - -''' - -EXAMPLES = ''' -# Retrive information about an existing IBM Spectrum Scale cluster -- name: Retrieve IBM Spectrum Scale Cluster information - ibm_spectrumscale_cluster: - op: list - -# Create a new IBM Spectrum Scale Cluster -- name: Create an IBM Spectrum Scale Cluster - ibm_spectrumscale_cluster: - state: present - stanza: "/tmp/stanza" - name: "node1.domain.com" - -# Delete an existing IBM Spectrum Scale Cluster -- name: Delete an IBM Spectrum Scale Cluster - ibm_spectrumscale_cluster: - state: absent - name: "node1.domain.com" -''' - -RETURN = ''' -changed: - description: A boolean indicating if the module has made changes - type: boolean - returned: always - -msg: - description: The output from the cluster create/delete operations - type: str - returned: when supported - -rc: - description: The return code from the IBM Spectrum Scale mm command - type: int - returned: always - -results: - description: The JSON document containing the cluster information - type: str - returned: when supported -''' - -import os -import json -import sys -import traceback -from ansible.module_utils.basic import AnsibleModule - -try: - from ansible.module_utils.ibm_spectrumscale_utils import RC_SUCCESS, SpectrumScaleLogger -except: - from ibm_spectrumscale_utils import RC_SUCCESS, SpectrumScaleLogger - -try: - from ansible.module_utils.ibm_spectrumscale_cluster_utils import SpectrumScaleCluster -except: - from ibm_spectrumscale_cluster_utils import SpectrumScaleCluster - - -def main(): - logger = SpectrumScaleLogger.get_logger() - - logger.debug("------------------------------------") - logger.debug("Function Entry: ibm_spectrumscale_cluster.main()") - logger.debug("------------------------------------") - - # Setup the module argument specifications - scale_arg_spec = dict( - op = dict( - type='str', - choices=['get'], - required=False - ), - state = dict( - type='str', - choices=['present', 'absent'], - required=False - ), - stanza = dict( - type='str', - required=False - ), - name = dict( - type='str', - required=False - ) - ) - - - scale_req_if_args = [ - [ "state", "present", [ "stanza", "name" ] ], - [ "state", "absent", [ "name" ] ] - ] - - scale_req_one_of_args = [ - [ "op", "state" ] - ] - - # Instantiate the Ansible module with the given argument specifications - module = AnsibleModule( - argument_spec=scale_arg_spec, - required_one_of=scale_req_one_of_args, - required_if=scale_req_if_args - ) - - rc = RC_SUCCESS - msg = result_json = "" - state_changed = False - if module.params['op'] and "get" in module.params['op']: - # Retrieve the IBM Spectrum Scale cluster information - try: - scale_cluster = SpectrumScaleCluster() - cluster_info_dict = {} - cluster_info_dict["cluster_info"] = scale_cluster.get_cluster_dict() - result_json = json.dumps(cluster_info_dict) - msg = "Retrieve Cluster information successfully executed" - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - elif module.params['state']: - if "present" in module.params['state']: - # Create a new IBM Spectrum Scale cluster - try: - cmd_rc, stdout = SpectrumScaleCluster.create_cluster( - module.params['name'], - 
module.params['stanza'] - ) - rc = cmd_rc - msg = "Create Cluster successfully executed" - result_json = stdout - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - else: - # Delete the existing IBM Spectrum Scale cluster - try: - cmd_rc, stdout = SpectrumScaleCluster.delete_cluster( - module.params['name'] - ) - rc = cmd_rc - msg = "Delete Cluster successfully executed" - result_json = stdout - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - - - if rc == RC_SUCCESS: - state_changed = True - - logger.debug("------------------------------------") - logger.debug("Function Exit: ibm_spectrumscale_cluster.main()") - logger.debug("------------------------------------") - - SpectrumScaleLogger.shutdown() - - # Module is done. Return back the result - module.exit_json(changed=state_changed, msg=msg, rc=rc, result=result_json) - - -if __name__ == '__main__': - main() diff --git a/roles/custom_module/library/ibm_spectrumscale_filesystem.py b/roles/custom_module/library/ibm_spectrumscale_filesystem.py deleted file mode 100644 index 5bac8e40..00000000 --- a/roles/custom_module/library/ibm_spectrumscale_filesystem.py +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -ANSIBLE_METADATA = { - 'status': ['preview'], - 'supported_by': 'IBM', - 'metadata_version': '1.0' - } - - -DOCUMENTATION = ''' ---- -module: ibm_spectrumscale_filesystem -short_description: IBM Spectrum Scale Filesystem Management -version_added: "0.0" - -description: - - This module can be used to create or delete an IBM Spectrum Scale - filesystem or retrieve information about the filesystem. - -options: - op: - description: - - An operation to execute on the IBM Spectrum Scale filesystem. - Mutually exclusive with the state operand. - required: false - state: - description: - - The desired state of the filesystem. 
- required: false - default: "present" - choices: [ "present", "absent" ] - stanza: - description: - - Filesystem blueprint that defines membership and NSD attributes - required: false - name: - description: - - The name of the filesystem to be created, deleted or whose - information is to be retrieved - required: false - block_size: - description: - - The filesystem blocksize - required: false - default_metadata_replicas: - description: - - The filesystem defaultMetadataReplicas - required: false - default_data_replicas: - description: - - The filesystem defaultDataReplicas - required: false - num_nodes: - description: - - The filesystem numNodes - required: false - automatic_mount_option: - description: - - The filesystem automaticMountOption - required: false - default_mount_point: - description: - - The filesystem defaultMountPoint - required: false - -''' - -EXAMPLES = ''' -# Retrive information about an existing IBM Spectrum Scale filesystem -- name: Retrieve IBM Spectrum Scale filesystem information - ibm_spectrumscale_filesystem: - op: get - -# Create a new IBM Spectrum Scale Filesystem -- name: Create an IBM Spectrum Scale filesystem - ibm_spectrumscale_filesystem: - state: present - stanza: "/tmp/filesystem-stanza" - name: "FS1" - -# Delete an existing IBM Spectrum Scale Filesystem -- name: Delete an IBM Spectrum Scale filesystem - ibm_spectrumscale_filesystem: - state: absent - name: "FS1" -''' - -RETURN = ''' -changed: - description: A boolean indicating if the module has made changes - type: boolean - returned: always - -msg: - description: The output from the filesystem create/delete operations - type: str - returned: when supported - -rc: - description: The return code from the IBM Spectrum Scale mm command - type: int - returned: always - -results: - description: The JSON document containing the filesystem information - type: str - returned: when supported -''' - -import json -import sys -import traceback -from ansible.module_utils.basic import AnsibleModule - -try: - from ansible.module_utils.ibm_spectrumscale_utils import RC_SUCCESS, SpectrumScaleLogger -except: - from ibm_spectrumscale_utils import RC_SUCCESS, SpectrumScaleLogger - -try: - from ansible.module_utils.ibm_spectrumscale_filesystem_utils import SpectrumScaleFS -except: - from ibm_spectrumscale_filesystem_utils import SpectrumScaleFS - - -def main(): - logger = SpectrumScaleLogger.get_logger() - - logger.debug("---------------------------------------") - logger.debug("Function Entry: ibm_spectrumscale_filesystem.main()") - logger.debug("---------------------------------------") - - # Setup the module argument specifications - scale_arg_spec = dict( - op = dict( - type='str', - choices=['get'], - required=False - ), - state = dict( - type='str', - choices=['present', 'absent'], - required=False - ), - stanza = dict( - type='str', - required=False - ), - name = dict( - type='str', - required=False - ), - block_size = dict( - type='str', - required=False - ), - num_nodes = dict( - type='str', - required=False - ), - default_metadata_replicas = dict( - type='str', - required=False - ), - default_data_replicas = dict( - type='str', - required=False - ), - automatic_mount_option = dict( - type='str', - required=False - ), - default_mount_point = dict( - type='str', - required=False - ) - ) - - - scale_req_if_args = [ - [ "state", "present", [ "stanza", - "name", - "block_size", - "num_nodes", - "default_metadata_replicas", - "default_data_replicas", - "automatic_mount_option", - "default_mount_point" ] - ], - [ 
"state", "absent", [ "name" ] ] - ] - - scale_req_one_of_args = [ - [ "op", "state" ] - ] - - # Instantiate the Ansible module with the given argument specifications - module = AnsibleModule( - argument_spec=scale_arg_spec, - required_one_of=scale_req_one_of_args, - required_if=scale_req_if_args - ) - - rc = RC_SUCCESS - msg = result_json = "" - state_changed = False - if module.params['op'] and "get" in module.params['op']: - # Retrieve the IBM Spectrum Scale filesystem information - try: - result_dict = {} - filesystem_list = [] - - filesystems = SpectrumScaleFS.get_filesystems() - for fs in filesystems: - filesystem_info = {} - filesystem_info["deviceName"] = fs.get_device_name() - filesystem_info["properties"] = fs.get_properties_list() - filesystem_list.append(filesystem_info) - - result_dict["filesystems"] = filesystem_list - result_json = json.dumps(result_dict) - - msg = "Successfully retrieved filesystem information" - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - elif module.params['state']: - if "present" in module.params['state']: - # Create a new IBM Spectrum Scale cluster - try: - rc, result_json = SpectrumScaleFS.create_filesystem( - module.params['stanza'], - module.params['name'], - module.params["block_size"], - module.params["num_nodes"], - module.params["default_metadata_replicas"], - module.params["default_data_replicas"], - module.params["automatic_mount_option"], - module.params["default_mount_point"] - ) - msg = "Successfully created filesystem" - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - else: - # Delete the existing IBM Spectrum Scale cluster - try: - rc, result_json = SpectrumScaleFS.delete_filesystem( - module.params['name'] - ) - msg = "Successfully deleted filesystem" - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - module.fail_json(msg=e_msg) - - if rc == RC_SUCCESS: - state_changed = True - - logger.debug("---------------------------------------") - logger.debug("Function Exit: ibm_spectrumscale_filesystem.main()") - logger.debug("---------------------------------------") - - logger = SpectrumScaleLogger.shutdown() - - # Module is done. Return back the result - module.exit_json(changed=state_changed, msg=msg, rc=rc, result=result_json) - - -if __name__ == '__main__': - main() diff --git a/roles/custom_module/library/ibm_spectrumscale_node.py b/roles/custom_module/library/ibm_spectrumscale_node.py deleted file mode 100644 index 4a47e327..00000000 --- a/roles/custom_module/library/ibm_spectrumscale_node.py +++ /dev/null @@ -1,926 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -ANSIBLE_METADATA = { - 'status': ['preview'], - 'supported_by': 'IBM', - 'metadata_version': '1.0' - } - - -DOCUMENTATION = ''' ---- -module: ibm_spectrumscale_node -short_description: IBM Spectrum Scale Node Management -version_added: "0.1" - -description: - - This module can be used to add, remove or retrieve information - about an IBM Spectrum Scale Node(s) from the Cluster. - -options: - op: - description: - - An operation to execute on the IBM Spectrum Scale Node. - Mutually exclusive with the state operand. - required: false - state: - description: - - The desired state of the Node in relation to the cluster. - required: false - default: "present" - choices: [ "present", "absent" ] - nodefile: - description: - - Blueprint that defines all node attributes - required: false - name: - description: - - The name of the Node to be added, removed or whose - information is to be retrieved - required: false - -''' - -EXAMPLES = ''' -# Retrive information about an existing IBM Spectrum Scale Node(s) -- name: Retrieve IBM Spectrum Scale Node information - ibm_spectrumscale_node: - op: list - -# Adds a Node to the IBM Spectrum Scale Cluster -- name: Add node to IBM Spectrum Scale Cluster - ibm_spectrumscale_node: - state: present - nodefile: "/tmp/nodefile" - name: "node1.domain.com" - -# Delete an existing IBM Spectrum Node from the Cluster -- name: Delete an IBM Spectrum Scale Node from Cluster - ibm_spectrumscale_node: - state: absent - name: "node1.domain.com" -''' - -RETURN = ''' -changed: - description: A boolean indicating if the module has made changes - type: boolean - returned: always - -msg: - description: The output from the cluster create/delete operations - type: str - returned: when supported - -rc: - description: The return code from the IBM Spectrum Scale mm command - type: int - returned: always - -results: - description: The JSON document containing the cluster information - type: str - returned: when supported -''' - -import os -import re -import sys -import json -import time -import logging -import traceback -from ansible.module_utils.basic import AnsibleModule - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, RC_SUCCESS, \ - parse_aggregate_cmd_output, \ - SpectrumScaleLogger, \ - SpectrumScaleException -except Exception as e: - print(e) - from ibm_spectrumscale_utils import runCmd, RC_SUCCESS, parse_aggregate_cmd_output, \ - SpectrumScaleLogger, SpectrumScaleException - -try: - from ansible.module_utils.ibm_spectrumscale_disk_utils import SpectrumScaleDisk -except Exception as e: - print(e) - from ibm_spectrumscale_disk_utils import SpectrumScaleDisk - -try: - from ansible.module_utils.ibm_spectrumscale_df_utils import SpectrumScaleDf -except: - from ibm_spectrumscale_df_utils import SpectrumScaleDf - -try: - from ansible.module_utils.ibm_spectrumscale_nsd_utils import SpectrumScaleNSD -except: - from ibm_spectrumscale_nsd_utils import SpectrumScaleNSD - -try: - from ansible.module_utils.ibm_spectrumscale_filesystem_utils import SpectrumScaleFS -except: - from ibm_spectrumscale_filesystem_utils import SpectrumScaleFS - -try: - from ansible.module_utils.ibm_spectrumscale_cluster_utils import SpectrumScaleCluster, \ - SpectrumScaleNode -except: - from ibm_spectrumscale_cluster_utils import SpectrumScaleCluster, SpectrumScaleNode - -try: - from ansible.module_utils.ibm_spectrumscale_zimon_utils import get_zimon_collectors -except: - from ibm_spectrumscale_zimon_utils import get_zimon_collectors - 
-############################################################################### -## ## -## Helper Functions ## -## ## -############################################################################### - -def get_all_nsds_of_node(logger, instance): - """ - This function performs "mmlsnsd -X -Y". - Args: - instance (str): instance for which disks are use by filesystem. - region (str): Region of operation - Returns: - all_disk_names (list): Disk names in list format. - Ex: [nsd_1a_1_0, nsd_1c_1_0, nsd_1c_d_1] - """ - logger.debug("Function Entry: get_all_nsds_of_node. " - "Args: instance={0}".format(instance)) - nsd_list = [] - nsd_list = SpectrumScaleNSD.get_all_nsd_info() - - all_nsd_names = [] - for nsd in nsd_list: - if nsd.get_remarks() == 'server node' and instance in nsd.get_server_list(): - all_nsd_names.append(nsd.get_name()) - - logger.debug("Function Exit: get_all_nsds_of_node(). " - "Return Params: all_nsd_names={0} ".format(all_nsd_names)) - - return all_nsd_names - - -def gpfs_df_disk(logger, fs_name): - """ - This function performs "mmdf" to obtain disk capacities. - Args: - fs_name (str): Filesystem name associated with the disks. - Returns: - disk_size_map (dict): Disk name vs. free block size vs. percent - free blocks. - Ex: { - 'nsd_1a_1_0': {'free_size': 10485760, - 'used_size': 480256, - 'percent': 95}, - 'nsd_1c_1_0': {'free_size': 10485760, - 'used_size': 480256, - 'percent': 95} - } - """ - logger.debug("Function Entry: gpfs_df_disk(). " - "Args: fs_name={0}".format(fs_name)) - - nsd_df_list = SpectrumScaleDf.get_df_info(fs_name) - disk_size_map = {} - for nsd_df in nsd_df_list: - total = nsd_df.get_disk_size() - free = nsd_df.get_free_blocks() - used = total - free - free_block_pct = nsd_df.get_free_blocks_pct() - disk = nsd_df.get_nsd_name() - disk_size_map[disk] = { - 'free_size': free, - 'used_size': used, - 'percent': free_block_pct - } - - logger.debug("Function Exit: gpfs_df_disk(). " - "Return Params: disk_size_map={0} ".format(disk_size_map)) - - return disk_size_map - - -def get_node_nsd_info(logger): - logger.debug("Function Entry: get_node_nsd_info().") - - nsd_list = SpectrumScaleNSD.get_all_nsd_info() - - node_nsd_map = {} - nsd_node_map = {} - - for nsd in nsd_list: - if nsd.get_remarks() == 'server node': - # Populate the node_nsd_map data structure - nsd_list = [] - for node_name in nsd.get_server_list(): - if node_name in list(node_nsd_map.keys()): - nsd_list = node_nsd_map[node_name] - nsd_list.append(nsd.get_name()) - node_nsd_map[node_name] = nsd_list - - # Populate the nsd_node_map data structure - host_list = [] - if nsd.get_name() in list(nsd_node_map.keys()): - host_list = nsd_node_map[nsd.get_name()] - for server in nsd.get_server_list(): - host_list.append(server) - nsd_node_map[nsd.get_name()] = host_list - - logger.debug("Function Exit: get_node_nsd_info(). 
" - "Return Params: node_nsd_map={0} " - "nsd_node_map={1}".format(node_nsd_map, nsd_node_map)) - - return node_nsd_map, nsd_node_map - - -############################################################################### -## ## -## Functions to remove node(s) from cluster ## -## ## -############################################################################### - -# -# Retrieve the mapping of Filesystems to NSDs -# -# Returns: -# fs_to_nsd_map (dict): Dict of fs names and SpectrumScaleDisk objects -# -def get_filesystem_to_nsd_mapping(logger): - logger.debug("Function Entry: get_filesystem_to_nsd_mapping().") - - fs_to_nsd_map = {} - - # Retrieve all filesystems on this cluster - fs_instance_list = SpectrumScaleFS.get_filesystems() - - # For each filesystem, determine the Filesystem to NSD mapping - for fs in fs_instance_list: - - # Get all NSDs for this Filesystem - nsds_for_fs = SpectrumScaleDisk.get_all_disk_info(fs.get_device_name()) - - for nsd in nsds_for_fs: - nsd_list = [] - - # If an entry already exists for the File system, then - # simply add the new NSD to the list - if fs.get_device_name() in list(fs_to_nsd_map.keys()): - nsd_list = fs_to_nsd_map[fs.get_device_name()] - - nsd_list.append(nsd) - fs_to_nsd_map[fs.get_device_name()] = nsd_list - - logger.debug("Function Exit: get_filesystem_to_nsd_mapping(). " - "Return Params: fs_to_nsd_map={0} ".format(fs_to_nsd_map)) - - return fs_to_nsd_map - - -def check_cluster_health(logger): - logger.debug("Function Entry: check_cluster_health(). ") - - unhealthy_nodes = [] - all_nodes_state = SpectrumScaleNode.get_state() - - for node_name, state in list(all_nodes_state.items()): - if ("down" in state or - "arbitrating" in state or - "unknown" in state): - unhealthy_nodes.append(node_name) - - if unhealthy_nodes: - unhealthy_nodes_str = ' '.join(map(str, unhealthy_nodes)) - error_msg = ("The following node(s) \"{0}\" is(are) currently not up. " - "Ensure all nodes in the cluster are fully operational " - "before retrying the operation.".format(unhealthy_nodes_str)) - logger.error(error_msg) - raise SpectrumScaleException(error_msg, "", [], -1, "", "") - - logger.debug("Function Exit: check_cluster_health(). ") - - -def check_nodes_exist(logger, nodes_to_be_deleted): - logger.debug("Function Entry: check_nodes_exist(). " - "Args: nodes_to_be_deleted={0}".format(nodes_to_be_deleted)) - - logger.info("Checking if node(s) marked for removal exist in the cluster") - filtered_nodes_to_be_deleted = [] - existing_node_list = SpectrumScaleCluster().get_nodes() - for node_to_del in nodes_to_be_deleted: - for existing_node in existing_node_list: - if (node_to_del in existing_node.get_daemon_node_name() or - node_to_del in existing_node.get_admin_node_name() or - node_to_del in existing_node.get_ip_address()): - filtered_nodes_to_be_deleted.append(existing_node) - - logger.debug("Function Exit: check_nodes_exist(). " - "Return Params: filtered_nodes_to_be_deleted=" - "{0} ".format(filtered_nodes_to_be_deleted)) - - return filtered_nodes_to_be_deleted - - -def check_roles_before_delete(logger, existing_node_list_to_del): - logger.debug("Function Entry: check_roles_before_delete(). 
" - "Args: existing_node_list_to_del=" - "{0}".format(existing_node_list_to_del)) - - logger.info("Checking the designations for all nodes marked for removal") - - for node_to_del in existing_node_list_to_del: - # Do not delete nodes that are designated as "quorum", "manager", - # "gateway", "ces", "TCT", "SNMP" - if (node_to_del.is_quorum_node() or - node_to_del.is_manager_node() or - node_to_del.is_gateway_node() or - node_to_del.is_ces_node() or - node_to_del.is_tct_node() or - node_to_del.is_snmp_node()): - exp_msg = ("Cannot remove node {0} since it is designated " - "as either a quorum, gateway, CES, TCT or SNMP " - "node. Re-run the current command without " - "{1}".format(node_to_del.get_admin_node_name(), - node_to_del.get_admin_node_name())) - logger.error(exp_msg) - raise SpectrumScaleException(exp_msg, "", [], -1, "", "") - - # TODO: Should we also check the Zimon Collector Nodes - # zimon_col_nodes = get_zimon_collectors() - - logger.debug("Function Exit: check_roles_before_delete().") - - -def check_disk_health(logger, fs_nsd_map): - logger.debug("Function Entry: check_disk_health(). " - "Args fs_nsd_map={0}".format(fs_nsd_map)) - - unhealthy_disks = [] - for fs_name, disk_list in list(fs_nsd_map.items()): - for disk in disk_list: - if "down" in disk.get_availability(): - unhealthy_disks.append(disk.get_nsd_name()) - - if unhealthy_disks: - unhealthy_disks_str = ' '.join(map(str, unhealthy_disks)) - error_msg = ("The following disks \"{0}\" are currently not healthy. " - "Ensure all disks in the cluster are healthy before " - "retrying the operation.".format(unhealthy_disks_str)) - logger.error(error_msg) - raise SpectrumScaleException(error_msg, "", [], -1, "", "") - - logger.debug("Function Exit: check_disk_health(). ") - - -def remove_multi_attach_nsd(logger, nodes_to_be_deleted): - logger.debug("Function Entry: remove_multi_attach_nsd(). " - "Args nodes_to_be_deleted={0}".format(nodes_to_be_deleted)) - - logger.info("Checking node(s) for multi-node attached NSD(s)") - - # Iterate through each server to be deleted - node_map, nsd_map = get_node_nsd_info(logger) - for node_to_delete in nodes_to_be_deleted: - logger.debug("Processing all NSDs on node={0} for " - "removal".format(node_to_delete.get_admin_node_name())) - #node_map, nsd_map = get_node_nsd_info(logger) - - # Check if the node to be deleted has access to any NSDs - #if node_to_delete in node_map.keys(): - if node_to_delete.get_admin_node_name() in list(node_map.keys()): - nsds_to_delete_list = node_map[node_to_delete.get_admin_node_name()] - - # For each Node, check all the NSDS it has access to. If the - # Node has access to an NSD that can also be accessed from other - # NSD servers, then we can simply modify the server access list - # through the mmchnsd command - for nsd_to_delete in nsds_to_delete_list: - # Clone list to avoid modifying original content - nsd_attached_to_nodes = (nsd_map[nsd_to_delete])[:] - nsd_attached_to_nodes.remove(node_to_delete.get_admin_node_name()) - if len(nsd_attached_to_nodes) >= 1: - # This node has access to an NSD, that can also be - # accessed by other NSD servers. Therefore modify the - # server access list - logger.info("Removing server access to NSD {0} from node " - "{1}".format(nsd_to_delete, - node_to_delete.get_admin_node_name())) - SpectrumScaleNSD.remove_server_access_to_nsd(nsd_to_delete, - node_to_delete.get_admin_node_name(), - nsd_attached_to_nodes) - - # All "mmchnsd" calls are asynchronous. 
Therefore wait here till all - # modifications are committed before proceeding further. For now just - # sleep but we need to enhance this to ensure the async op has completed - time.sleep(10) - - logger.debug("Function Exit: remove_multi_attach_nsd(). ") - - -# -# This function performs removal / termination of nodes from the IBM Spectrum -# Scale cluster. If the node is a server node that has access to NSD(s), then -# we attempt to remove access to this NSD (if the NSD is a shared NSD) or -# delete access to it (if its a dedicated NSD). -# -# Args: -# node_names_to_delete: Nodes to be deleted from the cluster -# -# Return: -# rc: Return code -# msg: Output message -def remove_nodes(logger, node_names_to_delete): - logger.debug("Function Entry: remove_nodes(). " - "Args: node_list={0}".format(node_names_to_delete)) - - rc = RC_SUCCESS - msg = result_json = "" - removed_node_list = [] - - logger.info("Attempting to remove node(s) {0} from the " - "cluster".format(' '.join(map(str, node_names_to_delete)))) - - # TODO: The cluster health check should only fail if we are attempting - # to remove NSD servers while other NSD servers are down. The - # removal of compute nodes should be permitted even if NSD - # servers are down. For now disable check until correct algorithm - # can be implemented - # Ensure all nodes in the cluster are healthy - #check_cluster_health(logger) - - # Check that the list of nodes to delete already exist. If not, - # simply ignore - nodes_to_delete = check_nodes_exist(logger, node_names_to_delete) - - if len(nodes_to_delete) == 0: - msg = str("All node(s) marked for removal ({0}) are already not part " - "of the cluster".format(' '.join(map(str, - node_names_to_delete)))) - logger.info(msg) - return rc, msg, result_json - - # Precheck nodes to make sure they do not have any roles that should - # not be deleted - check_roles_before_delete(logger, nodes_to_delete) - - # For each Filesystem, Get the Filesystem to NSD (disk) mapping - fs_nsd_map = get_filesystem_to_nsd_mapping(logger) - - # TODO: The disk health check should only fail if we are attempting - # to remove NSD servers when any disks are down. The removal - # of compute nodes should be permitted even if disks are down. - # For now disable check until correct algorithm can be implemented - #check_disk_health(logger, fs_nsd_map) - - # An NSD node can have access to a multi attach NSD (shared NSD) or - # dedicated access to the NSD (FPO model) or a combination of both. - - # First modify the Shared NSDs and remove access to all NSD Nodes - # that are to be deleted. Note: As long as these are Shared NSD's - # another NSD server will continue to have access to the NSD (and - # therefore Data) - remove_multi_attach_nsd(logger, nodes_to_delete) - - # Finally delete any dedicated NSDs (this will force the data to be - # copied to another NSD in the same Filesystem). Finally delete the - # node from the cluster - - logger.debug("Identified all filesystem to disk mapping: " - "{0}".format(fs_nsd_map)) - - for node_to_del_obj in nodes_to_delete: - node_to_del = node_to_del_obj.get_admin_node_name() - logger.debug("Operating on server: {0}".format(node_to_del)) - - # For each node to be deleted, retrieve the NSDs (disks) on the node - all_node_disks = get_all_nsds_of_node(logger, node_to_del) - logger.debug("Identified disks for server ({0}): " - "{1}".format(node_to_del, all_node_disks)) - - # The Node does not have any disks on it (compute node). 
Delete the - # node without any more processing - if len(all_node_disks) == 0: - logger.info("Unmounting filesystem(s) on {0}".format(node_to_del)) - SpectrumScaleFS.unmount_filesystems(node_to_del, wait=True) - - logger.info("Shutting down node {0}".format(node_to_del)) - SpectrumScaleNode.shutdown_node(node_to_del, wait=True) - - logger.info("Deleting compute node {0}".format(node_to_del)) - SpectrumScaleCluster.delete_node(node_to_del) - - removed_node_list.append(node_to_del) - continue - - # Generate a list of NSD (disks) on the host to be deleted for - # each filesystem - # - # fs_disk_map{} contains the following: - # Filesystem Name -> NSDs on the host to be deleted - fs_disk_map = {} - for fs_name, disks in list(fs_nsd_map.items()): - node_specific_disks = [] - for disk_instance in disks: - if disk_instance.get_nsd_name() in all_node_disks: - node_specific_disks.append(disk_instance.get_nsd_name()) - fs_disk_map[fs_name] = node_specific_disks - - logger.debug("Identified filesystem to disk map for server " - "({0}): {1}".format(node_to_del, fs_disk_map)) - - for fs in fs_disk_map: - disk_cap = gpfs_df_disk(logger, fs) - logger.debug("Identified disk capacity for filesystem " - "({0}): {1}".format(fs, disk_cap)) - - # Algorithm used for checking at-least 20% free space during - # mmdeldisk in progress; - # - Identify the size of data stored in disks going to be - # deleted. - # - Identify the free size of the filesystem - # (excluding the disk going to be deleted) - # - Allow for disk deletion, if total_free size is 20% greater - # even after moving used data stored in disk going to be deleted. - size_to_be_del = 0 - for disk in fs_disk_map[fs]: - size_to_be_del += disk_cap[disk]['used_size'] - logger.debug("Identified data size going to be deleted from " - "filesystem ({0}): {1}".format(fs, size_to_be_del)) - - other_disks = [] - for disk_name in disk_cap: - if disk_name not in fs_disk_map[fs]: - other_disks.append(disk_name) - logger.debug("Identified other disks of the filesystem " - "({0}): {1}".format(fs, other_disks)) - - if not other_disks: - msg = str("No free disks available to restripe data " - "for the filesystem {0}".format(fs)) - logger.error(msg) - raise SpectrumScaleException(msg=msg, mmcmd="", cmdargs=[], - rc=-1, stdout="", stderr="") - - size_avail_after_migration, total_free = 0, 0 - for disk in other_disks: - # Accumulate free size on all disks. - total_free += disk_cap[disk]['free_size'] - logger.debug("Identified free size in other disks of the " - "filesystem ({0}): {1}".format(fs, total_free)) - - size_avail_after_migration = total_free - size_to_be_del - logger.debug("Expected size after restriping of the filesystem " - "({0}): {1}".format(fs, size_avail_after_migration)) - - percent = int(size_avail_after_migration*100/total_free) - logger.debug("Expected percentage of size left after restriping " - "of the filesystem ({0}): {1}".format(fs, percent)) - - if percent < 20: - msg = ("Not enough space left for restriping data for " - "filesystem {0}".format(fs)) - logger.error(msg) - raise SpectrumScaleException(msg=msg, mmcmd="", cmdargs=[], - rc=-1, stdout="", stderr="") - - if fs_disk_map[fs]: - # mmdeldisk will not be hit if there are no disks to delete. - logger.info("Deleting disk(s) {0} from node " - "{1}".format(' '.join(map(str, fs_disk_map[fs])), - node_to_del)) - SpectrumScaleDisk.delete_disk(node_to_del, fs, fs_disk_map[fs]) - - if all_node_disks: - # mmdelnsd will not be hot if there are no disks to delete. 
- logger.info("Deleting all NSD(s) {0} attached to node " - "{1}".format(' '.join(map(str, all_node_disks)), - node_to_del)) - SpectrumScaleNSD.delete_nsd(all_node_disks) - - logger.info("Unmounting filesystem(s) on {0}".format(node_to_del)) - SpectrumScaleFS.unmount_filesystems(node_to_del, wait=True) - - logger.info("Shutting down node {0}".format(node_to_del)) - SpectrumScaleNode.shutdown_node(node_to_del, wait=True) - - logger.info("Deleting storage node {0}".format(node_to_del)) - SpectrumScaleCluster.delete_node(node_to_del) - - removed_node_list.append(node_to_del) - - msg = str("Successfully removed node(s) {0} from the " - "cluster".format(' '.join(map(str, removed_node_list)))) - - logger.info(msg) - logger.debug("Function Exit: remove_nodes(). " - "Return Params: rc={0} msg={1}".format(rc, msg)) - - return rc, msg, result_json - - -############################################################################### -## ## -## Functions to retrieve Node information ## -## ## -############################################################################### - -def get_node_info_as_json(logger, node_names=[]): - logger.debug("Function Entry: get_node_info_as_json(). " - "Args: node_names={0}".format(node_names)) - - rc = 0 - msg = result_json = "" - node_info_dict = {} - node_info_list = [] - - cluster = SpectrumScaleCluster() - node_instance_list = cluster.get_nodes() - - for node_instance in node_instance_list: - if len(node_names) == 0: - node_info_list.append(node_instance.get_node_dict()) - else: - if (node_instance.get_ip_address() in node_names or - node_instance.get_admin_node_name() in node_names or - node_instance.get_daemon_node_name() in node_names): - node_info_list.append(node_instance.get_node_dict()) - - node_info_dict["clusterNodes"] = node_info_list - result_json = json.dumps(node_info_dict) - msg = "List cluster successfully executed" - - logger.debug("Function Exit: get_node_info_as_json(). " - "Return Params: rc={0} msg={1} " - "result_json={2}".format(rc, msg, result_json)) - - return rc, msg, result_json - - -def get_node_status_as_json(logger, node_names=[]): - logger.debug("Function Entry: get_node_status_as_json(). " - "Args: node_names={0}".format(node_names)) - - rc = 0 - msg = result_json = "" - node_status = {} - - node_state = SpectrumScaleNode.get_state(node_names) - result_json = json.dumps(node_state) - msg = "Cluster status successfully executed" - - logger.debug("Function Exit: get_node_status_as_json(). " - "Return Params: rc={0} msg={1} " - "result_json={2}".format(rc, msg, result_json)) - - return rc, msg, result_json - - -############################################################################### -## ## -## Functions to Stop/Start Node(s) in the Cluster ## -## ## -############################################################################### - -def start_nodes(logger, node_names): - logger.debug("Function Entry: start_nodes(). " - "Args: node_names={0}".format(node_names)) - - rc = RC_SUCCESS - msg = stdout = result_json = "" - - for node in node_names: - logger.info("Attempting to start node {0}".format(node)) - rc, stdout = SpectrumScaleNode.start_node(node, wait=True) - - msg = str("Successfully started node(s) " - "{0}".format(' '.join(map(str, node_names)))) - - logger.info(msg) - - logger.debug("Function Exit: start_nodes(). " - "Return Params: rc={0} msg={1} " - "result_json={2}".format(rc, msg, result_json)) - - return rc, msg, result_json - - -def stop_nodes(logger, node_names): - logger.debug("Function Entry: stop_nodes(). 
" - "Args: node_names={0}".format(node_names)) - - rc = RC_SUCCESS - msg = stdout = result_json = "" - - for node in node_names: - logger.info("Attempting to stop node {0}".format(node)) - rc, stdout = SpectrumScaleNode.shutdown_node(node, wait=True) - - msg = str("Successfully stopped node(s) " - "{0}".format(' '.join(map(str, node_names)))) - - logger.info(msg) - - logger.debug("Function Exit: stop_nodes(). " - "Return Params: rc={0} msg={1} " - "result_json={2}".format(rc, msg, result_json)) - - return rc, msg, result_json - - -############################################################################### -## ## -## Functions to add Node(s) to the Cluster ## -## ## -############################################################################### - -def add_nodes(logger, node_names, stanza, license): - logger.debug("Function Entry: add_nodes(). " - "Args: node_names={0}".format(node_names)) - - rc = RC_SUCCESS - msg = stdout = result_json = "" - - logger.info("Attempting to add node(s) {0} to the " - "cluster".format(' '.join(map(str, node_names)))) - - rc, stdout, stderr = SpectrumScaleCluster.add_node(node_names, stanza) - - logger.info("Attempting to apply licenses to newly added " - "node(s)".format(' '.join(map(str, node_names)))) - - rc, stdout = SpectrumScaleCluster.apply_license(node_names, license) - - for node in node_names: - logger.info("Attempting to start node {0}".format(node)) - rc, stdout = SpectrumScaleNode.start_node(node, wait=True) - - msg = str("Successfully added node(s) {0} to the " - "cluster".format(' '.join(map(str, node_names)))) - - logger.info(msg) - - logger.debug("Function Exit: add_nodes(). " - "Return Params: rc={0} msg={1} " - "result_json={2}".format(rc, msg, result_json)) - - return rc, msg, result_json - - -############################################################################### -## ## -## Main Function ## -## ## -############################################################################### - -def main(): - logger = SpectrumScaleLogger.get_logger() - - logger.debug("----------------------------------") - logger.debug("Function Entry: ibm_spectrumscale_node.main()") - logger.debug("----------------------------------") - - # Setup the module argument specifications - scale_arg_spec = dict( - op = dict( - type='str', - choices=['get', 'status', 'start', 'stop'], - required=False - ), - state = dict( - type='str', - choices=['present', 'absent'], - required=False - ), - nodefile = dict( - type='str', - required=False - ), - name = dict( - type='str', - required=False - ), - license = dict( - type='str', - choices=['server', 'client', 'fpo'], - required=False - ), - ) - - - scale_req_args = [ - [ "state", "present", [ "nodefile", "name", "license" ] ], - [ "state", "absent", [ "name" ] ] - ] - - - scale_req_one_of_args = [ - [ "op", "state" ] - ] - - scale_mutual_ex_args = [ - [ "get", "status", "start", "stop" ] - ] - - # Instantiate the Ansible module with the given argument specifications - module = AnsibleModule( - argument_spec=scale_arg_spec, - required_one_of=scale_req_one_of_args, - required_if=scale_req_args, - mutually_exclusive=scale_mutual_ex_args - ) - - rc = -1 - msg = result_json = "" - state_changed = False - - try: - if module.params['op']: - node_names = [] - if module.params['name']: - node_names = module.params['name'].split(',') - - if "get" in module.params['op']: - # Retrieve the IBM Spectrum Scale node information - rc, msg, result_json = get_node_info_as_json(logger, - node_names) - elif "status" in module.params['op']: - 
# Retrieve the IBM Spectrum Scale Node state - rc, msg, result_json = get_node_status_as_json(logger, - node_names) - elif "start" in module.params['op']: - # Start the IBM Spectrum Scale Server(s) - rc, msg, result_json = start_nodes(logger, node_names) - elif "stop" in module.params['op']: - # Stop the IBM Spectrum Scale Server(s) - rc, msg, result_json = stop_nodes(logger, node_names) - - elif module.params['state']: - listofserver = module.params['name'] - if "present" in module.params['state']: - # Create a new IBM Spectrum Scale cluster - rc, msg, result_json = add_nodes(logger, - listofserver.split(','), - module.params['nodefile'], - module.params['license']) - else: - # Delete the existing IBM Spectrum Scale cluster - rc, msg, result_json = remove_nodes(logger, - listofserver.split(',')) - - if rc == RC_SUCCESS: - state_changed = True - - except SpectrumScaleException as sse: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(sse), st)) - logger.debug(e_msg) - failure_msg = "FAILED: " + sse.get_message() - module.fail_json(msg=failure_msg, changed=False, rc=-1, - result=result_json, stderr=str(st)) - except Exception as e: - st = traceback.format_exc() - e_msg = ("Exception: {0} StackTrace: {1}".format(str(e), st)) - logger.debug(e_msg) - failure_msg = "FAILED: " + e.get_message() - module.fail_json(msg=failure_msg, changed=False, rc=-1, - result=result_json, stderr=str(st)) - - logger.debug("---------------------------------") - logger.debug("Function Exit: ibm_spectrumscale_node.main()") - logger.debug("---------------------------------") - - SpectrumScaleLogger.shutdown() - - # Module is done. Return back the result - if rc == RC_SUCCESS: - module.exit_json(msg=msg, changed=state_changed, rc=rc, result=result_json) - else: - failure_msg = "FAILED: " + msg - module.fail_json(msg=failure_msg, changed=state_changed, rc=rc, - result=result_json) - - -if __name__ == '__main__': - main() diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_cluster_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_cluster_utils.py deleted file mode 100644 index a1e9f28a..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_cluster_utils.py +++ /dev/null @@ -1,643 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import os -import json -import time - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - parse_aggregate_cmd_output, parse_unique_records, GPFS_CMD_PATH, \ - RC_SUCCESS, SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, parse_aggregate_cmd_output, \ - parse_unique_records, GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException - - -class SpectrumScaleNode: - - def __init__(self, node_dict): - self.node = node_dict - self.node_number = int(self.node["nodeNumber"]) - self.daemon_name = self.node["daemonNodeName"] - self.admin_name = self.node["adminNodeName"] - self.ip = self.node["ipAddress"] - self.admin_login = self.node["adminLoginName"] - self.designation = self.node["designation"] - self.other_roles = self.node["otherNodeRoles"] - self.role_alias = self.node["otherNodeRolesAlias"] - - def get_node_number(self): - return self.node_number - - def get_daemon_node_name(self): - return self.daemon_name - - def get_admin_node_name(self): - return self.admin_name - - def get_ip_address(self): - return self.ip - - def get_admin_login_name(self): - return self.admin_login - - def get_designation(self): - # The "designation" field can have the following values: - # "quorumManager" - # "quorum" - # "manager" - # "" - return self.designation - - def get_other_node_roles(self): - # The "otherNodeRoles" field can have a comma seperated list of - # one of the following alphabets - # "M" - cloudNodeMarker - # "G" - gatewayNode - # "C" - cnfsNode - # "X" - cesNode - # "C" - ctdbNode - # "I" - ioNode - # "s" - snmpAgent - # "t" - tealAgent - # "Z" - perfmonNode - # "E" - cnfsEnabled - # "D" - cnfsDisabled - # "new" - NEW_NODE - # "" - OLD_NODE - # "Q" - quorumNode - # "N" - nonQuorumNode - return self.other_roles - - def get_other_node_roles_alias(self): - # The "otherNodeRolesAlias" field can have a comma seperated list of - # one of the following - # "gateway" - # "ctdb" - # "ionode" - # "snmp_collector" - # "teal_collector" - # "perfmon" - # "ces" - # "cnfs" - return self.role_alias - - def is_quorum_node(self): - if "quorum" in self.designation: - return True - return False - - def is_manager_node(self): - if "manager" in (self.designation).lower(): - return True - return False - - def is_tct_node(self): - if "M" in self.other_roles: - return True - return False - - def is_gateway_node(self): - if ("G" in self.other_roles or - "gateway" in self.role_alias): - return True - return False - - def is_ctdb_node(self): - if "ctdb" in self.role_alias: - return True - return False - - def is_io_node(self): - if ("I" in self.other_roles or - "ionode" in self.role_alias): - return True - return False - - def is_snmp_node(self): - if ("s" in self.other_roles or - "snmp_collector" in self.role_alias): - return True - return False - - def is_teal_node(self): - if ("t" in self.other_roles or - "teal_collector" in self.role_alias): - return True - return False - - def is_perfmon_node(self): - if ("Z" in self.other_roles or - "perfmon" in self.role_alias): - return True - return False - - def is_ces_node(self): - if ("X" in self.other_roles or - "ces" in self.role_alias): - return True - return False - - def is_cnfs_node(self): - if ("E" in self.other_roles or - "D" in self.other_roles or - "cnfs" in self.role_alias): - return True - return False - - def to_json(self): - return json.dumps(self.node) - - def get_node_dict(self): - return self.node - - def print_node(self): - print(("Node Number : {0}".format(self.get_node_number()))) - print(("Daemon Node Name : 
{0}".format(self.get_daemon_node_name()))) - print(("IP Address : {0}".format(self.get_ip_address()))) - print(("Admin Node Name : {0}".format(self.get_admin_node_name()))) - print(("Designation : {0}".format(self.get_designation()))) - print(("Other Node Roles : {0}".format(self.get_other_node_roles()))) - print(("Admin Login Name : {0}".format(self.get_admin_login_name()))) - print(("Other Node Roles Alias : {0}".format(self.get_other_node_roles_alias()))) - print(("Is Quorum Node : {0}".format(self.is_quorum_node()))) - print(("Is Manager Node : {0}".format(self.is_manager_node()))) - print(("Is TCT Node : {0}".format(self.is_tct_node()))) - print(("Is Gateway Node : {0}".format(self.is_gateway_node()))) - print(("Is CTDB Node : {0}".format(self.is_ctdb_node()))) - print(("Is IO Node : {0}".format(self.is_io_node()))) - print(("Is SNMP Node : {0}".format(self.is_snmp_node()))) - print(("Is Teal Node : {0}".format(self.is_teal_node()))) - print(("Is Perfmon Node : {0}".format(self.is_perfmon_node()))) - print(("Is CES Node : {0}".format(self.is_ces_node()))) - print(("Is CNFS Node : {0}".format(self.is_cnfs_node()))) - - - def __str__(self): - return str("Node Number : {0}\n" - "Daemon Node Name : {1}\n" - "IP Address : {2}\n" - "Admin Node Name : {3}\n" - "Designation : {4}\n" - "Other Node Roles : {5}\n" - "Admin Login Name : {6}\n" - "Other Node Roles Alias : {7}\n" - "Is Quorum Node : {8}\n" - "Is Manager Node : {9}\n" - "Is TCT Node : {10}\n" - "Is Gateway Node : {11}\n" - "Is CTDB Node : {12}\n" - "Is IO Node : {13}\n" - "Is SNMP Node : {14}\n" - "Is Teal Node : {15}\n" - "Is Perfmon Node : {16}\n" - "Is CES Node : {17}\n" - "Is CNFS Node : {18}".format( - self.get_node_number(), - self.get_daemon_node_name(), - self.get_ip_address(), - self.get_admin_node_name(), - self.get_designation(), - self.get_other_node_roles(), - self.get_admin_login_name(), - self.get_other_node_roles_alias(), - self.is_quorum_node(), - self.is_manager_node(), - self.is_tct_node(), - self.is_gateway_node(), - self.is_ctdb_node(), - self.is_io_node(), - self.is_snmp_node(), - self.is_teal_node(), - self.is_perfmon_node(), - self.is_ces_node(), - self.is_cnfs_node())) - - - @staticmethod - def get_state(node_names=[], admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - cmd = [] - mmcmd_idx = 1 - - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmgetstate")]) - - if len(node_names) == 0: - cmd.append("-a") - else: - # If a set of node names have ben provided, use that instead - node_name_str = ','.join(node_names) - cmd.append("-N") - cmd.append(node_name_str) - - cmd.append("-Y") - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Retrieving the node state failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - node_state_dict = parse_unique_records(stdout) - node_state_list = node_state_dict["mmgetstate"] - - node_state = {} - for node in node_state_list: - node_state[node["nodeName"]] = node["state"] - - return node_state - - - @staticmethod - def shutdown_node(node_name, wait=True, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - cmd = [] - mmcmd_idx = 1 - - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - if isinstance(node_name, str): - node_name_str = node_name - node_name_list = [node_name] - 
else: - node_name_str = ' '.join(node_name) - node_name_list = node_name - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmshutdown"), "-N", node_name_str]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Shutting down node failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - if wait: - # Wait for a maximum of 36 * 5 = 180 seconds (3 minutes) - MAX_RETRY = 36 - retry = 0 - done = False - while(not done and retry < MAX_RETRY): - time.sleep(5) - node_state = SpectrumScaleNode.get_state(node_name_list, admin_ip) - done = all("down" in state for state in list(node_state.values())) - retry = retry + 1 - - if not done: - raise SpectrumScaleException("Shutting down node(s) timed out", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], -1, "", - "Node state is not \"down\" after retries") - return rc, stdout - - - @staticmethod - def start_node(node_name, wait=True, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - cmd = [] - mmcmd_idx = 1 - - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - if isinstance(node_name, str): - node_name_str = node_name - node_name_list = [node_name] - else: - node_name_str = ' '.join(node_name) - node_name_list = node_name - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmstartup"), "-N", node_name_str]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Starting node failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - if wait: - # Wait for a maximum of 36 * 5 = 180 seconds (3 minutes) - MAX_RETRY = 36 - retry = 0 - done = False - while(not done and retry < MAX_RETRY): - time.sleep(5) - node_state = SpectrumScaleNode.get_state(node_name_list, admin_ip) - done = all("active" in state for state in list(node_state.values())) - retry = retry + 1 - - if not done: - raise SpectrumScaleException("Starting node(s) timed out", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], -1, "", - "Node state is not \"active\" after retries") - return rc, stdout - - -class SpectrumScaleCluster: - - def __retrieve_cluster_info(self, admin_ip): - stdout = stderr = "" - rc = RC_SUCCESS - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmlscluster"), "-Y"]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - if rc != RC_SUCCESS: - raise SpectrumScaleException("Retrieving the cluster information failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - return parse_aggregate_cmd_output(stdout, - ["clusterSummary", - "cnfsSummary", - "cesSummary"]) - - def __init__(self, admin_ip=None): - self.cluster_dict = self.__retrieve_cluster_info(admin_ip) - self.name = self.cluster_dict["clusterSummary"]["clusterName"] - self.c_id = self.cluster_dict["clusterSummary"]["clusterId"] - self.uid_domain = self.cluster_dict["clusterSummary"]["uidDomain"] - self.rsh_path = self.cluster_dict["clusterSummary"]["rshPath"] - self.rsh_sudo_wrapper = self.cluster_dict["clusterSummary"]["rshSudoWrapper"] - self.rcp_path = self.cluster_dict["clusterSummary"]["rcpPath"] - self.rcp_sudo_wrapper 
= self.cluster_dict["clusterSummary"]["rcpSudoWrapper"] - self.repository_type = self.cluster_dict["clusterSummary"]["repositoryType"] - self.primary_server = self.cluster_dict["clusterSummary"]["primaryServer"] - self.secondary_server = self.cluster_dict["clusterSummary"]["secondaryServer"] - - - def get_name(self): - return self.name - - def get_id(self): - return self.c_id - - def get_uid_domain(self): - return self.uid_domain - - def get_rsh_path(self): - return self.rsh_path - - def get_rsh_sudo_wrapper(self): - return self.rsh_sudo_wrapper - - def get_rcp_path(self): - return self.rcp_path - - def get_rcp_sudo_wrapper(self): - return self.rcp_sudo_wrapper - - def get_repository_type(self): - return self.repository_type - - def get_primary_server(self): - return self.primary_server - - def get_secondary_server(self): - return self.secondary_server - - def __str__(self): - return str("Cluster Name : {0}\n" - "Cluster ID : {1}\n" - "UID Domain : {2}\n" - "rsh Path : {3}\n" - "rsh Sudo Wrapper: {4}\n" - "rcp Path : {5}\n" - "rcp Sudo Wrapper: {6}\n" - "Repository Type : {7}\n" - "Primary Server : {8}\n" - "Secondary Server: {9}".format( - self.get_name(), - self.get_id(), - self.get_uid_domain(), - self.get_rsh_path(), - self.get_rsh_sudo_wrapper(), - self.get_rcp_path(), - self.get_rcp_sudo_wrapper(), - self.get_repository_type(), - self.get_primary_server(), - self.get_secondary_server())) - - def to_json(self): - return json.dumps(self.cluster_dict) - - def get_cluster_dict(self): - return self.cluster_dict - - def get_nodes(self): - node_list = [] - for node in self.cluster_dict["clusterNode"]: - node_instance = SpectrumScaleNode(node) - node_list.append(node_instance) - - return node_list - - @staticmethod - def delete_node(node_name, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - if isinstance(node_name, str): - node_name_str = node_name - else: - node_name_str = ' '.join(node_name) - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmdelnode"), "-N", node_name_str]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Deleting node from cluster failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - return rc, stdout - - - @staticmethod - def add_node(node_name, stanza_path, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - if isinstance(node_name, str): - node_name_str = node_name - else: - node_name_str = ' '.join(node_name) - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmaddnode"), - "-N", stanza_path, "--accept"]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Adding node to cluster failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - return rc, stdout, stderr - - - @staticmethod - def apply_license(node_name, license, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - if isinstance(node_name, str): - node_name_str = node_name - else: - node_name_str = ' '.join(node_name) - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = 
len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmchlicense"), license, - "--accept", "-N", node_name_str]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Changing license on node failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - return rc, stdout - - - @staticmethod - def create_cluster(name, stanza_path, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmcrcluster"), "-N", stanza_path, - "-C", name]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Creating cluster failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - return rc, stdout - - - @staticmethod - def delete_cluster(name, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmdelnode"), "-a"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Deleting cluster failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - return rc, stdout - - -def main(): - cluster = SpectrumScaleCluster() - print((cluster.to_json())) - print("\n") - - for node in cluster.get_nodes(): - print(node) - print("\n") - - -if __name__ == "__main__": - main() - diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_df_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_df_utils.py deleted file mode 100644 index 6a9dfea4..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_df_utils.py +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
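
All of the static helpers above (`delete_node`, `add_node`, `apply_license`, `create_cluster`, `delete_cluster`) share the same command-construction pattern: an optional `ssh <admin_ip>` prefix is prepended, and `mmcmd_idx` marks where the GPFS command begins so that, on failure, `SpectrumScaleException` can report the wrapper and `mm` command separately from the arguments. A standalone sketch of that pattern; the `build_mm_cmd` helper is hypothetical and introduced here only for illustration:

```python
import os

GPFS_CMD_PATH = "/usr/lpp/mmfs/bin"

def build_mm_cmd(mm_tool, args, admin_ip=None):
    # Optionally run the GPFS command on a remote admin node via ssh.
    cmd = []
    mmcmd_idx = 1
    if admin_ip:
        cmd.extend(["ssh", admin_ip])
        mmcmd_idx = len(cmd) + 1
    cmd.extend([os.path.join(GPFS_CMD_PATH, mm_tool)] + list(args))
    # cmd[0:mmcmd_idx] is the (optional) ssh wrapper plus the mm command;
    # cmd[mmcmd_idx:] are its arguments. These are the two slices passed to
    # SpectrumScaleException when a call fails.
    return cmd, mmcmd_idx

# build_mm_cmd("mmdelnode", ["-N", "node1"], admin_ip="10.0.0.1")
# -> (['ssh', '10.0.0.1', '/usr/lpp/mmfs/bin/mmdelnode', '-N', 'node1'], 3)
```
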
-# - - -import os -import json - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - parse_aggregate_cmd_output, parse_unique_records, GPFS_CMD_PATH, \ - RC_SUCCESS, SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, parse_aggregate_cmd_output, \ - parse_unique_records, GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException - - -class SpectrumScaleDf: - nsd_df = {} - - def __init__(self, nsd_df_dict): - self.node = nsd_df_dict - - def get_nsd_name(self): - nsd_name = self.node["nsdName"] - return nsd_name - - def get_storage_pool(self): - pool = self.node["storagePool"] - return pool - - def get_disk_size(self): - disk_size = self.node["diskSize"] - if disk_size: - return int(disk_size) - return 0 - - def get_failure_group(self): - fg = self.node["failureGroup"] - return fg - - def stores_meta_data(self): - meta = self.node["metadata"] - return meta - - def stores_data(self): - data = self.node["data"] - return data - - def get_free_blocks(self): - free_blocks = self.node["freeBlocks"] - if free_blocks: - return int(free_blocks) - return 0 - - def get_free_blocks_pct(self): - free_blocks_pct = self.node["freeBlocksPct"] - if free_blocks_pct: - return int(free_blocks_pct) - return 0 - - def get_free_fragments(self): - free_fragments = self.node["freeFragments"] - if free_fragments: - return int(free_fragments) - return 0 - - def get_free_fragments_pct(self): - free_fragments_pct = self.node["freeFragmentsPct"] - if free_fragments_pct: - return int(free_fragments_pct) - return 0 - - def get_disk_available_for_alloc(self): - disk_available_for_alloc = self.node["diskAvailableForAlloc"] - return disk_available_for_alloc - - def to_json(self): - return json.dumps(self.nsd_df_dict) - - def get_nsd_df_dict(self): - return self.nsd_df_dict - - def print_nsd_df(self): - print(("NSD Name : {0}".format(self.get_nsd_name()))) - print(("Storage Pool : {0}".format(self.get_storage_pool()))) - print(("Disk Size : {0}".format(self.get_disk_size()))) - print(("Failure Group : {0}".format(self.get_failure_group()))) - print(("Stores Metadata : {0}".format(self.stores_meta_data()))) - print(("Stores Data : {0}".format(self.stores_data()))) - print(("Free Blocks : {0}".format(self.get_free_blocks()))) - print(("Free Blocks % : {0}".format(self.get_free_blocks_pct()))) - print(("Free Fragments : {0}".format(self.get_free_fragments()))) - print(("Free Fragments % : {0}".format(self.get_free_fragments_pct()))) - print(("Disk Available For Alloc: {0}".format(self.get_disk_available_for_alloc()))) - - - @staticmethod - def get_df_info(filesystem_name, admin_ip=None): - nsd_df_info_list = [] - - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - # TODO - # The original code executed the command "/usr/lpp/mmfs/bin/mmdf -d -Y" - # but this did not work if there were multiple Pools with a separate System Pool. - # Therefore the "-d" flag has been removed. 
Check to see why the "-d" flag was - # was used in the first place - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmdf"), filesystem_name, "-Y"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Retrieving filesystem disk space usage failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - df_dict = parse_aggregate_cmd_output(stdout, ["poolTotal", "data", - "metadata", "fsTotal", - "inode"]) - - nsd_df_list = df_dict["nsd"] - - for nsd_df in nsd_df_list: - nsd_df_instance = SpectrumScaleDf(nsd_df) - nsd_df_info_list.append(nsd_df_instance) - - return nsd_df_info_list - - -def main(): - # TODO: Dynamically fetch the Filesystem Names - nsd_df_list = get_nsd_df_info("FS1") - for nsd_df in nsd_df_list: - nsd_df.print_nsd_df() - print("\n") - - -if __name__ == "__main__": - main() - diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_disk_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_disk_utils.py deleted file mode 100644 index 1f6837f4..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_disk_utils.py +++ /dev/null @@ -1,229 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
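
`get_df_info()` above wraps `mmdf <filesystem> -Y` and returns one `SpectrumScaleDf` instance per NSD. A minimal usage sketch, assuming the module is importable under its original name and that the code runs on a node with GPFS installed; "FS1" is an example device name:

```python
from ibm_spectrumscale_df_utils import SpectrumScaleDf

# Print per-NSD free-space figures as reported by mmdf.
for nsd in SpectrumScaleDf.get_df_info("FS1"):
    print("{0} ({1}): {2} free blocks, {3}% free".format(
        nsd.get_nsd_name(), nsd.get_storage_pool(),
        nsd.get_free_blocks(), nsd.get_free_blocks_pct()))
```
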
-# - -import os -import sys -import json - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - parse_unique_records, GPFS_CMD_PATH, RC_SUCCESS, \ - SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, parse_unique_records, \ - GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException - - -class SpectrumScaleDisk: - disk = {} - filesystem = "" - - def __init__(self, disk_dict, fs_name): - self.disk = disk_dict - self.filesystem = fs_name - - def get_nsd_name(self): - nsd_name = self.disk["nsdName"] - return nsd_name - - def get_driver_type(self): - driver_type = self.disk["driverType"] - return driver_type - - def get_sector_size(self): - sector_size = self.disk["sectorSize"] - return sector_size - - def get_failure_group(self): - failure_group = self.disk["failureGroup"] - return failure_group - - def contains_metadata(self): - metadata = self.disk["metadata"] - if "yes" in metadata: - return True - return False - - def contains_data(self): - data = self.disk["data"] - if "yes" in data: - return True - return False - - def get_status(self): - status = self.disk["status"] - return status - - def get_availability(self): - availability = self.disk["availability"] - return availability - - def get_disk_id(self): - disk_id = self.disk["diskID"] - return disk_id - - def get_storage_pool(self): - pool_name = self.disk["storagePool"] - return pool_name - - def get_remarks(self): - remarks = self.disk["remarks"] - return remarks - - def get_num_quorum_disks(self): - num_qd_str = self.disk["numQuorumDisks"] - num_quorum_disks = int(num_qd_str) - return num_quorum_disks - - def get_read_quorum_value(self): - read_qv_str = self.disk["readQuorumValue"] - read_quorum_value = int(read_qv_str) - return read_quorum_value - - def get_write_quorum_value(self): - write_qv_str = self.disk["writeQuorumValue"] - write_quorum_value = int(write_qv_str) - return write_quorum_value - - def get_disk_size_KB(self): - disk_sz_str = self.disk["diskSizeKB"] - disk_size_KB = int(disk_sz_str) - return disk_size_KB - - def get_disk_UID(self): - disk_uid = self.disk["diskUID"] - return disk_uid - - def get_thin_disk_type(self): - thin_disk_type = self.disk["thinDiskType"] - return thin_disk_type - - def to_json(self): - return json.dumps(self.disk) - - def print_disk(self): - print(("NSD Name : {0}".format(self.get_nsd_name()))) - print(("Driver Type : {0}".format(self.get_driver_type()))) - print(("Sector Size : {0}".format(self.get_sector_size()))) - print(("Failure Group : {0}".format(self.get_failure_group()))) - print(("Contains Metadata : {0}".format(self.contains_metadata()))) - print(("Contains Data : {0}".format(self.contains_data()))) - print(("Status : {0}".format(self.get_status()))) - print(("Availability : {0}".format(self.get_availability()))) - print(("Disk ID : {0}".format(self.get_disk_id()))) - print(("Storage Pool : {0}".format(self.get_storage_pool()))) - print(("Remarks : {0}".format(self.get_remarks()))) - print(("Num Quorum Disks : {0}".format(self.get_num_quorum_disks()))) - print(("Read Quorum Value : {0}".format(self.get_read_quorum_value()))) - print(("Write Quorum Value : {0}".format(self.get_write_quorum_value()))) - print(("NSD Disk Size (KB) : {0}".format(self.get_disk_size_KB()))) - print(("Disk UID : {0}".format(self.get_disk_UID()))) - print(("Thin Disk Type : {0}".format(self.get_thin_disk_type()))) - - @staticmethod - def get_all_disk_info(fs_name, admin_ip=None): - disk_info_list = [] - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - 
mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmlsdisk"), fs_name, "-Y"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc == RC_SUCCESS: - # TODO: Check the return codes and examine other possibility and verify below - if "No disks were found" in stderr: - return nsd_info_list - else: - raise SpectrumScaleException("Retrieving disk information failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - disk_dict = parse_unique_records(stdout) - disk_list = disk_dict["mmlsdisk"] - - for disk in disk_list: - disk_instance = SpectrumScaleDisk(disk, fs_name) - disk_info_list.append(disk_instance) - - return disk_info_list - - - @staticmethod - def delete_disk(node_name, filesystem_name, disk_names, admin_ip=None): - """ - This function performs "mmdeldisk". - Args: - node_name (str): Node for which disk needs to be deleted. - filesystems_name (str): Filesystem name associated with the disks. - disk_names (list): Disk name to be deleted. - Ex: ['gpfs1nsd', 'gpfs2nsd', 'gpfs3nsd'] - """ - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - disk_name_str = ";".join(disk_names) - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmdeldisk"), filesystem_name, - disk_name_str, '-N', node_name]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Deleting disk(s) failed. ", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - -def main(): - if len(sys.argv) == 2: - fs_name = sys.argv[1] - try: - disk_list = get_all_disk_info(fs_name) - for disk in disk_list: - disk.print_disk() - print("\n") - except Exception as e: - print(e) - else: - print("The file system name should be specified") - rc = 1 - - -if __name__ == "__main__": - main() diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_filesystem_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_filesystem_utils.py deleted file mode 100644 index dd685979..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_filesystem_utils.py +++ /dev/null @@ -1,420 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
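
Similarly, `get_all_disk_info()` above parses `mmlsdisk <filesystem> -Y` into `SpectrumScaleDisk` objects. A minimal sketch of filtering that output, under the same assumptions (GPFS present, module importable, example file system name):

```python
from ibm_spectrumscale_disk_utils import SpectrumScaleDisk

# List the NSDs of a file system that hold metadata, with their storage pool.
for disk in SpectrumScaleDisk.get_all_disk_info("FS1"):
    if disk.contains_metadata():
        print(disk.get_nsd_name(), disk.get_storage_pool())
```
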
-# - -import os -import json - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - parse_simple_cmd_output, GPFS_CMD_PATH, RC_SUCCESS, \ - SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, parse_simple_cmd_output, \ - GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException - - -class SpectrumScaleFS: - - def __init__(self, device_name, filesystem_properties): - self.device_name = device_name - self.properties_list = filesystem_properties - - def __get_property_as_str(self, prop_name): - str_prop_value = "" - for fs_property in self.properties_list: - if prop_name in fs_property["fieldName"]: - str_prop_value = fs_property["data"] - return str_prop_value - - def __get_property_as_int(self, prop_name): - int_prop_value = 0 - for fs_property in self.properties_list: - if prop_name in fs_property["fieldName"]: - int_prop_value = int(fs_property["data"]) - return int_prop_value - - def __get_property_as_bool(self, prop_name): - bool_prop_value = False - for fs_property in self.properties_list: - if prop_name in fs_property["fieldName"]: - if ("Yes" in fs_property["data"] or - "yes" in fs_property["data"]): - bool_prop_value = True - return bool_prop_value - - def get_device_name(self): - return self.device_name - - def get_syspool_min_fragment_size(self): - syspool_min_fragment_size = 0 - for fs_property in self.properties_list: - if ("minFragmentSize" in fs_property["fieldName"] and - "system pool" in fs_property["remarks"]): - syspool_min_fragment_size = int(fs_property["data"]) - return syspool_min_fragment_size - - def get_other_pool_min_fragment_size(self): - other_pool_min_fragment_size = 0 - for fs_property in self.properties_list: - if ("minFragmentSize" in fs_property["fieldName"] and - "other pools" in fs_property["remarks"]): - other_pool_min_fragment_size = int(fs_property["data"]) - return other_pool_min_fragment_size - - def get_inode_size(self): - return self.__get_property_as_int("inodeSize") - - def get_indirect_block_size(self): - return self.__get_property_as_int("indirectBlockSize") - - def get_default_metadata_replicas(self): - return self.__get_property_as_int("defaultMetadataReplicas") - - def get_max_metadata_replicas(self): - return self.__get_property_as_int("maxMetadataReplicas") - - def get_default_data_replicas(self): - return self.__get_property_as_int("defaultDataReplicas") - - def get_max_data_replicas(self): - return self.__get_property_as_int("maxDataReplicas") - - def get_block_allocation_type(self): - return self.__get_property_as_str("blockAllocationType") - - def get_file_locking_semantics(self): - return self.__get_property_as_str("fileLockingSemantics") - - def get_acl_semantics(self): - return self.__get_property_as_str("ACLSemantics") - - def get_num_nodes(self): - return self.__get_property_as_int("numNodes") - - def get_syspool_block_size(self): - syspool_block_size = 0 - for fs_property in self.properties_list: - if ("blockSize" in fs_property["fieldName"] and - "system pool" in fs_property["remarks"]): - syspool_block_size = int(fs_property["data"]) - return syspool_block_size - - def get_other_pool_block_size(self): - other_pool_block_size = 0 - for fs_property in self.properties_list: - if ("blockSize" in fs_property["fieldName"] and - "other pools" in fs_property["remarks"]): - other_pool_block_size = int(fs_property["data"]) - return other_pool_block_size - - def get_quotas_accounting_enabled(self): - return self.__get_property_as_str("quotasAccountingEnabled") - - def get_quotas_enforced(self): - 
return self.__get_property_as_str("quotasEnforced") - - def get_default_quotas_enabled(self): - return self.__get_property_as_str("defaultQuotasEnabled") - - def get_per_fileset_quotas(self): - return self.__get_property_as_bool("perfilesetQuotas") - - def is_fileset_df_enabled(self): - return self.__get_property_as_bool("filesetdfEnabled") - - def get_filesystem_version(self): - return self.__get_property_as_str("filesystemVersion") - - def get_filesystem_version_local(self): - return self.__get_property_as_str("filesystemVersionLocal") - - def get_filesystem_version_manager(self): - return self.__get_property_as_str("filesystemVersionManager") - - def get_filesystem_version_original(self): - return self.__get_property_as_str("filesystemVersionOriginal") - - def get_filesystem_highest_supported(self): - return self.__get_property_as_str("filesystemHighestSupported") - - def get_create_time(self): - return self.__get_property_as_str("create-time") - - def is_dmapi_enabled(self): - return self.__get_property_as_bool("DMAPIEnabled") - - def get_logfile_size(self): - return self.__get_property_as_int("logfileSize") - - def is_exact_m_time(self): - return self.__get_property_as_bool("exactMtime") - - def get_suppress_atime(self): - return self.__get_property_as_str("suppressAtime") - - def get_strict_replication(self): - return self.__get_property_as_str("strictReplication") - - def is_fast_ea_enabled(self): - return self.__get_property_as_bool("fastEAenabled") - - def is_encrypted(self): - return self.__get_property_as_bool("encryption") - - def get_max_number_of_inodes(self): - return self.__get_property_as_int("maxNumberOfInodes") - - def get_max_snapshot_id(self): - return self.__get_property_as_int("maxSnapshotId") - - def get_uid(self): - return self.__get_property_as_str("UID") - - def get_log_replicas(self): - return self.__get_property_as_int("logReplicas") - - def is_4k_aligned(self): - return self.__get_property_as_bool("is4KAligned") - - def is_rapid_repair_enabled(self): - return self.__get_property_as_bool("rapidRepairEnabled") - - def get_write_cache_threshold(self): - return self.__get_property_as_int("write-cache-threshold") - - def get_subblocks_per_full_block(self): - return self.__get_property_as_int("subblocksPerFullBlock") - - def get_storage_pools(self): - storage_pool_list = [] - storage_pool_str = self.__get_property_as_str("storagePools") - if storage_pool_str: - storage_pool_list = storage_pool_str.split(";") - return storage_pool_list - - def is_file_audit_log_enabled(self): - return self.__get_property_as_bool("file-audit-log") - - def is_maintenance_mode(self): - return self.__get_property_as_bool("maintenance-mode") - - def get_disks(self): - disk_list = [] - disk_str = self.__get_property_as_str("disks") - if disk_str: - disk_list = disk_str.split(";") - return disk_list - - def is_automatic_mount_option_enabled(self): - return self.__get_property_as_bool("automaticMountOption") - - def get_additional_mount_options(self): - return self.__get_property_as_str("additionalMountOptions") - - def get_default_mount_point(self): - return self.__get_property_as_str("defaultMountPoint") - - def get_mount_priority(self): - return self.__get_property_as_int("mountPriority") - - def get_properties_list(self): - return self.properties_list - - def to_json(self): - # TODO: Include Filesystem Device Name - return json.dumps(self.properties_list) - - def print_filesystem(self): - print(("Device Name : {0}".format(self.get_device_name()))) - print(("Syspool Min Fragment Size : 
{0}".format(self.get_syspool_min_fragment_size()))) - print(("Other Pool Min Fragment Size : {0}".format(self.get_other_pool_min_fragment_size()))) - print(("Inode Size : {0}".format(self.get_inode_size()))) - print(("Indirect Block Size : {0}".format(self.get_indirect_block_size()))) - print(("Default Metadata Replicas : {0}".format(self.get_default_metadata_replicas()))) - print(("Max Metadata Replicas : {0}".format(self.get_max_metadata_replicas()))) - print(("Default Data Replicas : {0}".format(self.get_default_data_replicas()))) - print(("Max Data Replicas : {0}".format(self.get_max_data_replicas()))) - print(("Block Allocation Type : {0}".format(self.get_block_allocation_type()))) - print(("File Locking Semantics : {0}".format(self.get_file_locking_semantics()))) - print(("ACL Semantics : {0}".format(self.get_acl_semantics()))) - print(("Num Nodes : {0}".format(self.get_num_nodes()))) - print(("Syspool Block Size : {0}".format(self.get_syspool_block_size()))) - print(("Other Pool Block Size : {0}".format(self.get_other_pool_block_size()))) - print(("Quotas Accounting Enabled : {0}".format(self.get_quotas_accounting_enabled()))) - print(("Quotas Enforced : {0}".format(self.get_quotas_enforced()))) - print(("Default Quotas Enabled : {0}".format(self.get_default_quotas_enabled()))) - print(("Per Fileset Quotas : {0}".format(self.get_per_fileset_quotas()))) - print(("Fileset df Enabled : {0}".format(self.is_fileset_df_enabled()))) - print(("Filesystem Version : {0}".format(self.get_filesystem_version()))) - print(("Filesystem Version Local : {0}".format(self.get_filesystem_version_local()))) - print(("Filesystem Version Manager : {0}".format(self.get_filesystem_version_manager()))) - print(("Filesystem Version Original : {0}".format(self.get_filesystem_version_original()))) - print(("Filesystem Highest Supported : {0}".format(self.get_filesystem_highest_supported()))) - print(("Create Time : {0}".format(self.get_create_time()))) - print(("DMAPI Enabled : {0}".format(self.is_dmapi_enabled()))) - print(("Logfile Size : {0}".format(self.get_logfile_size()))) - print(("Is Exact m Time : {0}".format(self.is_exact_m_time()))) - print(("Suppress atime : {0}".format(self.get_suppress_atime()))) - print(("Strict Replication : {0}".format(self.get_strict_replication()))) - print(("Is Fast EA Enabled : {0}".format(self.is_fast_ea_enabled()))) - print(("Is Encrypted : {0}".format(self.is_encrypted()))) - print(("Max Number Of Inodes : {0}".format(self.get_max_number_of_inodes()))) - print(("Max Snapshot Id : {0}".format(self.get_max_snapshot_id()))) - print(("UID : {0}".format(self.get_uid()))) - print(("Log Replicas : {0}".format(self.get_log_replicas()))) - print(("Is 4K Aligned : {0}".format(self.is_4k_aligned()))) - print(("Is Rapid Repair Enabled : {0}".format(self.is_rapid_repair_enabled()))) - print(("Write Cache Threshold : {0}".format(self.get_write_cache_threshold()))) - print(("Subblocks Per Full Block : {0}".format(self.get_subblocks_per_full_block()))) - print(("Storage Pools : {0}".format(self.get_storage_pools()))) - print(("Is File Audit Log Enabled : {0}".format(self.is_file_audit_log_enabled()))) - print(("Is Maintenance Mode : {0}".format(self.is_maintenance_mode()))) - print(("Disks : {0}".format(self.get_disks()))) - print(("Is Automatic Mount Option Enabled : {0}".format(self.is_automatic_mount_option_enabled()))) - print(("Additional Mount Options : {0}".format(self.get_additional_mount_options()))) - print(("Default Mount Point : {0}".format(self.get_default_mount_point()))) 
- print(("Mount Priority : {0}".format(self.get_mount_priority()))) - - - @staticmethod - def get_filesystems(admin_ip=None): - filesystem_info_list = [] - - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmlsfs"), "all", "-Y"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - if 'mmlsfs: No file systems were found.' in stdout or \ - 'mmlsfs: No file systems were found.' in stderr: - return filesystem_info_list - - raise SpectrumScaleException("Retrieving filesystem information failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - filesystem_dict = parse_simple_cmd_output(stdout, "deviceName", - "properties", "filesystems") - filesystem_list = filesystem_dict["filesystems"] - - for filesystem in filesystem_list: - device_name = filesystem["deviceName"] - fs_properties = filesystem["properties"] - filesystem_instance = SpectrumScaleFS(device_name, fs_properties) - filesystem_info_list.append(filesystem_instance) - - return filesystem_info_list - - - @staticmethod - def unmount_filesystems(node_name, wait=True, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmumount"), "all", "-N", node_name]) - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - if 'mmumount: No file systems were found' in stdout or \ - 'mmumount: No file systems were found' in stderr: - # We can claim success on umount if there are no filesystems - return RC_SUCCESS - - raise SpectrumScaleException("Unmounting filesystems on node failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, stdout, stderr) - return rc, stdout - - - @staticmethod - def create_filesystem(name, stanza_path, block_size, - default_metadata_replicas, - default_data_replicas, num_nodes, - automatic_mount_option, - default_mount_point, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmcrfs"), name, - "-F", stanza_path, - "-B", block_size, - "-m", default_metadata_replicas, - "-r", default_data_replicas, - "-n", num_nodes, - "-A", automatic_mount_option, - "-T", default_mount_point]) - # TODO: Make this idempotent - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Create filesystems on node failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - return rc, stdout - - - @staticmethod - def delete_filesystem(name): - # TODO: Implement - rc = RC_SUCCESS - msg = "" - return rc, msg - - -def main(): - filesystem_list = get_filesystems() - for filesystem in filesystem_list: - filesystem.print_filesystem() - print("\n") - - -if __name__ == "__main__": - main() diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_nsd_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_nsd_utils.py deleted file mode 
100644 index 44ba0e59..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_nsd_utils.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -import os -import json - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - parse_unique_records, GPFS_CMD_PATH, RC_SUCCESS, \ - SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, parse_unique_records, \ - GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException - - -class SpectrumScaleNSD: - def __init__(self, nsd_dict): - self.nsd = nsd_dict - - def get_name(self): - name = self.nsd["diskName"] - return name - - def get_volume_id(self): - volumeId = self.nsd["volumeId"] - return volumeId - - def get_server_list(self): - server_list = [] - server_list_str = self.nsd["serverList"] - if server_list_str: - server_list = server_list_str.split(",") - return server_list - - def get_device_type(self): - device_type = self.nsd["deviceType"] - return device_type - - def get_disk_name(self): - disk_name = self.nsd["localDiskName"] - return disk_name - - def get_remarks(self): - remarks = self.nsd["remarks"] - return remarks - - def to_json(self): - return json.dumps(self.nsd) - - def print_nsd(self): - print(("NSD Name : {0}".format(self.get_name()))) - print(("Volume ID : {0}".format(self.get_volume_id()))) - print(("Server List: {0}".format(self.get_server_list()))) - print(("Device Type: {0}".format(self.get_device_type()))) - print(("Disk Name : {0}".format(self.get_disk_name()))) - print(("Remarks : {0}".format(self.get_remarks()))) - - - @staticmethod - def get_all_nsd_info(admin_ip=None): - nsd_info_list = [] - - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmlsnsd"),"-a", "-X", "-Y"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc == RC_SUCCESS: - if "No disks were found" in stderr: - return nsd_info_list - else: - raise SpectrumScaleException("Retrieving NSD information Failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - nsd_dict = parse_unique_records(stdout) - nsd_list = nsd_dict["nsd"] - - for nsd in nsd_list: - nsd_instance = SpectrumScaleNSD(nsd) - nsd_info_list.append(nsd_instance) - - return nsd_info_list - - - @staticmethod - def delete_nsd(nsd_list, admin_ip=None): - nsd_names = ";".join(nsd_list) - - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmdelnsd"), nsd_names]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], 
cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Deleting NSD(s) Failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - - @staticmethod - def remove_server_access_to_nsd(nsd_to_delete, node_to_delete, - nsd_attached_to_nodes, admin_ip=None): - stdout = stderr = "" - rc = RC_SUCCESS - - # mmchnsd "nsd1:node1.domain.com" - server_access_list = ','.join(map(str, nsd_attached_to_nodes)) - server_access_list = nsd_to_delete+":"+server_access_list - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmchnsd"), server_access_list]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - e_msg = ("Exception encountered during execution of modifying NSD " - "server access list for NSD={0} on Node={1}. Exception " - "Message={2)".format(nsd_to_delete, node_to_delete, e)) - raise SpectrumScaleException(e_msg, cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - if rc != RC_SUCCESS: - e_msg = ("Failed to modify NSD server access list for NSD={0} on " - "Node={1}".format(nsd_to_delete, node_to_delete)) - raise SpectrumScaleException(e_msg, cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - rc, stdout, stderr) - - -def main(): - try: - nsd_list = SpectrumScaleNSD.get_all_nsd_info() - for nsd in nsd_list: - nsd.print_nsd() - print("\n") - except Exception as e: - print(e) - -if __name__ == "__main__": - main() diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_utils.py deleted file mode 100755 index fafe5be0..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_utils.py +++ /dev/null @@ -1,688 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -import os -import sys -import json -import time -import subprocess -import threading -import logging -import signal -import urllib.request, urllib.parse, urllib.error -import urllib.parse -import types -from collections import OrderedDict - -GPFS_CMD_PATH = "/usr/lpp/mmfs/bin" -RC_SUCCESS = 0 -CMD_TIMEDOUT = "CMD_TIMEDOUT" - -class SpectrumScaleException(Exception): - _expmsg="" - _mmcmd="" - _cmdargs="" - _rc=0 - _stdout="" - _stderr="" - - def __init__(self, msg, mmcmd, cmdargs, rc, stdout, stderr): - self._expmsg = msg - self._mmcmd = mmcmd - self._cmdargs = nsd_names = " ".join(cmdargs) - self._rc = rc - self._stdout = stdout - self._stderr = stderr - - def get_message(self): - return self._expmsg - - def __str__(self): - error_str = ("{0}. " - "Command: \"{1}\". " - "Arguments: \"{2}\". " - "Error Code: {3}. " - "Error Message: \"{4}\". 
").format(self._expmsg, - self._mmcmd, - self._cmdargs, - self._rc, - self._stderr) - - return error_str - - -###################################### -## ## -## Logger Functions ## -## ## -###################################### -class SpectrumScaleLogger: - logger = None - - @staticmethod - def get_logger(): - if SpectrumScaleLogger.logger == None: - logger = logging.getLogger() - logger.setLevel(logging.DEBUG) - - log_file_handler = logging.FileHandler('/var/log/ibm_specscale_ansible.log') - log_file_handler.setLevel(logging.DEBUG) - log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - log_file_handler.setFormatter(log_formatter) - logger.addHandler(log_file_handler) - - # TODO: Enable once the "Shared Connection eror is rectified" - # console_handler = logging.StreamHandler() - # console_handler.setLevel(logging.INFO) - # console_formatter = logging.Formatter('[%(levelname)s] %(message)s') - # console_handler.setFormatter(console_formatter) - # logger.addHandler(console_handler) - - SpectrumScaleLogger.logger = logger - - return SpectrumScaleLogger.logger - - @staticmethod - def shutdown(): - if SpectrumScaleLogger.logger: - logging.shutdown() - - -###################################### -## ## -## Utility Functions ## -## ## -###################################### -def decode(input_string): - return urllib.parse.unquote(input_string) - - -def _stop_process(proc, logger, log_cmd, timeout): - try: - if proc.poll() is None: - logger.info("Command %s timed out after %s sec. Sending SIGTERM", log_cmd, timeout) - print(("Command %s timed out after %s sec. Sending SIGTERM", log_cmd, timeout)) - os.kill(proc.pid, signal.SIGTERM) # SIGKILL or SIGTERM - - time.sleep(0.5) - if proc.poll() is None: - logger.info("Command %s timed out after %s sec. Sending SIGKILL", log_cmd, timeout) - print(("Command %s timed out after %s sec. Sending SIGKILL", log_cmd, timeout)) - os.kill(proc.pid, signal.SIGKILL) - except Exception as e: - logger.warning(str(e)) - print((str(e))) - - -def runCmd(cmd, timeout=300, sh=False, env=None, retry=0): - """ - Execute an external command, read the output and return it. 
- @param cmd (str|list of str): command to be executed - @param timeout (int): timeout in sec, after which the command is forcefully terminated - @param sh (bool): True if the command is to be run in a shell and False if directly - @param env (dict): environment variables for the new process (instead of inheriting from the current process) - @param retry (int): number of retries on command timeout - @return: (stdout, stderr, rc) (str, str, int): the output of the command - """ - - logger = SpectrumScaleLogger.get_logger() - - if isinstance(cmd, str): - log_cmd = cmd - else: - log_cmd = ' '.join(cmd) - - t_start = time.time() - try: - if env is not None: - fullenv = dict(os.environ) - fullenv.update(env) - env = fullenv - # create the subprocess, ensuring a new process group is spawned - # so we can later kill the process and all its child processes - proc = subprocess.Popen(cmd, shell=sh, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - close_fds=False, env=env, universal_newlines=True) - - timer = threading.Timer(timeout, _stop_process, [proc, logger, log_cmd, timeout]) - timer.start() - - (sout, serr) = proc.communicate() - timer.cancel() # stop the timer when we got data from process - - ret = proc.poll() - except OSError as e: - logger.debug(str(e)) - sout = "" - serr = str(e) - ret = 127 if "No such file" in serr else 255 - finally: - try: - proc.stdout.close() - proc.stderr.close() - except: #pylint: disable=bare-except - pass - - t_run = time.time() - t_start - logger.debug("runCmd: Command executed: {0} Start time: {1} End time: {2} " - "Total time: {3}".format(log_cmd, t_start, - time.time(), t_run)) - - cmd_timeout = ret in (-signal.SIGTERM, -signal.SIGKILL) # 143,137 - if ret == -6 and retry >= 0 : # special handling for sigAbrt - logger.warning("runCmd: retry abrt %s with subprocess %s", cmd, s32) - (sout, serr, ret) = runCmd(cmd, timeout, sh, env, -1) - - if cmd_timeout and retry > 0: - retry -= 1 - logger.warning("runCmd: Retry command %s counter: %s", cmd, retry) - (sout, serr, ret) = runCmd(cmd, timeout, sh, env, retry) - elif cmd_timeout: - serr = CMD_TIMEDOUT - logger.warning("runCmd: %s Timeout:%d ret:%s", cmd, timeout, ret) - else: - logger.debug("runCmd: %s :(%d) ret:%s \n%s \n%s", cmd, timeout, ret, serr, sout) - - return (sout, serr, ret) - - -###################################### -## ## -## Parse Functions ## -## ## -###################################### - -# NOTE: The machine parsable "mm" list (-Y) commands fall into three categories -# in terms of how the information is organized and therefore should be parsed. 
-# Each of these different formats are listed below along with the appropriate -# parsing functions - -############################################# -# # -# TYPE 1 # -# # -############################################# -# -# "mm" command output type #1 -# -# mmlscluster:clusterSummary:HEADER:version:reserved:reserved:clusterName:clusterId:uidDomain:rshPath:rshSudoWrapper:rcpPath:rcpSudoWrapper:repositoryType:primaryServer:secondaryServer: -# mmlscluster:clusterNode:HEADER:version:reserved:reserved:nodeNumber:daemonNodeName:ipAddress:adminNodeName:designation:otherNodeRoles:adminLoginName:otherNodeRolesAlias: -# mmlscluster:cnfsSummary:HEADER:version:reserved:reserved:cnfsSharedRoot:cnfsMoundPort:cnfsNFSDprocs:cnfsReboot:cnfsMonitorEnabled:cnfsGanesha: -# mmlscluster:cnfsNode:HEADER:version:reserved:reserved:nodeNumber:daemonNodeName:ipAddress:cnfsState:cnfsGroupId:cnfsIplist: -# mmlscluster:cesSummary:HEADER:version:reserved:reserved:cesSharedRoot:EnabledServices:logLevel:addressPolicy: -# mmlscluster:cesNode:HEADER:version:reserved:reserved:nodeNumber:daemonNodeName:ipAddress:cesGroup:cesState:cesIpList: -# mmlscluster:cloudGatewayNode:HEADER:version:reserved:reserved:nodeNumber:daemonNodeName: -# mmlscluster:clusterSummary:0:1:::cluster.domain.com:2936932203756487754:cluster.domain.com:/usr/bin/ssh:no:/usr/bin/scp:no:CCR:server-1.domain.com:: -# mmlscluster:clusterNode:0:1:::1:server-1.domain.com:10.0.0.1:server-1.domain.com:quorum:Z::perfmon: -# mmlscluster:clusterNode:0:1:::2:server-3.domain.com:10.0.0.4:server-3.domain.com:quorumManager:Z::perfmon: -# mmlscluster:clusterNode:0:1:::3:server-4.domain.com:10.0.0.4:server-4.domain.com:quorumManager:Z::perfmon: -# mmlscluster:clusterNode:0:1:::4:server-2.domain.com:10.0.0.2:server-2.domain.com::Z::perfmon: -# -# -# The above output is parsed and represented in JSON as follows: -# -# { -# "clusterSummary": { -# "version": "1", -# "clusterName": "cluster.domain.com", -# "clusterId": "2936932203756844651", -# "uidDomain": "cluster.domain.com", -# "rshPath": "/usr/bin/ssh", -# "rshSudoWrapper": "no", -# "rcpPath": "/usr/bin/scp", -# "rcpSudoWrapper": "no", -# "repositoryType": "CCR", -# "primaryServer": "server-1.domain.com", -# "secondaryServer": "" -# }, -# "clusterNode": [ -# { -# "version": "1", -# "nodeNumber": "1", -# "daemonNodeName": "server-1.domain.com", -# "ipAddress": "10.0.0.1", -# "adminNodeName": "server-1.domain.com", -# "designation": "quorum", -# "otherNodeRoles": "Z,X", -# "adminLoginName": "", -# "otherNodeRolesAlias": "perfmon,ces" -# }, -# { -# "version": "1", -# "nodeNumber": "2", -# "daemonNodeName": "server-3.domain.com", -# "ipAddress": "10.0.0.4", -# "adminNodeName": "server-3.domain.com", -# "designation": "quorumManager", -# "otherNodeRoles": "Z", -# "adminLoginName": "", -# "otherNodeRolesAlias": "perfmon" -# }, -# { -# "version": "1", -# "nodeNumber": "3", -# "daemonNodeName": "server-4.domain.com", -# "ipAddress": "10.0.0.4", -# "adminNodeName": "server-4.domain.com", -# "designation": "quorumManager", -# "otherNodeRoles": "Z", -# "adminLoginName": "", -# "otherNodeRolesAlias": "perfmon" -# }, -# { -# "version": "1", -# "nodeNumber": "4", -# "daemonNodeName": "server-2.domain.com", -# "ipAddress": "10.0.0.2", -# "adminNodeName": "server-2.domain.com", -# "designation": "", -# "otherNodeRoles": "Z,X", -# "adminLoginName": "", -# "otherNodeRolesAlias": "perfmon,ces" -# } -# ], -# "cesSummary": { -# "version": "1", -# "cesSharedRoot": "/ibm/cesSharedRoot", -# "EnabledServices": "SMB,NFS", -# "logLevel": "0", -# 
"addressPolicy": "even-coverage" -# }, -# "cesNode": [ -# { -# "version": "1", -# "nodeNumber": "1", -# "daemonNodeName": "server-1.domain.com", -# "ipAddress": "10.0.0.1", -# "cesGroup": "", -# "cesState": "e", -# "cesIpList": "10.0.0.5,10.0.0.6" -# }, -# { -# "version": "1", -# "nodeNumber": "4", -# "daemonNodeName": "server-2.domain.com", -# "ipAddress": "10.0.0.2", -# "cesGroup": "", -# "cesState": "e", -# "cesIpList": "10.0.0.7,10.0.0.8" -# } -# ] -#} -# -# TODO: Change function name to something more appropriate -def parse_aggregate_cmd_output(cmd_raw_out, summary_records, header_index=2): - data_out = OrderedDict() - headers = OrderedDict() - - if isinstance(cmd_raw_out, str): - lines = cmd_raw_out.splitlines() - else: - lines = cmd_raw_out - - for line in lines: - values = line.split(":") - if len(values) < 3: - continue - - command = values[0] - datatype = values[1] or values[0] - if datatype == "": - continue - - if values[header_index] == 'HEADER': - headers[datatype] = values - continue - - columnNames = headers[datatype] - - json_object = OrderedDict() - for key, value in zip(columnNames[header_index+1:], - values[header_index+1:]): - json_object[key] = decode(value) - - if "" in json_object: - del json_object[""] - if 'reserved' in json_object: - del json_object['reserved'] - - # Summary records should only exist once - if datatype in summary_records: - json_d_type = "object" - data_out[datatype] = json_object - else: - json_d_type = "array" - json_array = [] - if datatype in list(data_out.keys()): - # An element in the array already exists - json_array = data_out[datatype] - json_array.append(json_object) - data_out[datatype] = json_array - - return data_out - - -############################################# -# # -# TYPE 2 # -# # -############################################# -# -# "mm" command output type #2 -# -# mmlsfs::HEADER:version:reserved:reserved:deviceName:fieldName:data:remarks: -# mmlsfs::0:1:::FS1:minFragmentSize:8192:: -# mmlsfs::0:1:::FS1:inodeSize:4096:: -# mmlsfs::0:1:::FS1:indirectBlockSize:32768:: -# mmlsfs::0:1:::FS1:defaultMetadataReplicas:2:: -# mmlsfs::0:1:::FS1:maxMetadataReplicas:2:: -# mmlsfs::0:1:::FS1:defaultDataReplicas:1:: -# mmlsfs::0:1:::FS1:maxDataReplicas:2:: -# mmlsfs::0:1:::FS1:blockAllocationType:scatter:: -# mmlsfs::0:1:::FS1:fileLockingSemantics:nfs4:: -# mmlsfs::0:1:::FS1:ACLSemantics:nfs4:: -# mmlsfs::0:1:::FS1:numNodes:100:: -# mmlsfs::0:1:::FS1:blockSize:4194304:: -# mmlsfs::0:1:::FS1:quotasAccountingEnabled:none:: -# mmlsfs::0:1:::FS1:quotasEnforced:none:: -# mmlsfs::0:1:::FS1:defaultQuotasEnabled:none:: -# mmlsfs::0:1:::FS1:perfilesetQuotas:No:: -# mmlsfs::0:1:::FS1:filesetdfEnabled:No:: -# mmlsfs::0:1:::FS1:filesystemVersion:22.00 (5.0.4.0):: -# mmlsfs::0:1:::FS1:filesystemVersionLocal:22.00 (5.0.4.0):: -# mmlsfs::0:1:::FS1:filesystemVersionManager:22.00 (5.0.4.0):: -# mmlsfs::0:1:::FS1:filesystemVersionOriginal:22.00 (5.0.4.0):: -# mmlsfs::0:1:::FS1:filesystemHighestSupported:22.00 (5.0.4.0):: -# mmlsfs::0:1:::FS1:create-time:Fri Feb 21 01%3A36%3A21 2020:: -# mmlsfs::0:1:::FS1:DMAPIEnabled:No:: -# mmlsfs::0:1:::FS1:logfileSize:33554432:: -# mmlsfs::0:1:::FS1:exactMtime:Yes:: -# mmlsfs::0:1:::FS1:suppressAtime:relatime:: -# mmlsfs::0:1:::FS1:strictReplication:whenpossible:: -# mmlsfs::0:1:::FS1:fastEAenabled:Yes:: -# mmlsfs::0:1:::FS1:encryption:No:: -# mmlsfs::0:1:::FS1:maxNumberOfInodes:513024:: -# mmlsfs::0:1:::FS1:maxSnapshotId:0:: -# mmlsfs::0:1:::FS1:UID:090B5475%3A5E4F9685:: -# mmlsfs::0:1:::FS1:logReplicas:0:: -# 
mmlsfs::0:1:::FS1:is4KAligned:Yes:: -# mmlsfs::0:1:::FS1:rapidRepairEnabled:Yes:: -# mmlsfs::0:1:::FS1:write-cache-threshold:0:: -# mmlsfs::0:1:::FS1:subblocksPerFullBlock:512:: -# mmlsfs::0:1:::FS1:storagePools:system:: -# mmlsfs::0:1:::FS1:file-audit-log:No:: -# mmlsfs::0:1:::FS1:maintenance-mode:No:: -# mmlsfs::0:1:::FS1:disks:nsd1;nsd2:: -# mmlsfs::0:1:::FS1:automaticMountOption:yes:: -# mmlsfs::0:1:::FS1:additionalMountOptions:none:: -# mmlsfs::0:1:::FS1:defaultMountPoint:%2Fibm%2FFS1:: -# -# The above output is parsed and represented in JSON as follows: -# -#{ -# filesystems : [ -# { -# deviceName : FS1 -# properties : [ -# { -# fieldName: minFragmentSize -# data : 8192 -# remarks : "" -# }, -# { -# fieldName: inodeSize -# data : 4096 -# remarks : "" -# } -# ] -# -# }, -# { -# deviceName : FS2 -# properties : [ -# { -# fieldName: minFragmentSize -# data : 8192 -# remarks : "" -# }, -# { -# fieldName: inodeSize -# data : 4096 -# remarks : "" -# } -# ] -# -# } -# ] -#} -# -# TODO: Change function name to something more appropriate -def parse_simple_cmd_output(cmd_raw_out, cmd_key, cmd_prop_name, - datatype="", header_index=2): - data_out = OrderedDict() - headers = OrderedDict() - - if isinstance(cmd_raw_out, str): - lines = cmd_raw_out.splitlines() - else: - lines = cmd_raw_out - - for line in lines: - values = line.split(":") - if len(values) < 3: - continue - - command = values[0] - - if not datatype: - datatype = values[1] or values[0] - if datatype == "": - continue - - if values[header_index] == 'HEADER': - headers[datatype] = values - continue - - columnNames = headers[datatype] - - json_object = OrderedDict() - instance_key = "" - for key, value in zip(columnNames[header_index+1:], - values[header_index+1:]): - if cmd_key in key: - instance_key = value - else: - json_object[key] = decode(value) - - if "" in json_object: - del json_object[""] - if 'reserved' in json_object: - del json_object['reserved'] - - json_array = [] - obj_found = False - if datatype in list(data_out.keys()): - # List of OrederDict - json_array = data_out[datatype] - prop_list = [] - # Each obj is an OrderDict - for obj in json_array: - key_val = obj[cmd_key] - if instance_key in key_val: - # We found the obj to which this record should be added - prop_list = obj[cmd_prop_name] - prop_list.append(json_object) - obj[cmd_prop_name] = prop_list - obj_found = True - break - - if not obj_found: - prop_list = [] - prop_list.append(json_object) - device_dict = OrderedDict() - device_dict[cmd_key] = instance_key - device_dict[cmd_prop_name] = prop_list - json_array.append(device_dict) - - data_out[datatype] = json_array - - return data_out - - -############################################# -# # -# TYPE 3 # -# # -############################################# -# -# "mm" command output type #3 -# -# mmlsnsd:nsd:HEADER:version:reserved:reserved:fileSystem:diskName:volumeId:serverList:thinDisk: -# mmlsnsd:nsd:0:1:::FS1:nsd1:090B54755E4F84E6:server-3.domain.com,server-4.domain.com:: -# mmlsnsd:nsd:0:1:::FS1:nsd2:090B54765E4F84E8:server-4.domain.com,server-3.domain.com:: -# mmlsnsd:nsd:0:1:::FS2:nsd3:090B54755E4F84EA:server-3.domain.com,server-4.domain.com:: -# mmlsnsd:nsd:0:1:::FS2:nsd4:090B54765E4F84EC:server-4.domain.com,server-3.domain.com:: -# -# The above output is parsed and represented in JSON as follows: -# -# { -# mmlsnsd : [ -# { -# diskName : "nsd1" -# fileSystem: "FS1" -# volumeId : "090B54755E4F84E6" -# serverList: "server-3.domain.com,server-4.domain.com" -# thinDisk : "" -# } -# { -# diskName : "nsd2" -# 
fileSystem: "FS1" -# volumeId : "090B54765E4F84E8" -# serverList: "server-4.domain.com,server-3.domain.com" -# thinDisk : "" -# } -# { -# diskName : "nsd3" -# fileSystem: "FS2" -# volumeId : "090B54755E4F84EA" -# serverList: "server-3.domain.com,server-4.domain.com" -# thinDisk : "" -# } -# { -# diskName : "nsd4" -# fileSystem: "FS2" -# volumeId : "090B54765E4F84EC" -# serverList: "server-4.domain.com,server-3.domain.com" -# thinDisk : "" -# } -# ] -# } -# -# TODO: Change function name to something more appropriate -def parse_unique_records(cmd_raw_out, datatype="", header_index=2): - data_out = OrderedDict() - headers = OrderedDict() - - if isinstance(cmd_raw_out, str): - lines = cmd_raw_out.splitlines() - else: - lines = cmd_raw_out - - for line in lines: - values = line.split(":") - if len(values) < 3: - continue - - command = values[0] - - if not datatype: - datatype = values[1] or values[0] - if datatype == "": - continue - - if values[header_index] == 'HEADER': - headers[datatype] = values - continue - - columnNames = headers[datatype] - - json_object = OrderedDict() - for key, value in zip(columnNames[header_index+1:], - values[header_index+1:]): - json_object[key] = decode(value) - - if "" in json_object: - del json_object[""] - if 'reserved' in json_object: - del json_object['reserved'] - - json_array = [] - if datatype in list(data_out.keys()): - # List of OrederDict - json_array = data_out[datatype] - json_array.append(json_object) - - data_out[datatype] = json_array - - return data_out - - -############################################################################### -## ## -## Main Function ## -## ## -############################################################################### - -def main(): - cmd = "cluster" - if len(sys.argv) > 1: - cmd = sys.argv[1] - if "fs" in cmd: - cmd = "filesystem" - - sout = "" - serr = "" - rc = 0 - if "cluster" in cmd: - sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmlscluster"),"-Y"], sh=False) - out_list = parse_aggregate_cmd_output(sout, ["clusterSummary", "cnfsSummary", "cesSummary"]) - elif "filesystem" in cmd: - sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmlsfs"),"all","-Y"], sh=False) - out_list = parse_simple_cmd_output(sout, "deviceName", "properties", "filesystems") - elif "mount" in cmd: - sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmlsmount"),"all","-Y"], sh=False) - out_list = parse_simple_cmd_output(sout, "realDevName", "mounts", "filesystem_mounts") - elif "config" in cmd: - sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmlsconfig"),"-Y"], sh=False) - out_list = parse_unique_records(sout) - elif "df" in cmd: - sout, serr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmdf"),"FS1", "-Y"], sh=False) - out_list = parse_aggregate_cmd_output(sout, ["poolTotal", "data", "metadata", "fsTotal", "inode"]) - - - if rc: - print(("Error executing command: %s %s", sout, serr)) - - json_str = json.dumps(out_list, indent=2) - print(json_str) - - -if __name__ == "__main__": - main() - diff --git a/roles/custom_module/module_utils/ibm_spectrumscale_zimon_utils.py b/roles/custom_module/module_utils/ibm_spectrumscale_zimon_utils.py deleted file mode 100644 index d65c7d63..00000000 --- a/roles/custom_module/module_utils/ibm_spectrumscale_zimon_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -import os -import re -import json -import time - -try: - from ansible.module_utils.ibm_spectrumscale_utils import runCmd, \ - GPFS_CMD_PATH, RC_SUCCESS, SpectrumScaleException -except: - from ibm_spectrumscale_utils import runCmd, GPFS_CMD_PATH, \ - RC_SUCCESS, SpectrumScaleException - - -def get_zimon_collectors(): - """ - This function returns zimon collector node ip's. - """ - stdout = stderr = "" - rc = RC_SUCCESS - - cmd = [] - mmcmd_idx = 1 - if admin_ip: - cmd.extend(["ssh", admin_ip]) - mmcmd_idx = len(cmd) + 1 - - cmd.extend([os.path.join(GPFS_CMD_PATH, "mmperfmon"), "config", "show"]) - - try: - stdout, stderr, rc = runCmd(cmd, sh=False) - except Exception as e: - raise SpectrumScaleException(str(e), cmd[0:mmcmd_idx], cmd[mmcmd_idx:], - -1, stdout, stderr) - - if rc != RC_SUCCESS: - raise SpectrumScaleException("Retrieving Zimon information failed", - cmd[0:mmcmd_idx], cmd[mmcmd_idx:], rc, - stdout, stderr) - - output = stdout.splitlines() - col_regex = re.compile(r'colCandidates\s=\s(?P.*)') - for cmd_line in output: - if col_regex.match(cmd_line): - collectors = col_regex.match(cmd_line).group('collectors') - - collectors = collectors.replace("\"", '').replace(" ", '') - collectors = collectors.split(',') - - return collectors - - -def main(): - zimon_collectors_list = get_zimon_collectors() - for collector in zimon_collectors_list: - print(collector) - - -if __name__ == "__main__": - main() - diff --git a/roles/custom_module/tasks/main.yml b/roles/custom_module/tasks/main.yml deleted file mode 100644 index 2485fbab..00000000 --- a/roles/custom_module/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# Execute custom roles diff --git a/roles/custom_module/test/cluster/playbooks/cluster-get-test.yaml b/roles/custom_module/test/cluster/playbooks/cluster-get-test.yaml deleted file mode 100644 index 978da4b9..00000000 --- a/roles/custom_module/test/cluster/playbooks/cluster-get-test.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
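
`get_zimon_collectors()` above extracts the collector candidates from `mmperfmon config show` output with a regular expression, reconstructed here with the named group implied by the later `.group('collectors')` call. A standalone sketch of just that parsing step, using a made-up sample line:

```python
import re

sample = 'colCandidates = "server-1.domain.com", "server-2.domain.com"'

match = re.compile(r'colCandidates\s=\s(?P<collectors>.*)').match(sample)
if match:
    collectors = match.group('collectors').replace('"', '').replace(' ', '').split(',')
    print(collectors)  # ['server-1.domain.com', 'server-2.domain.com']
```
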
-# - -- name: Test Spectrum Scale Cluster Module - hosts: controller - gather_facts: no - tasks: - - name: Query cluster information - ibm_spectrumscale_cluster: - op: 'get' - register: cluster_info_out - run_once: true - - - name: Display cluster information - debug: - msg: "{{ cluster_info_out }}" - - - name: Display cluster name - debug: - msg: "{{ (cluster_info_out.result | from_json).cluster_info.clusterSummary.clusterName }}" - - - name: Display all node name(s) - admin node name - debug: - msg: "{{ (cluster_info_out.result | from_json).cluster_info.clusterNode | map(attribute='adminNodeName') | list }}" - - # Example of using json_query - - name: Display all node name(s) - daemon node name - debug: - var: item - loop: "{{ (cluster_info_out.result | from_json).cluster_info | json_query('clusterNode[*].daemonNodeName') }}" - - - name: Display all quorum nodes - debug: - var: item - loop: "{{ (cluster_info_out.result | from_json).cluster_info | json_query(node_designation_query) }}" - vars: - node_designation_query: "clusterNode[?contains(designation, 'quorum')].adminNodeName" diff --git a/roles/custom_module/test/filesystem/playbooks/filesystem-test.yaml b/roles/custom_module/test/filesystem/playbooks/filesystem-test.yaml deleted file mode 100644 index 953cf5d7..00000000 --- a/roles/custom_module/test/filesystem/playbooks/filesystem-test.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -- name: Test Spectrum Scale Cluster Module - hosts: controller - tasks: - - name: Run Filesystem Module - ibm_spectrumscale_filesystem: - op: 'get' - register: module_out - - - name: Dump Filesystem output - debug: - msg: "{{ (module_out.result | from_json).filesystems | map(attribute='deviceName') | list }}" - diff --git a/roles/custom_module/test/node/common/AddNodeStanza.j2 b/roles/custom_module/test/node/common/AddNodeStanza.j2 deleted file mode 100644 index 1d67590a..00000000 --- a/roles/custom_module/test/node/common/AddNodeStanza.j2 +++ /dev/null @@ -1,4 +0,0 @@ -{% for host in groups['test_add_nodes'] | sort %} -{{ hostvars[host].inventory_hostname }}:{{hostvars[host].designation }} -{% endfor %} - diff --git a/roles/custom_module/test/node/playbooks/node-add-test.yaml b/roles/custom_module/test/node/playbooks/node-add-test.yaml deleted file mode 100644 index 9c93ca30..00000000 --- a/roles/custom_module/test/node/playbooks/node-add-test.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -- name: Test adding node to Spectrum Scale cluster - hosts: controller - gather_facts: no - tasks: - - name: Generate Stanza file to add Node(s) - template: - src: ../common/AddNodeStanza.j2 - dest: /tmp/NodeFile - - - name: Add Node(s) to cluster - ibm_spectrumscale_node: - state: "present" - name: "{{ groups['test_add_nodes'][0] }}" - nodefile: "/tmp/NodeFile" - license: "client" - register: add_node_result - - diff --git a/roles/custom_module/test/node/playbooks/node-get-test.yaml b/roles/custom_module/test/node/playbooks/node-get-test.yaml deleted file mode 100644 index 3746916a..00000000 --- a/roles/custom_module/test/node/playbooks/node-get-test.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -- name: Test Spectrum Scale Node Module - hosts: controller - gather_facts: no - tasks: - - name: Query all nodes in the cluster - ibm_spectrumscale_node: - op: "get" - register: nodes_info - - - name: Display all nodes in the cluster - debug: - msg: "{{ nodes_info }}" - - - name: Query a single node in the cluster - ibm_spectrumscale_node: - op: "get" - name: "node1.domain.com" - register: node_1_info - - - name: Display a single node information - debug: - msg: "{{ node_1_info }}" - diff --git a/roles/custom_module/test/node/playbooks/node-remove-test.yaml b/roles/custom_module/test/node/playbooks/node-remove-test.yaml deleted file mode 100644 index f1432b94..00000000 --- a/roles/custom_module/test/node/playbooks/node-remove-test.yaml +++ /dev/null @@ -1,162 +0,0 @@ -# -# Copyright 2020 IBM Corporation -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -- name: Test removing node that has been shutdown (Negative Test) - hosts: controller - gather_facts: no - tasks: - - name: Shutdown node - ibm_spectrumscale_node: - op: "stop" - name: "{{ groups['test_remove_nodes'][0] }}" - - - name: Attempt to remove node that is shutdown and ensure it is note removed - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['test_remove_nodes'][0] }}" - register: result_node_remove - failed_when: > - (result_node_remove.rc == 0) or - ('FAILED' not in result_node_remove.msg) - - - name: Check to ensure node still exists - ibm_spectrumscale_node: - op: "get" - name: "{{ groups['test_remove_nodes'][0] }}" - register: result_node_info - failed_when: > - (((result_node_info.result | from_json).clusterNodes | length) != 1) or - (groups['test_remove_nodes'][0] not in (result_node_info.result | from_json).clusterNodes[0].adminNodeName) - - - name: Startup node - ibm_spectrumscale_node: - op: "start" - name: "{{ groups['test_remove_nodes'][0] }}" - - tags: - - remove_node_down - - -- name: Test removing node that has unhealthy disks (Negative Test) - hosts: controller - gather_facts: no - tasks: - - name: Test removing node that has unhealthy disks - block: - - name: Stop disk(s) - command: /usr/lpp/mmfs/bin/mmchdisk - "{{ hostvars[groups['test_remove_storage_nodes'][0]].filesystem }}" - "stop" - "-d" - "{{hostvars[groups['test_remove_storage_nodes'][0]].nsds}}" - - - name: Attempt to remove node with unhealthy disks and ensure node is not removed - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['test_remove_storage_nodes'][0] }}" - register: result_node_remove - failed_when: > - (result_node_remove.rc == 0) or - ('FAILED' not in result_node_remove.msg) - - - name: Check to ensure node still exists - ibm_spectrumscale_node: - op: "get" - name: "{{ groups['test_remove_storage_nodes'][0] }}" - register: result_node_info - failed_when: > - (((result_node_info.result | from_json).clusterNodes | length) != 1) or - (groups['test_remove_nodes'][0] not in (result_node_info.result | from_json).clusterNodes[0].adminNodeName) - - - name: Start disk(s) - command: /usr/lpp/mmfs/bin/mmchdisk - "{{ hostvars[groups['test_remove_storage_nodes'][0]].filesystem }}" - "start" - "-d" - "{{ hostvars[groups['test_remove_storage_nodes'][0]].nsds }}" - when: (groups['test_remove_storage_nodes']|length > 0) - tags: - - remove_node_with_unhealthy_disk - - -- name: Test removing a quorum node (Negative Test) - hosts: controller - gather_facts: no - tasks: - - name: Attempt to remove quorum node and ensure node is not removed - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['quorum_nodes'][0] }}" - register: result_quorum_node_remove - failed_when: > - (result_quorum_node_remove.rc == 0) or - ('FAILED' not in result_quorum_node_remove.msg) - - tags: - - remove_quorum_node - - -- name: Test removing a manager node (Negative Test) - hosts: controller - gather_facts: no - tasks: - - name: Attempt to remove manager node and ensure node is not removed - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['manager_nodes'][0] }}" - register: result_manager_node_remove - failed_when: > - (result_manager_node_remove.rc == 0) or - ('FAILED' not in result_manager_node_remove.msg) - tags: - - remove_manager_node - - -- name: Test removing a node - hosts: controller - gather_facts: no - tasks: - - name: Remove a single node from the cluster - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['test_remove_nodes'][0] }}" - register: 
node_remove_result - - - name: Check to ensure node has been deleted - ibm_spectrumscale_node: - op: "get" - name: "{{ groups['test_remove_nodes'][0] }}" - register: result_node_info - failed_when: > - (((result_node_info.result | from_json).clusterNodes | length) != 0) - tags: - - remove_valid_node - - -- name: Test removing a non existing node - hosts: controller - gather_facts: no - tasks: - - name: Remove a non existing node from the cluster - ibm_spectrumscale_node: - state: "absent" - name: "{{ groups['test_remove_nodes'][0] }}" - register: node_remove_result - - tags: - - remove_duplicate_node - diff --git a/roles/custom_module/test/node/playbooks/node-status-test.yaml b/roles/custom_module/test/node/playbooks/node-status-test.yaml deleted file mode 100644 index 9bc90670..00000000 --- a/roles/custom_module/test/node/playbooks/node-status-test.yaml +++ /dev/null @@ -1,23 +0,0 @@ -- name: Test Spectrum Scale Node Module - hosts: controller - gather_facts: no - tasks: - - name: Query all node status in the cluster - ibm_spectrumscale_node: - op: "status" - register: nodes_status_info - - - name: Display all node statuses in the cluster - debug: - msg: "{{ nodes_status_info }}" - - - name: Query a single node status in the cluster - ibm_spectrumscale_node: - op: "status" - name: "node1.domain.com" - register: node_status_1_info - - - name: Display a singles node status information - debug: - msg: "{{ node_status_1_info }}" - diff --git a/roles/custom_module/test/node/python/add-node.json b/roles/custom_module/test/node/python/add-node.json deleted file mode 100644 index 2f5dd04a..00000000 --- a/roles/custom_module/test/node/python/add-node.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "ANSIBLE_MODULE_ARGS": { - "state": "present", - "name": "node1.domain.com,node2.domain.com", - "nodefile": "../common/stanza-add-node", - "license": "client" - } -} diff --git a/roles/custom_module/test/node/python/remove-node.json b/roles/custom_module/test/node/python/remove-node.json deleted file mode 100644 index e243d690..00000000 --- a/roles/custom_module/test/node/python/remove-node.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "ANSIBLE_MODULE_ARGS": { - "state": "absent", - "name": "node1.doamin.com" - } -} diff --git a/roles/ece_configure/README.md b/roles/ece_configure/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/ece_configure/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_ece/cluster/defaults/main.yml b/roles/ece_configure/defaults/main.yml similarity index 100% rename from roles/scale_ece/cluster/defaults/main.yml rename to roles/ece_configure/defaults/main.yml diff --git a/roles/scale_ece/cluster/handlers/main.yml b/roles/ece_configure/handlers/main.yml similarity index 100% rename from roles/scale_ece/cluster/handlers/main.yml rename to roles/ece_configure/handlers/main.yml diff --git a/roles/scale_ece/cluster/meta/main.yml b/roles/ece_configure/meta/main.yml similarity index 63% rename from roles/scale_ece/cluster/meta/main.yml rename to roles/ece_configure/meta/main.yml index d09b13b7..fb8c5b02 100644 --- a/roles/scale_ece/cluster/meta/main.yml +++ b/roles/ece_configure/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: scale_ece author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,13 +14,6 @@ galaxy_info: - 7 - 8 - 
galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_ece/cluster/tasks/create_filesystem.yml b/roles/ece_configure/tasks/create_filesystem.yml similarity index 100% rename from roles/scale_ece/cluster/tasks/create_filesystem.yml rename to roles/ece_configure/tasks/create_filesystem.yml diff --git a/roles/scale_ece/cluster/tasks/create_recoverygroup.yml b/roles/ece_configure/tasks/create_recoverygroup.yml similarity index 100% rename from roles/scale_ece/cluster/tasks/create_recoverygroup.yml rename to roles/ece_configure/tasks/create_recoverygroup.yml diff --git a/roles/scale_ece/cluster/tasks/create_vdisk.yml b/roles/ece_configure/tasks/create_vdisk.yml similarity index 50% rename from roles/scale_ece/cluster/tasks/create_vdisk.yml rename to roles/ece_configure/tasks/create_vdisk.yml index 6508ca50..55b25569 100644 --- a/roles/scale_ece/cluster/tasks/create_vdisk.yml +++ b/roles/ece_configure/tasks/create_vdisk.yml @@ -6,6 +6,22 @@ changed_when: false failed_when: false + - name: create | Initialize + set_fact: + extra_option_flag: "" + + - name: create | Set NSD usase if it is defined + set_fact: + extra_option_flag: "{{ extra_option_flag }} --nsd-usage {{ item.nsdUsage }}" + when: + - item.nsdUsage is defined + + - name: create | Set Storage pool if it is defined + set_fact: + extra_option_flag: "{{ extra_option_flag }} --storage-pool {{ item.poolName }}" + when: + - item.poolName is defined + - name: create | Define Vdiskset vars: current_vs: "{{ item.vdisk | default('vs_' + (item.rg | regex_replace('\\W', '_')) | basename) }}" @@ -13,7 +29,7 @@ current_code: "{{ item.ec }}" current_bs: "{{ item.blocksize }}" current_size: "{{ item.Size }}" - command: "{{ scale_command_path }}mmvdisk vs define --vs {{ current_vs }} --rg {{ current_rg }} --code {{ current_code }} --bs {{ current_bs }} --ss {{ current_size }}%" + command: "{{ scale_command_path }}mmvdisk vs define --vs {{ current_vs }} --rg {{ current_rg }} --code {{ current_code }} --bs {{ current_bs }} --ss {{ current_size }} {{ extra_option_flag }}" register: scale_vs_define failed_when: scale_vs_define.rc != 0 when: @@ -23,6 +39,27 @@ - item.ec is defined - item.blocksize is defined - item.Size is defined + - item.da is not defined + + - name: create | Define Vdiskset + vars: + current_vs: "{{ item.vdisk | default('vs_' + (item.rg | regex_replace('\\W', '_')) | basename) }}" + current_rg: "{{ item.rg }}" + current_code: "{{ item.ec }}" + current_bs: "{{ item.blocksize }}" + current_size: "{{ item.Size }}" + extra_option: "{{ item.da }}" + command: "{{ scale_command_path }}mmvdisk vs define --vs {{ current_vs }} --rg {{ current_rg }} --code {{ current_code }} --bs {{ current_bs }} --ss {{ current_size }} --da {{ extra_option }} {{ extra_option_flag }}" + register: scale_vs_define + failed_when: scale_vs_define.rc != 0 + when: + - current_vs not in scale_existing_vs.stdout_lines + - item.vdisk is defined + - item.rg is defined + - item.ec is defined + - item.blocksize is defined + - item.Size is defined + - item.da is defined - name: create | Create Vdiskset vars: @@ -33,6 +70,10 @@ when: - item.vdisk is defined + - debug: + msg: "{{ scale_vs_define.cmd }}" + when: scale_vs_define.cmd is defined + - name: create | Add vdisks to desire filesystem debug: msg: Vdisks created, add them to your filesystem using mmadddisk diff --git a/roles/scale_ece/cluster/tasks/main.yml b/roles/ece_configure/tasks/main.yml similarity index 100% rename from 
roles/scale_ece/cluster/tasks/main.yml rename to roles/ece_configure/tasks/main.yml diff --git a/roles/remote_mount/tests/inventory b/roles/ece_configure/tests/inventory similarity index 100% rename from roles/remote_mount/tests/inventory rename to roles/ece_configure/tests/inventory diff --git a/roles/scale_ece/cluster/tests/test.yml b/roles/ece_configure/tests/test.yml similarity index 100% rename from roles/scale_ece/cluster/tests/test.yml rename to roles/ece_configure/tests/test.yml diff --git a/roles/scale_ece/cluster/vars/main.yml b/roles/ece_configure/vars/main.yml similarity index 100% rename from roles/scale_ece/cluster/vars/main.yml rename to roles/ece_configure/vars/main.yml diff --git a/roles/ece_install/README.md b/roles/ece_install/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/ece_install/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_ece/node/defaults/main.yml b/roles/ece_install/defaults/main.yml similarity index 100% rename from roles/scale_ece/node/defaults/main.yml rename to roles/ece_install/defaults/main.yml diff --git a/roles/nfs/node/handlers/main.yml b/roles/ece_install/handlers/main.yml similarity index 100% rename from roles/nfs/node/handlers/main.yml rename to roles/ece_install/handlers/main.yml diff --git a/roles/scale_ece/node/meta/main.yml b/roles/ece_install/meta/main.yml similarity index 60% rename from roles/scale_ece/node/meta/main.yml rename to roles/ece_install/meta/main.yml index f814fc31..82795402 100644 --- a/roles/scale_ece/node/meta/main.yml +++ b/roles/ece_install/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: scale_ece author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,14 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common diff --git a/roles/scale_ece/node/tasks/install.yml b/roles/ece_install/tasks/install.yml similarity index 100% rename from roles/scale_ece/node/tasks/install.yml rename to roles/ece_install/tasks/install.yml diff --git a/roles/scale_ece/node/tasks/install_dir_pkg.yml b/roles/ece_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_ece/node/tasks/install_dir_pkg.yml rename to roles/ece_install/tasks/install_dir_pkg.yml diff --git a/roles/scale_ece/node/tasks/install_local_pkg.yml b/roles/ece_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_ece/node/tasks/install_local_pkg.yml rename to roles/ece_install/tasks/install_local_pkg.yml diff --git a/roles/scale_ece/node/tasks/install_remote_pkg.yml b/roles/ece_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/scale_ece/node/tasks/install_remote_pkg.yml rename to roles/ece_install/tasks/install_remote_pkg.yml diff --git a/roles/scale_ece/node/tasks/install_repository.yml b/roles/ece_install/tasks/install_repository.yml similarity index 94% rename from roles/scale_ece/node/tasks/install_repository.yml rename to roles/ece_install/tasks/install_repository.yml index 2e952a3c..4cb384cd 100644 --- a/roles/scale_ece/node/tasks/install_repository.yml +++ b/roles/ece_install/tasks/install_repository.yml @@ -20,6 +20,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 
'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Add GPFS gnr packages to list diff --git a/roles/scale_ece/node/tasks/main.yml b/roles/ece_install/tasks/main.yml similarity index 100% rename from roles/scale_ece/node/tasks/main.yml rename to roles/ece_install/tasks/main.yml diff --git a/roles/scale_ece/node/tasks/yum/install.yml b/roles/ece_install/tasks/yum/install.yml similarity index 100% rename from roles/scale_ece/node/tasks/yum/install.yml rename to roles/ece_install/tasks/yum/install.yml diff --git a/roles/scale_ece/node/vars/main.yml b/roles/ece_install/vars/main.yml similarity index 100% rename from roles/scale_ece/node/vars/main.yml rename to roles/ece_install/vars/main.yml diff --git a/roles/ece_prepare/README.md b/roles/ece_prepare/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/ece_prepare/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_ece/precheck/meta/main.yml b/roles/ece_prepare/meta/main.yml similarity index 63% rename from roles/scale_ece/precheck/meta/main.yml rename to roles/ece_prepare/meta/main.yml index d09b13b7..fb8c5b02 100644 --- a/roles/scale_ece/precheck/meta/main.yml +++ b/roles/ece_prepare/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: scale_ece author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,13 +14,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_ece/precheck/tasks/check.yml b/roles/ece_prepare/tasks/check.yml similarity index 86% rename from roles/scale_ece/precheck/tasks/check.yml rename to roles/ece_prepare/tasks/check.yml index 6367d4f3..030b8b3d 100644 --- a/roles/scale_ece/precheck/tasks/check.yml +++ b/roles/ece_prepare/tasks/check.yml @@ -22,9 +22,9 @@ - name: check | Check if ece node is not protocol node assert: that: - - not hostvars[item]['is_protocol_node']|bool + - not hostvars[item]['scale_protocol_node']|bool fail_msg: "ECE node cannot be protocol node" - when: hostvars[item]['is_protocol_node'] is defined + when: hostvars[item]['scale_protocol_node'] is defined with_items: - "{{ scale_ece_nodes_list }}" run_once: true @@ -44,11 +44,10 @@ - name: check | Check if ece node is not nsd node assert: that: - - not hostvars[item]['is_nsd_server']|bool + - not hostvars[item]['scale_nsd_server']|bool fail_msg: "ECE node cannot be nsd server" - when: hostvars[item]['is_nsd_server'] is defined + when: hostvars[item]['scale_nsd_server'] is defined with_items: - "{{ scale_ece_nodes_list }}" run_once: true any_errors_fatal: true - diff --git a/roles/scale_ece/precheck/tasks/main.yml b/roles/ece_prepare/tasks/main.yml similarity index 100% rename from roles/scale_ece/precheck/tasks/main.yml rename to roles/ece_prepare/tasks/main.yml diff --git a/roles/ece_upgrade/README.md b/roles/ece_upgrade/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/ece_upgrade/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_ece/upgrade/defaults/main.yml b/roles/ece_upgrade/defaults/main.yml similarity index 100% rename from roles/scale_ece/upgrade/defaults/main.yml rename to 
roles/ece_upgrade/defaults/main.yml diff --git a/roles/nfs/upgrade/handlers/main.yml b/roles/ece_upgrade/handlers/main.yml similarity index 100% rename from roles/nfs/upgrade/handlers/main.yml rename to roles/ece_upgrade/handlers/main.yml diff --git a/roles/scale_ece/upgrade/meta/main.yml b/roles/ece_upgrade/meta/main.yml similarity index 60% rename from roles/scale_ece/upgrade/meta/main.yml rename to roles/ece_upgrade/meta/main.yml index f814fc31..82795402 100644 --- a/roles/scale_ece/upgrade/meta/main.yml +++ b/roles/ece_upgrade/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: scale_ece author: IBM Corporation description: Role for configuring IBM Spectrum Scale (GPFS) Erasure Code Edition configuration role company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,14 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common diff --git a/roles/scale_ece/upgrade/tasks/install.yml b/roles/ece_upgrade/tasks/install.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/install.yml rename to roles/ece_upgrade/tasks/install.yml diff --git a/roles/scale_ece/upgrade/tasks/install_dir_pkg.yml b/roles/ece_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/install_dir_pkg.yml rename to roles/ece_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/scale_ece/upgrade/tasks/install_local_pkg.yml b/roles/ece_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/install_local_pkg.yml rename to roles/ece_upgrade/tasks/install_local_pkg.yml diff --git a/roles/scale_ece/upgrade/tasks/install_remote_pkg.yml b/roles/ece_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/install_remote_pkg.yml rename to roles/ece_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/scale_ece/upgrade/tasks/install_repository.yml b/roles/ece_upgrade/tasks/install_repository.yml similarity index 94% rename from roles/scale_ece/upgrade/tasks/install_repository.yml rename to roles/ece_upgrade/tasks/install_repository.yml index 44c462c2..a9408e84 100644 --- a/roles/scale_ece/upgrade/tasks/install_repository.yml +++ b/roles/ece_upgrade/tasks/install_repository.yml @@ -20,6 +20,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Add GPFS gnr packages to list diff --git a/roles/scale_ece/upgrade/tasks/main.yml b/roles/ece_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/main.yml rename to roles/ece_upgrade/tasks/main.yml diff --git a/roles/scale_ece/upgrade/tasks/yum/install.yml b/roles/ece_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/scale_ece/upgrade/tasks/yum/install.yml rename to roles/ece_upgrade/tasks/yum/install.yml diff --git a/roles/scale_ece/upgrade/vars/main.yml b/roles/ece_upgrade/vars/main.yml similarity index 100% rename from roles/scale_ece/upgrade/vars/main.yml rename to roles/ece_upgrade/vars/main.yml diff --git a/roles/fal_configure/README.md b/roles/fal_configure/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/fal_configure/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git 
a/roles/scale_fileauditlogging/cluster/defaults/main.yml b/roles/fal_configure/defaults/main.yml similarity index 93% rename from roles/scale_fileauditlogging/cluster/defaults/main.yml rename to roles/fal_configure/defaults/main.yml index efa0389d..687fdbd7 100644 --- a/roles/scale_fileauditlogging/cluster/defaults/main.yml +++ b/roles/fal_configure/defaults/main.yml @@ -3,7 +3,7 @@ # either edit this file or define your own variables to override the defaults ## Flag to enable fileauditlogging -scale_fileauditlogging_enable: true +scale_fal_enable: true ## Default filesystem parameters for file audit logging- ## can be overridden for each filesystem individually diff --git a/roles/scale_fileauditlogging/cluster/handlers/main.yml b/roles/fal_configure/handlers/main.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/handlers/main.yml rename to roles/fal_configure/handlers/main.yml diff --git a/roles/scale_fileauditlogging/precheck/meta/main.yml b/roles/fal_configure/meta/main.yml similarity index 61% rename from roles/scale_fileauditlogging/precheck/meta/main.yml rename to roles/fal_configure/meta/main.yml index b3e6d5b4..171c6720 100644 --- a/roles/scale_fileauditlogging/precheck/meta/main.yml +++ b/roles/fal_configure/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: fileauditlogging author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,13 +14,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_fileauditlogging/cluster/tasks/configure.yml b/roles/fal_configure/tasks/configure.yml similarity index 85% rename from roles/scale_fileauditlogging/cluster/tasks/configure.yml rename to roles/fal_configure/tasks/configure.yml index ee8c5ac1..340dc8a8 100644 --- a/roles/scale_fileauditlogging/cluster/tasks/configure.yml +++ b/roles/fal_configure/tasks/configure.yml @@ -23,6 +23,13 @@ changed_when: false failed_when: false + - name: configure | Find existing File audit logging filesystem(s) + shell: + cmd: "/usr/lpp/mmfs/bin/mmaudit all list -Y | grep -v HEADER | cut -d ':' -f 8 | uniq" + register: scale_existing_audit + changed_when: false + failed_when: false + - name: configure | configure file audit logging vars: scale_fal_localspace_force: "{{ '--skip-local-space-check' if scale_fal_skip_localspace is defined else '' }}" @@ -33,6 +40,7 @@ {{ scale_fal_localspace_force }} register: scale_audit_command when: + - item not in scale_existing_audit.stdout_lines - scale_storage_fsdefs_audit is defined and scale_storage_fsdefs_audit | length >= 1 - (scale_storage_fsparams_audit[item].scale_fal_enable is defined) and (scale_storage_fsparams_audit[item].scale_fal_enable | bool) with_items: "{{ scale_storage_fsdefs_audit }}" diff --git a/roles/scale_fileauditlogging/cluster/tasks/configure_fal.yml b/roles/fal_configure/tasks/configure_fal.yml similarity index 85% rename from roles/scale_fileauditlogging/cluster/tasks/configure_fal.yml rename to roles/fal_configure/tasks/configure_fal.yml index 8c8b1fb1..d0228b65 100644 --- a/roles/scale_fileauditlogging/cluster/tasks/configure_fal.yml +++ b/roles/fal_configure/tasks/configure_fal.yml @@ -23,6 +23,13 @@ changed_when: false failed_when: false + - name: configure | Find existing File audit logging filesystem(s) + shell: + 
cmd: "/usr/lpp/mmfs/bin/mmaudit all list -Y | grep -v HEADER | cut -d ':' -f 8 | uniq" + register: scale_existing_audit + changed_when: false + failed_when: false + - name: configure | configure file audit logging vars: scale_fal_localspace_force: "{{ '--skip-local-space-check' if scale_fal_skip_localspace is defined else '' }}" @@ -33,6 +40,7 @@ {{ scale_fal_localspace_force }} register: scale_audit_command when: + - item not in scale_existing_audit.stdout_lines - scale_storage_fsdefs_audit is defined and scale_storage_fsdefs_audit | length >= 1 - (scale_storage_fsparams_audit[item].scale_fal_enable is defined) and (scale_storage_fsparams_audit[item].scale_fal_enable | bool) with_items: "{{ scale_storage_fsdefs_audit }}" diff --git a/roles/scale_fileauditlogging/cluster/tasks/main.yml b/roles/fal_configure/tasks/main.yml similarity index 76% rename from roles/scale_fileauditlogging/cluster/tasks/main.yml rename to roles/fal_configure/tasks/main.yml index 54ba3778..2fda5775 100644 --- a/roles/scale_fileauditlogging/cluster/tasks/main.yml +++ b/roles/fal_configure/tasks/main.yml @@ -3,13 +3,13 @@ - import_tasks: configure.yml tags: configure when: - - scale_fileauditlogging_enable | bool + - scale_fal_enable | bool - scale_filesystem is undefined - scale_storage is defined - import_tasks: configure_fal.yml tags: configure when: - - scale_fileauditlogging_enable | bool + - scale_fal_enable | bool - scale_filesystem is defined - scale_storage is undefined diff --git a/roles/scale_ece/cluster/tests/inventory b/roles/fal_configure/tests/inventory similarity index 100% rename from roles/scale_ece/cluster/tests/inventory rename to roles/fal_configure/tests/inventory diff --git a/roles/scale_fileauditlogging/cluster/tests/test.yml b/roles/fal_configure/tests/test.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/tests/test.yml rename to roles/fal_configure/tests/test.yml diff --git a/roles/scale_fileauditlogging/cluster/vars/main.yml b/roles/fal_configure/vars/main.yml similarity index 100% rename from roles/scale_fileauditlogging/cluster/vars/main.yml rename to roles/fal_configure/vars/main.yml diff --git a/roles/fal_install/README.md b/roles/fal_install/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/fal_install/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_fileauditlogging/node/defaults/main.yml b/roles/fal_install/defaults/main.yml similarity index 96% rename from roles/scale_fileauditlogging/node/defaults/main.yml rename to roles/fal_install/defaults/main.yml index efdccd2b..12bfc9f1 100644 --- a/roles/scale_fileauditlogging/node/defaults/main.yml +++ b/roles/fal_install/defaults/main.yml @@ -19,7 +19,7 @@ scale_auditlogging_packages: - gpfs.java ## Flag to enable fileauditlogging -scale_fileauditlogging_enable: true +scale_fal_enable: true ## To Enabled output from Ansible task in stdout and stderr for some tasks. 
## Run the playbook with -vv diff --git a/roles/scale_fileauditlogging/node/meta/main.yml b/roles/fal_install/meta/main.yml similarity index 59% rename from roles/scale_fileauditlogging/node/meta/main.yml rename to roles/fal_install/meta/main.yml index bfbb1825..ee84e753 100644 --- a/roles/scale_fileauditlogging/node/meta/main.yml +++ b/roles/fal_install/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: fileauditlogging author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,14 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common diff --git a/roles/scale_fileauditlogging/node/tasks/apt/install.yml b/roles/fal_install/tasks/apt/install.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/apt/install.yml rename to roles/fal_install/tasks/apt/install.yml diff --git a/roles/scale_fileauditlogging/node/tasks/install.yml b/roles/fal_install/tasks/install.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/install.yml rename to roles/fal_install/tasks/install.yml diff --git a/roles/scale_fileauditlogging/node/tasks/install_dir_pkg.yml b/roles/fal_install/tasks/install_dir_pkg.yml similarity index 98% rename from roles/scale_fileauditlogging/node/tasks/install_dir_pkg.yml rename to roles/fal_install/tasks/install_dir_pkg.yml index cb880dfe..2b49813d 100644 --- a/roles/scale_fileauditlogging/node/tasks/install_dir_pkg.yml +++ b/roles/fal_install/tasks/install_dir_pkg.yml @@ -75,4 +75,4 @@ with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/node/tasks/install_local_pkg.yml b/roles/fal_install/tasks/install_local_pkg.yml similarity index 84% rename from roles/scale_fileauditlogging/node/tasks/install_local_pkg.yml rename to roles/fal_install/tasks/install_local_pkg.yml index 6c221455..d83c841f 100644 --- a/roles/scale_fileauditlogging/node/tasks/install_local_pkg.yml +++ b/roles/fal_install/tasks/install_local_pkg.yml @@ -102,10 +102,32 @@ scale_fal_url: 'gpfs_rpms/rhel/' when: ansible_distribution in scale_rhel_distribution +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' #todo wrong - name: install | file audit logging path set_fact: @@ -139,4 +161,4 @@ 
scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}" with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/node/tasks/install_remote_pkg.yml b/roles/fal_install/tasks/install_remote_pkg.yml similarity index 80% rename from roles/scale_fileauditlogging/node/tasks/install_remote_pkg.yml rename to roles/fal_install/tasks/install_remote_pkg.yml index ba07df92..1996c3ec 100644 --- a/roles/scale_fileauditlogging/node/tasks/install_remote_pkg.yml +++ b/roles/fal_install/tasks/install_remote_pkg.yml @@ -90,10 +90,32 @@ scale_fal_url: 'gpfs_rpms/rhel/' when: ansible_distribution in scale_rhel_distribution +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel8/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' - name: install | file audit logging path set_fact: @@ -127,4 +149,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}" with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/node/tasks/install_repository.yml b/roles/fal_install/tasks/install_repository.yml similarity index 76% rename from roles/scale_fileauditlogging/node/tasks/install_repository.yml rename to roles/fal_install/tasks/install_repository.yml index 8e2e9b39..45f9f350 100644 --- a/roles/scale_fileauditlogging/node/tasks/install_repository.yml +++ b/roles/fal_install/tasks/install_repository.yml @@ -18,10 +18,27 @@ scale_fal_url: 'gpfs_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution + when: ansible_distribution in scale_ubuntu_distribution + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' - name: install | file audit logging path set_fact: @@ 
-40,6 +57,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure apt repository @@ -55,6 +73,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure zypper repository @@ -69,8 +88,9 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - + - name: install | Configure fal YUM repository yum_repository: name: spectrum-scale-fal @@ -83,6 +103,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure fal APT repository @@ -96,6 +117,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure fal repository @@ -107,6 +129,8 @@ state: present when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined + - scale_install_repository_url != 'existing' # # Add FAL packages @@ -116,4 +140,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ item ] }}" with_items: - "{{ scale_auditlogging_packages }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/node/tasks/main.yml b/roles/fal_install/tasks/main.yml similarity index 60% rename from roles/scale_fileauditlogging/node/tasks/main.yml rename to roles/fal_install/tasks/main.yml index ba0074f1..42e3a01d 100644 --- a/roles/scale_fileauditlogging/node/tasks/main.yml +++ b/roles/fal_install/tasks/main.yml @@ -2,4 +2,4 @@ # tasks file for install - import_tasks: install.yml tags: install - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/node/tasks/yum/install.yml b/roles/fal_install/tasks/yum/install.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/yum/install.yml rename to roles/fal_install/tasks/yum/install.yml diff --git a/roles/scale_fileauditlogging/node/tasks/zypper/install.yml b/roles/fal_install/tasks/zypper/install.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tasks/zypper/install.yml rename to roles/fal_install/tasks/zypper/install.yml diff --git a/roles/scale_fileauditlogging/cluster/tests/inventory b/roles/fal_install/tests/inventory similarity index 100% rename from roles/scale_fileauditlogging/cluster/tests/inventory rename to roles/fal_install/tests/inventory diff --git a/roles/scale_fileauditlogging/node/tests/test.yml b/roles/fal_install/tests/test.yml similarity index 100% rename from roles/scale_fileauditlogging/node/tests/test.yml rename to roles/fal_install/tests/test.yml diff --git a/roles/scale_fileauditlogging/node/vars/main.yml b/roles/fal_install/vars/main.yml similarity index 100% rename from roles/scale_fileauditlogging/node/vars/main.yml rename to roles/fal_install/vars/main.yml diff --git a/roles/fal_prepare/README.md b/roles/fal_prepare/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/fal_prepare/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_fileauditlogging/precheck/defaults/main.yml b/roles/fal_prepare/defaults/main.yml similarity 
index 100% rename from roles/scale_fileauditlogging/precheck/defaults/main.yml rename to roles/fal_prepare/defaults/main.yml diff --git a/roles/scale_fileauditlogging/precheck/handlers/main.yml b/roles/fal_prepare/handlers/main.yml similarity index 100% rename from roles/scale_fileauditlogging/precheck/handlers/main.yml rename to roles/fal_prepare/handlers/main.yml diff --git a/roles/scale_fileauditlogging/postcheck/meta/main.yml b/roles/fal_prepare/meta/main.yml similarity index 61% rename from roles/scale_fileauditlogging/postcheck/meta/main.yml rename to roles/fal_prepare/meta/main.yml index b3e6d5b4..171c6720 100644 --- a/roles/scale_fileauditlogging/postcheck/meta/main.yml +++ b/roles/fal_prepare/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: fileauditlogging author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,13 +14,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_fileauditlogging/precheck/tasks/main.yml b/roles/fal_prepare/tasks/main.yml similarity index 100% rename from roles/scale_fileauditlogging/precheck/tasks/main.yml rename to roles/fal_prepare/tasks/main.yml diff --git a/roles/scale_fileauditlogging/node/tests/inventory b/roles/fal_prepare/tests/inventory similarity index 100% rename from roles/scale_fileauditlogging/node/tests/inventory rename to roles/fal_prepare/tests/inventory diff --git a/roles/scale_fileauditlogging/precheck/tests/test.yml b/roles/fal_prepare/tests/test.yml similarity index 100% rename from roles/scale_fileauditlogging/precheck/tests/test.yml rename to roles/fal_prepare/tests/test.yml diff --git a/roles/scale_fileauditlogging/precheck/vars/main.yml b/roles/fal_prepare/vars/main.yml similarity index 100% rename from roles/scale_fileauditlogging/precheck/vars/main.yml rename to roles/fal_prepare/vars/main.yml diff --git a/roles/fal_upgrade/README.md b/roles/fal_upgrade/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/fal_upgrade/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_fileauditlogging/upgrade/defaults/main.yml b/roles/fal_upgrade/defaults/main.yml similarity index 96% rename from roles/scale_fileauditlogging/upgrade/defaults/main.yml rename to roles/fal_upgrade/defaults/main.yml index efdccd2b..12bfc9f1 100644 --- a/roles/scale_fileauditlogging/upgrade/defaults/main.yml +++ b/roles/fal_upgrade/defaults/main.yml @@ -19,7 +19,7 @@ scale_auditlogging_packages: - gpfs.java ## Flag to enable fileauditlogging -scale_fileauditlogging_enable: true +scale_fal_enable: true ## To Enabled output from Ansible task in stdout and stderr for some tasks. 
## Run the playbook with -vv diff --git a/roles/scale_fileauditlogging/upgrade/handlers/main.yml b/roles/fal_upgrade/handlers/main.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/handlers/main.yml rename to roles/fal_upgrade/handlers/main.yml diff --git a/roles/scale_fileauditlogging/upgrade/meta/main.yml b/roles/fal_upgrade/meta/main.yml similarity index 59% rename from roles/scale_fileauditlogging/upgrade/meta/main.yml rename to roles/fal_upgrade/meta/main.yml index bfbb1825..ee84e753 100644 --- a/roles/scale_fileauditlogging/upgrade/meta/main.yml +++ b/roles/fal_upgrade/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: fileauditlogging author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,14 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common diff --git a/roles/scale_fileauditlogging/upgrade/tasks/apt/install.yml b/roles/fal_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/apt/install.yml rename to roles/fal_upgrade/tasks/apt/install.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/install.yml b/roles/fal_upgrade/tasks/install.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/install.yml rename to roles/fal_upgrade/tasks/install.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/install_dir_pkg.yml b/roles/fal_upgrade/tasks/install_dir_pkg.yml similarity index 98% rename from roles/scale_fileauditlogging/upgrade/tasks/install_dir_pkg.yml rename to roles/fal_upgrade/tasks/install_dir_pkg.yml index b43ca33a..9ed654dd 100644 --- a/roles/scale_fileauditlogging/upgrade/tasks/install_dir_pkg.yml +++ b/roles/fal_upgrade/tasks/install_dir_pkg.yml @@ -75,4 +75,4 @@ with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/upgrade/tasks/install_local_pkg.yml b/roles/fal_upgrade/tasks/install_local_pkg.yml similarity index 90% rename from roles/scale_fileauditlogging/upgrade/tasks/install_local_pkg.yml rename to roles/fal_upgrade/tasks/install_local_pkg.yml index 070440fc..99d3e904 100644 --- a/roles/scale_fileauditlogging/upgrade/tasks/install_local_pkg.yml +++ b/roles/fal_upgrade/tasks/install_local_pkg.yml @@ -101,11 +101,23 @@ set_fact: scale_fal_url: 'gpfs_rpms/rhel/' when: ansible_distribution in scale_rhel_distribution - + - name: upgrade | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution + +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' #todo wrong - name: upgrade | file audit logging path set_fact: @@ -139,4 +151,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}" with_items: - 
"{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/upgrade/tasks/install_remote_pkg.yml b/roles/fal_upgrade/tasks/install_remote_pkg.yml similarity index 87% rename from roles/scale_fileauditlogging/upgrade/tasks/install_remote_pkg.yml rename to roles/fal_upgrade/tasks/install_remote_pkg.yml index 195bb418..31b0c0c7 100644 --- a/roles/scale_fileauditlogging/upgrade/tasks/install_remote_pkg.yml +++ b/roles/fal_upgrade/tasks/install_remote_pkg.yml @@ -89,12 +89,24 @@ set_fact: scale_fal_url: 'gpfs_rpms/rhel/' when: ansible_distribution in scale_rhel_distribution - + - name: upgrade | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' + - name: upgrade | file audit logging path set_fact: scale_fal_url: 'gpfs_rpms/rhel/' @@ -127,4 +139,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}" with_items: - "{{ scale_install_gpfs_fal.files.0.path | basename }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/upgrade/tasks/install_repository.yml b/roles/fal_upgrade/tasks/install_repository.yml similarity index 78% rename from roles/scale_fileauditlogging/upgrade/tasks/install_repository.yml rename to roles/fal_upgrade/tasks/install_repository.yml index 8420eaa7..8d07a61b 100644 --- a/roles/scale_fileauditlogging/upgrade/tasks/install_repository.yml +++ b/roles/fal_upgrade/tasks/install_repository.yml @@ -25,11 +25,28 @@ scale_fal_url: 'gpfs_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | file audit logging path + set_fact: + scale_fal_url: 'gpfs_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: upgrade | file audit logging path set_fact: scale_fal_url: 'gpfs_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: upgrade | file audit logging path + set_fact: + scale_fal_url: 'gpfs_debs/ubuntu/ubuntu22/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' + - name: upgrade | file audit logging path set_fact: scale_fal_url: 'gpfs_rpms/sles15/' @@ -47,6 +64,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure apt repository @@ -62,6 +80,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # # Configure zypper repository @@ -76,8 +95,9 @@ overwrite_multiple: yes 
when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - + - name: upgrade | Configure fal YUM repository yum_repository: name: spectrum-scale-fal @@ -90,6 +110,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure fal APT repository @@ -103,6 +124,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure fal repository @@ -114,6 +136,8 @@ state: present when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined + - scale_install_repository_url != 'existing' # # Add FAL packages @@ -123,4 +147,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ item ] }}" with_items: - "{{ scale_auditlogging_packages }}" - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/upgrade/tasks/main.yml b/roles/fal_upgrade/tasks/main.yml similarity index 60% rename from roles/scale_fileauditlogging/upgrade/tasks/main.yml rename to roles/fal_upgrade/tasks/main.yml index ba0074f1..42e3a01d 100644 --- a/roles/scale_fileauditlogging/upgrade/tasks/main.yml +++ b/roles/fal_upgrade/tasks/main.yml @@ -2,4 +2,4 @@ # tasks file for install - import_tasks: install.yml tags: install - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/upgrade/tasks/yum/install.yml b/roles/fal_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/yum/install.yml rename to roles/fal_upgrade/tasks/yum/install.yml diff --git a/roles/scale_fileauditlogging/upgrade/tasks/zypper/install.yml b/roles/fal_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tasks/zypper/install.yml rename to roles/fal_upgrade/tasks/zypper/install.yml diff --git a/roles/scale_fileauditlogging/postcheck/tests/inventory b/roles/fal_upgrade/tests/inventory similarity index 100% rename from roles/scale_fileauditlogging/postcheck/tests/inventory rename to roles/fal_upgrade/tests/inventory diff --git a/roles/scale_fileauditlogging/upgrade/tests/test.yml b/roles/fal_upgrade/tests/test.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tests/test.yml rename to roles/fal_upgrade/tests/test.yml diff --git a/roles/scale_fileauditlogging/upgrade/vars/main.yml b/roles/fal_upgrade/vars/main.yml similarity index 100% rename from roles/scale_fileauditlogging/upgrade/vars/main.yml rename to roles/fal_upgrade/vars/main.yml diff --git a/roles/fal_verify/README.md b/roles/fal_verify/README.md new file mode 120000 index 00000000..fe840054 --- /dev/null +++ b/roles/fal_verify/README.md @@ -0,0 +1 @@ +../../README.md \ No newline at end of file diff --git a/roles/scale_fileauditlogging/postcheck/defaults/main.yml b/roles/fal_verify/defaults/main.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/defaults/main.yml rename to roles/fal_verify/defaults/main.yml diff --git a/roles/scale_fileauditlogging/postcheck/handlers/main.yml b/roles/fal_verify/handlers/main.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/handlers/main.yml rename to roles/fal_verify/handlers/main.yml diff --git 
a/roles/scale_fileauditlogging/cluster/meta/main.yml b/roles/fal_verify/meta/main.yml similarity index 61% rename from roles/scale_fileauditlogging/cluster/meta/main.yml rename to roles/fal_verify/meta/main.yml index b3e6d5b4..171c6720 100644 --- a/roles/scale_fileauditlogging/cluster/meta/main.yml +++ b/roles/fal_verify/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: fileauditlogging author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) fileauditlogging company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,13 +14,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_fileauditlogging/postcheck/tasks/check.yml b/roles/fal_verify/tasks/check.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/tasks/check.yml rename to roles/fal_verify/tasks/check.yml diff --git a/roles/scale_fileauditlogging/postcheck/tasks/main.yml b/roles/fal_verify/tasks/main.yml similarity index 60% rename from roles/scale_fileauditlogging/postcheck/tasks/main.yml rename to roles/fal_verify/tasks/main.yml index 9b15e230..f1fb03f1 100644 --- a/roles/scale_fileauditlogging/postcheck/tasks/main.yml +++ b/roles/fal_verify/tasks/main.yml @@ -2,4 +2,4 @@ # tasks file for postcheck - include_tasks: check.yml tags: always - when: (scale_fileauditlogging_enable | bool) + when: (scale_fal_enable | bool) diff --git a/roles/scale_fileauditlogging/precheck/tests/inventory b/roles/fal_verify/tests/inventory similarity index 100% rename from roles/scale_fileauditlogging/precheck/tests/inventory rename to roles/fal_verify/tests/inventory diff --git a/roles/scale_fileauditlogging/postcheck/tests/test.yml b/roles/fal_verify/tests/test.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/tests/test.yml rename to roles/fal_verify/tests/test.yml diff --git a/roles/scale_fileauditlogging/postcheck/vars/main.yml b/roles/fal_verify/vars/main.yml similarity index 100% rename from roles/scale_fileauditlogging/postcheck/vars/main.yml rename to roles/fal_verify/vars/main.yml diff --git a/roles/gui/cluster/meta/main.yml b/roles/gui/cluster/meta/main.yml deleted file mode 100644 index 211a3da2..00000000 --- a/roles/gui/cluster/meta/main.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -galaxy_info: - role_name: gui_cluster - author: IBM Corporation - description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.8 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui - -dependencies: - - core/common - - zimon/cluster diff --git a/roles/gui_configure/README.md b/roles/gui_configure/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/gui_configure/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/gui/cluster/defaults/main.yml b/roles/gui_configure/defaults/main.yml similarity index 100% rename from roles/gui/cluster/defaults/main.yml rename to roles/gui_configure/defaults/main.yml diff --git a/roles/gui/upgrade/meta/main.yml b/roles/gui_configure/meta/main.yml similarity index 61% rename from roles/gui/upgrade/meta/main.yml rename to roles/gui_configure/meta/main.yml index aa0a3dab..3896f0fa 100644 --- 
a/roles/gui/upgrade/meta/main.yml +++ b/roles/gui_configure/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: gui_node author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,14 +14,8 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.perfmon_configure diff --git a/roles/gui/cluster/tasks/chpasswdpolicy.yml b/roles/gui_configure/tasks/chpasswdpolicy.yml similarity index 100% rename from roles/gui/cluster/tasks/chpasswdpolicy.yml rename to roles/gui_configure/tasks/chpasswdpolicy.yml diff --git a/roles/gui/cluster/tasks/configure.yml b/roles/gui_configure/tasks/configure.yml similarity index 83% rename from roles/gui/cluster/tasks/configure.yml rename to roles/gui_configure/tasks/configure.yml index 0ff8e7d8..c96bc766 100644 --- a/roles/gui/cluster/tasks/configure.yml +++ b/roles/gui_configure/tasks/configure.yml @@ -15,8 +15,17 @@ name: gpfsgui state: started enabled: true + no_block: true when: scale_cluster_gui | bool +# Verify GUI is up and running +- name: Wait until gpfsgui is up and running + shell: "systemctl is-active gpfsgui" + register: systemctl_out + until: systemctl_out.stdout == "active" + retries: 10 + delay: 20 + # # Initialize the GUI so that user dont need to wait and HTTPs certificate and be imported. # diff --git a/roles/gui/cluster/tasks/email.yml b/roles/gui_configure/tasks/email.yml similarity index 100% rename from roles/gui/cluster/tasks/email.yml rename to roles/gui_configure/tasks/email.yml diff --git a/roles/gui/cluster/tasks/hasi_vault_certificate.yml b/roles/gui_configure/tasks/hasi_vault_certificate.yml similarity index 100% rename from roles/gui/cluster/tasks/hasi_vault_certificate.yml rename to roles/gui_configure/tasks/hasi_vault_certificate.yml diff --git a/roles/gui/cluster/tasks/hasi_vault_user.yml b/roles/gui_configure/tasks/hasi_vault_user.yml similarity index 100% rename from roles/gui/cluster/tasks/hasi_vault_user.yml rename to roles/gui_configure/tasks/hasi_vault_user.yml diff --git a/roles/gui/cluster/tasks/ldap.yml b/roles/gui_configure/tasks/ldap.yml similarity index 100% rename from roles/gui/cluster/tasks/ldap.yml rename to roles/gui_configure/tasks/ldap.yml diff --git a/roles/gui/cluster/tasks/main.yml b/roles/gui_configure/tasks/main.yml similarity index 68% rename from roles/gui/cluster/tasks/main.yml rename to roles/gui_configure/tasks/main.yml index c82851c7..8d0c0ca2 100644 --- a/roles/gui/cluster/tasks/main.yml +++ b/roles/gui_configure/tasks/main.yml @@ -13,11 +13,22 @@ - scale_gui_password_policy_change | bool tags: chpasswdpolicy +- name: check | Check gui nodes if defined + add_host: + name: "{{ hostvars[item]['inventory_hostname'] }}" + groups: scale_gui_defined_listnodes + when: + - hostvars[item].scale_cluster_gui is defined + - (hostvars[item].scale_cluster_gui is defined and hostvars[item].scale_cluster_gui | bool) + with_items: "{{ ansible_play_hosts }}" + changed_when: false + - import_tasks: users.yml when: - - scale_cluster_gui | bool - scale_gui_admin_user is defined - scale_gui_admin_hc_vault_user is not defined + - groups['scale_gui_defined_listnodes'] is defined and groups['scale_gui_defined_listnodes'] | length > 0 + delegate_to: "{{ 
groups['scale_gui_defined_listnodes'].0 }}" tags: users - import_tasks: ldap.yml diff --git a/roles/gui/cluster/tasks/snmp.yml b/roles/gui_configure/tasks/snmp.yml similarity index 100% rename from roles/gui/cluster/tasks/snmp.yml rename to roles/gui_configure/tasks/snmp.yml diff --git a/roles/gui/cluster/tasks/users.yml b/roles/gui_configure/tasks/users.yml similarity index 100% rename from roles/gui/cluster/tasks/users.yml rename to roles/gui_configure/tasks/users.yml diff --git a/roles/gui_install/README.md b/roles/gui_install/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/gui_install/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/gui/node/defaults/main.yml b/roles/gui_install/defaults/main.yml similarity index 100% rename from roles/gui/node/defaults/main.yml rename to roles/gui_install/defaults/main.yml diff --git a/roles/gui/precheck/meta/main.yml b/roles/gui_install/meta/main.yml similarity index 61% rename from roles/gui/precheck/meta/main.yml rename to roles/gui_install/meta/main.yml index ed1f3091..f8d2be23 100644 --- a/roles/gui/precheck/meta/main.yml +++ b/roles/gui_install/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: gui_precheck author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,14 +14,8 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.perfmon_install diff --git a/roles/gui/node/tasks/apt/install.yml b/roles/gui_install/tasks/apt/install.yml similarity index 100% rename from roles/gui/node/tasks/apt/install.yml rename to roles/gui_install/tasks/apt/install.yml diff --git a/roles/gui/node/tasks/install.yml b/roles/gui_install/tasks/install.yml similarity index 100% rename from roles/gui/node/tasks/install.yml rename to roles/gui_install/tasks/install.yml diff --git a/roles/gui/node/tasks/install_dir_pkg.yml b/roles/gui_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/gui/node/tasks/install_dir_pkg.yml rename to roles/gui_install/tasks/install_dir_pkg.yml diff --git a/roles/gui/node/tasks/install_local_pkg.yml b/roles/gui_install/tasks/install_local_pkg.yml similarity index 100% rename from roles/gui/node/tasks/install_local_pkg.yml rename to roles/gui_install/tasks/install_local_pkg.yml diff --git a/roles/gui/node/tasks/install_remote_pkg.yml b/roles/gui_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/gui/node/tasks/install_remote_pkg.yml rename to roles/gui_install/tasks/install_remote_pkg.yml diff --git a/roles/gui/node/tasks/install_repository.yml b/roles/gui_install/tasks/install_repository.yml similarity index 91% rename from roles/gui/node/tasks/install_repository.yml rename to roles/gui_install/tasks/install_repository.yml index 21156435..70396d60 100644 --- a/roles/gui/node/tasks/install_repository.yml +++ b/roles/gui_install/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure gui APT repository @@ -29,6 +30,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' 
+ - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure GUI repository @@ -41,6 +43,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/gui/node/tasks/main.yml b/roles/gui_install/tasks/main.yml similarity index 100% rename from roles/gui/node/tasks/main.yml rename to roles/gui_install/tasks/main.yml diff --git a/roles/gui/node/tasks/yum/install.yml b/roles/gui_install/tasks/yum/install.yml similarity index 100% rename from roles/gui/node/tasks/yum/install.yml rename to roles/gui_install/tasks/yum/install.yml diff --git a/roles/gui/node/tasks/zypper/install.yml b/roles/gui_install/tasks/zypper/install.yml similarity index 100% rename from roles/gui/node/tasks/zypper/install.yml rename to roles/gui_install/tasks/zypper/install.yml diff --git a/roles/gui/node/vars/main.yml b/roles/gui_install/vars/main.yml similarity index 100% rename from roles/gui/node/vars/main.yml rename to roles/gui_install/vars/main.yml diff --git a/roles/gui_prepare/README.md b/roles/gui_prepare/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/gui_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/gui/precheck/defaults/main.yml b/roles/gui_prepare/defaults/main.yml similarity index 100% rename from roles/gui/precheck/defaults/main.yml rename to roles/gui_prepare/defaults/main.yml diff --git a/roles/gui/node/meta/main.yml b/roles/gui_prepare/meta/main.yml similarity index 59% rename from roles/gui/node/meta/main.yml rename to roles/gui_prepare/meta/main.yml index 4b239948..bdbab499 100644 --- a/roles/gui/node/meta/main.yml +++ b/roles/gui_prepare/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: gui_node author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,15 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common - - zimon/node + - ibm.spectrum_scale.core_common diff --git a/roles/gui/precheck/tasks/inventory_check.yml b/roles/gui_prepare/tasks/inventory_check.yml similarity index 100% rename from roles/gui/precheck/tasks/inventory_check.yml rename to roles/gui_prepare/tasks/inventory_check.yml diff --git a/roles/gui/precheck/tasks/main.yml b/roles/gui_prepare/tasks/main.yml similarity index 100% rename from roles/gui/precheck/tasks/main.yml rename to roles/gui_prepare/tasks/main.yml diff --git a/roles/gui_upgrade/README.md b/roles/gui_upgrade/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/gui_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/gui/upgrade/defaults/main.yml b/roles/gui_upgrade/defaults/main.yml similarity index 100% rename from roles/gui/upgrade/defaults/main.yml rename to roles/gui_upgrade/defaults/main.yml diff --git a/roles/gui_upgrade/meta/main.yml b/roles/gui_upgrade/meta/main.yml new file mode 100644 index 00000000..bdbab499 --- /dev/null +++ b/roles/gui_upgrade/meta/main.yml @@ -0,0 +1,20 @@ +--- +galaxy_info: + author: IBM Corporation + description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User 
Interface (GUI) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.core_common diff --git a/roles/gui/upgrade/tasks/apt/install.yml b/roles/gui_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/gui/upgrade/tasks/apt/install.yml rename to roles/gui_upgrade/tasks/apt/install.yml diff --git a/roles/gui/upgrade/tasks/install.yml b/roles/gui_upgrade/tasks/install.yml similarity index 100% rename from roles/gui/upgrade/tasks/install.yml rename to roles/gui_upgrade/tasks/install.yml diff --git a/roles/gui/upgrade/tasks/install_dir_pkg.yml b/roles/gui_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/gui/upgrade/tasks/install_dir_pkg.yml rename to roles/gui_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/gui/upgrade/tasks/install_local_pkg.yml b/roles/gui_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/gui/upgrade/tasks/install_local_pkg.yml rename to roles/gui_upgrade/tasks/install_local_pkg.yml diff --git a/roles/gui/upgrade/tasks/install_remote_pkg.yml b/roles/gui_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/gui/upgrade/tasks/install_remote_pkg.yml rename to roles/gui_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/gui/upgrade/tasks/install_repository.yml b/roles/gui_upgrade/tasks/install_repository.yml similarity index 91% rename from roles/gui/upgrade/tasks/install_repository.yml rename to roles/gui_upgrade/tasks/install_repository.yml index 6b4a50d8..c1f1fb00 100644 --- a/roles/gui/upgrade/tasks/install_repository.yml +++ b/roles/gui_upgrade/tasks/install_repository.yml @@ -16,6 +16,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure gui APT repository @@ -29,6 +30,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure GUI repository @@ -41,6 +43,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' # diff --git a/roles/gui/upgrade/tasks/main.yml b/roles/gui_upgrade/tasks/main.yml similarity index 100% rename from roles/gui/upgrade/tasks/main.yml rename to roles/gui_upgrade/tasks/main.yml diff --git a/roles/gui/upgrade/tasks/yum/install.yml b/roles/gui_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/gui/upgrade/tasks/yum/install.yml rename to roles/gui_upgrade/tasks/yum/install.yml diff --git a/roles/gui/upgrade/tasks/zypper/install.yml b/roles/gui_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/gui/upgrade/tasks/zypper/install.yml rename to roles/gui_upgrade/tasks/zypper/install.yml diff --git a/roles/gui/upgrade/vars/main.yml b/roles/gui_upgrade/vars/main.yml similarity index 100% rename from roles/gui/upgrade/vars/main.yml rename to roles/gui_upgrade/vars/main.yml diff --git a/roles/gui_verify/README.md b/roles/gui_verify/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/gui_verify/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/gui/postcheck/defaults/main.yml b/roles/gui_verify/defaults/main.yml similarity index 100% rename from 
roles/gui/postcheck/defaults/main.yml rename to roles/gui_verify/defaults/main.yml diff --git a/roles/gui/postcheck/meta/main.yml b/roles/gui_verify/meta/main.yml similarity index 63% rename from roles/gui/postcheck/meta/main.yml rename to roles/gui_verify/meta/main.yml index 26b463e5..9012eb68 100644 --- a/roles/gui/postcheck/meta/main.yml +++ b/roles/gui_verify/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: gui_postcheck author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,13 +14,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/gui/postcheck/tasks/main.yml b/roles/gui_verify/tasks/main.yml similarity index 100% rename from roles/gui/postcheck/tasks/main.yml rename to roles/gui_verify/tasks/main.yml diff --git a/roles/scale_hdfs/cluster/.travis.yml b/roles/hdfs_configure/.travis.yml similarity index 100% rename from roles/scale_hdfs/cluster/.travis.yml rename to roles/hdfs_configure/.travis.yml diff --git a/roles/hdfs_configure/README.md b/roles/hdfs_configure/README.md new file mode 120000 index 00000000..0c5f1b61 --- /dev/null +++ b/roles/hdfs_configure/README.md @@ -0,0 +1 @@ +../../docs/README.HDFS.md \ No newline at end of file diff --git a/roles/scale_hdfs/cluster/defaults/main.yml b/roles/hdfs_configure/defaults/main.yml similarity index 100% rename from roles/scale_hdfs/cluster/defaults/main.yml rename to roles/hdfs_configure/defaults/main.yml diff --git a/roles/hdfs_configure/meta/main.yml b/roles/hdfs_configure/meta/main.yml new file mode 100644 index 00000000..c42d702c --- /dev/null +++ b/roles/hdfs_configure/meta/main.yml @@ -0,0 +1,20 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.ces_common diff --git a/roles/scale_hdfs/cluster/tasks/append_dict.yml b/roles/hdfs_configure/tasks/append_dict.yml similarity index 100% rename from roles/scale_hdfs/cluster/tasks/append_dict.yml rename to roles/hdfs_configure/tasks/append_dict.yml diff --git a/roles/scale_hdfs/cluster/tasks/configure.yml b/roles/hdfs_configure/tasks/configure.yml similarity index 98% rename from roles/scale_hdfs/cluster/tasks/configure.yml rename to roles/hdfs_configure/tasks/configure.yml index e7428461..e489bc59 100644 --- a/roles/scale_hdfs/cluster/tasks/configure.yml +++ b/roles/hdfs_configure/tasks/configure.yml @@ -328,8 +328,10 @@ command: /usr/lpp/mmfs/bin/mmces service enable HDFS - name: Start Namenodes - shell: /usr/lpp/mmfs/hadoop/sbin/mmhdfs hdfs-nn restart - register: start_nn_status + shell: /usr/lpp/mmfs/bin/mmces service start HDFS -N "{{ nn_items }}" + with_items: "{{ scale_hdfs_namenodes_list }}" + loop_control: + loop_var: nn_items - name: Check Namenodes running status shell: /usr/lpp/mmfs/hadoop/sbin/mmhdfs hdfs-nn status | grep 'namenode pid is' | wc -l diff --git a/roles/scale_hdfs/cluster/tasks/env_setup.yml b/roles/hdfs_configure/tasks/env_setup.yml similarity index 86% rename from roles/scale_hdfs/cluster/tasks/env_setup.yml rename to roles/hdfs_configure/tasks/env_setup.yml 
index b090ebc7..cc099e5d 100644 --- a/roles/scale_hdfs/cluster/tasks/env_setup.yml +++ b/roles/hdfs_configure/tasks/env_setup.yml @@ -38,7 +38,7 @@ state: present line: "export JAVA_HOME={{ javahome_path }}" when: - - ansible_fqdn in scale_hdfs_nodes_list + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: "env_setup | HDFS and GPFS bin to PATH" lineinfile: @@ -46,7 +46,7 @@ state: present line: 'export PATH=$PATH:$JAVA_HOME/bin:/usr/lpp/mmfs/bin:/usr/lpp/mmfs/hadoop/sbin:/usr/lpp/mmfs/hadoop/bin' when: - - ansible_fqdn in scale_hdfs_nodes_list + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: "env_setup | ulimit tunning" lineinfile: @@ -61,4 +61,4 @@ loop_control: loop_var: limit_items when: - - ansible_fqdn in scale_hdfs_nodes_list \ No newline at end of file + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list \ No newline at end of file diff --git a/roles/scale_hdfs/cluster/tasks/main.yml b/roles/hdfs_configure/tasks/main.yml similarity index 100% rename from roles/scale_hdfs/cluster/tasks/main.yml rename to roles/hdfs_configure/tasks/main.yml diff --git a/roles/scale_hdfs/cluster/vars/main.yml b/roles/hdfs_configure/vars/main.yml similarity index 72% rename from roles/scale_hdfs/cluster/vars/main.yml rename to roles/hdfs_configure/vars/main.yml index afc76b28..c4c08015 100644 --- a/roles/scale_hdfs/cluster/vars/main.yml +++ b/roles/hdfs_configure/vars/main.yml @@ -6,8 +6,7 @@ scale_command_path: /usr/lpp/mmfs/bin/ # default mm command exection path for hdfs -hdfs_command_path: /usr/lpp/mmfs/hadoop/bin/ +scale_hdfs_command_path: /usr/lpp/mmfs/hadoop/bin/ # default mmhdfs command exection path -mmhdfs_command_path: /usr/lpp/mmfs/hadoop/sbin/ - +scale_mmhdfs_command_path: /usr/lpp/mmfs/hadoop/sbin/ diff --git a/roles/scale_hdfs/node/.travis.yml b/roles/hdfs_install/.travis.yml similarity index 100% rename from roles/scale_hdfs/node/.travis.yml rename to roles/hdfs_install/.travis.yml diff --git a/roles/hdfs_install/README.md b/roles/hdfs_install/README.md new file mode 120000 index 00000000..0c5f1b61 --- /dev/null +++ b/roles/hdfs_install/README.md @@ -0,0 +1 @@ +../../docs/README.HDFS.md \ No newline at end of file diff --git a/roles/scale_hdfs/node/defaults/main.yml b/roles/hdfs_install/defaults/main.yml similarity index 62% rename from roles/scale_hdfs/node/defaults/main.yml rename to roles/hdfs_install/defaults/main.yml index 054aec8a..a7c01e37 100644 --- a/roles/scale_hdfs/node/defaults/main.yml +++ b/roles/hdfs_install/defaults/main.yml @@ -12,16 +12,19 @@ scale_hdfs_packages: gpfs.hdfs-protocol scale_install_localpkg_tmpdir_path: /tmp ## Flag to install hdfs debug package -install_debuginfo: true +scale_hdfs_install_debuginfo: true # Directory to install 3.1.1.x hdfs package -hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' +scale_hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' # Directory to install 3.3.x hdfs package -hdfs_rhel_version_path_33: 'hdfs_rpms/rhel/hdfs_3.3.x/' +scale_hdfs_rhel_version_path_33: 'hdfs_rpms/rhel/hdfs_3.3.x/' + +# Directory to install 3.2.2.x hdfs package +scale_hdfs_rhel_version_path_322: 'hdfs_rpms/rhel/hdfs_3.2.2.x/' # Directory to install 3.1.1.x hdfs package -hdfs_sles_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' +scale_hdfs_sles_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' # Directory to install 3.1.1.x hdfs package -hdfs_ubuntu_version_path: 'hdfs_debs/ubuntu/hdfs_3.1.1.x/' +scale_hdfs_ubuntu_version_path: 
'hdfs_debs/ubuntu/hdfs_3.1.1.x/' diff --git a/roles/scale_ece/node/handlers/main.yml b/roles/hdfs_install/handlers/main.yml similarity index 100% rename from roles/scale_ece/node/handlers/main.yml rename to roles/hdfs_install/handlers/main.yml diff --git a/roles/nfs/upgrade/meta/main.yml b/roles/hdfs_install/meta/main.yml similarity index 67% rename from roles/nfs/upgrade/meta/main.yml rename to roles/hdfs_install/meta/main.yml index 07ad9f01..c75a53b2 100644 --- a/roles/nfs/upgrade/meta/main.yml +++ b/roles/hdfs_install/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: nfs_node author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,11 +14,8 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.hdfs_prepare diff --git a/roles/scale_hdfs/node/tasks/install.yml b/roles/hdfs_install/tasks/install.yml similarity index 85% rename from roles/scale_hdfs/node/tasks/install.yml rename to roles/hdfs_install/tasks/install.yml index 5f7589a6..88fa0cf6 100644 --- a/roles/scale_hdfs/node/tasks/install.yml +++ b/roles/hdfs_install/tasks/install.yml @@ -75,12 +75,12 @@ run_once: true delegate_to: localhost -# Run chosen installation method to get list of RPMs +# Run chosen installation method to get list of RPMs - name: install | Set the extracted package directory path set_fact: hdfs_extracted_path: "{{ scale_extracted_path }}" - hdfs_version_path_selection_rhel: "{{ hdfs_rhel_version_path }}" + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path }}" - name: install | Stat extracted packages directory stat: @@ -90,27 +90,32 @@ - include_tasks: prepare_env.yml - block: - - name: install | Fetch hdfs version + - name: install | Fetch hdfs version set_fact: - hdfs_version_path_selection_rhel: "{{ hdfs_rhel_version_path_33 }}" + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path_33 }}" when: transparency_33_enabled|bool + - name: install | Fetch hdfs version + set_fact: + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path_322 }}" + when: transparency_322_enabled|bool + - name: install | Fetch hdfs rpm dir path for rhel set_fact: - hdfs_rpm_path_rhel: "{{ hdfs_version_path_selection_rhel }}" + hdfs_rpm_path_rhel: "{{ hdfs_version_path_selection_rhel }}" - name: install | Set correct hdfs rpm dir path for scale release lower 5.1.2 set_fact: - hdfs_rpm_path_rhel: "{{ hdfs_rpm_path_rhel | replace('/rhel/','/rhel7/') }}" + hdfs_rpm_path_rhel: "{{ hdfs_rpm_path_rhel | replace('/rhel/','/rhel7/') }}" when: scale_version is version_compare('5.1.2', '<') - name: install | Fetch hdfs rpm dir path for sles set_fact: - hdfs_rpm_path_sles: "{{ hdfs_sles_version_path }}" - + hdfs_rpm_path_sles: "{{ scale_hdfs_sles_version_path }}" + - name: install | Fetch hdfs rpm dir path for ubuntu set_fact: - hdfs_rpm_path_ubuntu: "{{ hdfs_ubuntu_version_path }}" + hdfs_rpm_path_ubuntu: "{{ scale_hdfs_ubuntu_version_path }}" run_once: true delegate_to: localhost diff --git a/roles/scale_hdfs/node/tasks/install_dir_pkg.yml b/roles/hdfs_install/tasks/install_dir_pkg.yml similarity index 96% rename from roles/scale_hdfs/node/tasks/install_dir_pkg.yml rename to roles/hdfs_install/tasks/install_dir_pkg.yml index 035fce33..7cf61453 100644 --- 
a/roles/scale_hdfs/node/tasks/install_dir_pkg.yml +++ b/roles/hdfs_install/tasks/install_dir_pkg.yml @@ -91,5 +91,5 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_hdfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/scale_hdfs/node/tasks/install_local_pkg.yml b/roles/hdfs_install/tasks/install_local_pkg.yml similarity index 97% rename from roles/scale_hdfs/node/tasks/install_local_pkg.yml rename to roles/hdfs_install/tasks/install_local_pkg.yml index be9b38b3..a852ab3e 100644 --- a/roles/scale_hdfs/node/tasks/install_local_pkg.yml +++ b/roles/hdfs_install/tasks/install_local_pkg.yml @@ -155,4 +155,4 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_hdfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/scale_hdfs/node/tasks/install_remote_pkg.yml b/roles/hdfs_install/tasks/install_remote_pkg.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/install_remote_pkg.yml rename to roles/hdfs_install/tasks/install_remote_pkg.yml diff --git a/roles/scale_hdfs/node/tasks/install_repository.yml b/roles/hdfs_install/tasks/install_repository.yml similarity index 81% rename from roles/scale_hdfs/node/tasks/install_repository.yml rename to roles/hdfs_install/tasks/install_repository.yml index 14bae69c..a2a9a9ac 100644 --- a/roles/scale_hdfs/node/tasks/install_repository.yml +++ b/roles/hdfs_install/tasks/install_repository.yml @@ -9,6 +9,11 @@ scale_hdfs_url: "{{ hdfs_rpm_path_rhel }}" when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | hdfs path on rhel9 + set_fact: + scale_hdfs_url: "{{ hdfs_rpm_path_rhel }}" + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | Configure hdfs YUM repository yum_repository: name: spectrum-scale-hdfs @@ -21,6 +26,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Add GPFS hdfs packages to list diff --git a/roles/scale_hdfs/node/tasks/main.yml b/roles/hdfs_install/tasks/main.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/main.yml rename to roles/hdfs_install/tasks/main.yml diff --git a/roles/scale_hdfs/node/tasks/prepare_env.yml b/roles/hdfs_install/tasks/prepare_env.yml similarity index 100% rename from roles/scale_hdfs/node/tasks/prepare_env.yml rename to roles/hdfs_install/tasks/prepare_env.yml diff --git a/roles/hdfs_install/tasks/yum/install.yml b/roles/hdfs_install/tasks/yum/install.yml new file mode 100644 index 00000000..32137175 --- /dev/null +++ b/roles/hdfs_install/tasks/yum/install.yml @@ -0,0 +1,7 @@ +--- +- name: install | Install GPFS HDFS packages + yum: + name: "{{ scale_install_all_packages }}" + state: present + disable_gpg_check: "{{ scale_disable_gpgcheck }}" + when: ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list or ansible_fqdn in scale_protocol_nodes_list or inventory_hostname in scale_protocol_nodes_list diff 
--git a/roles/scale_hdfs/node/vars/main.yml b/roles/hdfs_install/vars/main.yml similarity index 100% rename from roles/scale_hdfs/node/vars/main.yml rename to roles/hdfs_install/vars/main.yml diff --git a/roles/scale_hdfs/postcheck/.travis.yml b/roles/hdfs_prepare/.travis.yml similarity index 100% rename from roles/scale_hdfs/postcheck/.travis.yml rename to roles/hdfs_prepare/.travis.yml diff --git a/roles/hdfs_prepare/README.md b/roles/hdfs_prepare/README.md new file mode 120000 index 00000000..0c5f1b61 --- /dev/null +++ b/roles/hdfs_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.HDFS.md \ No newline at end of file diff --git a/roles/scale_hdfs/precheck/defaults/main.yml b/roles/hdfs_prepare/defaults/main.yml similarity index 100% rename from roles/scale_hdfs/precheck/defaults/main.yml rename to roles/hdfs_prepare/defaults/main.yml diff --git a/roles/scale_hdfs/precheck/meta/main.yml b/roles/hdfs_prepare/meta/main.yml similarity index 70% rename from roles/scale_hdfs/precheck/meta/main.yml rename to roles/hdfs_prepare/meta/main.yml index 5d862b46..dab8063f 100644 --- a/roles/scale_hdfs/precheck/meta/main.yml +++ b/roles/hdfs_prepare/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: scale_hdfs author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,10 +14,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_hdfs/precheck/tasks/check.yml b/roles/hdfs_prepare/tasks/check.yml similarity index 89% rename from roles/scale_hdfs/precheck/tasks/check.yml rename to roles/hdfs_prepare/tasks/check.yml index ce06bb9b..46d43177 100644 --- a/roles/scale_hdfs/precheck/tasks/check.yml +++ b/roles/hdfs_prepare/tasks/check.yml @@ -1,9 +1,12 @@ --- - include_tasks: prepare_env.yml - + - debug: msg: "transparency_33_enabled: {{ transparency_33_enabled|bool }}" +- debug: + msg: "transparency_322_enabled: {{ transparency_322_enabled|bool }}" + - name: global_var | Initialize set_fact: scale_hdfs_cluster: [] @@ -12,8 +15,8 @@ - name: check | Collect all protocol nodes set_fact: - scale_protocol_nodes_list: "{{ scale_protocol_nodes_list + [hostvars[hosts]['ansible_fqdn']] }}" - when: hostvars[hosts]['is_protocol_node'] is defined and hostvars[hosts]['is_protocol_node']|bool + scale_protocol_nodes_list: "{{ scale_protocol_nodes_list + [hostvars[hosts]['inventory_hostname']] }}" + when: hostvars[hosts]['scale_protocol_node'] is defined and hostvars[hosts]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" loop_control: @@ -41,7 +44,7 @@ - fail: msg: "HDFS is not supported on {{ ansible_distribution }} OS." - when: ansible_distribution not in hdfs_os_distribution + when: ansible_distribution not in scale_hdfs_os_distribution delegate_to: "{{ server }}" run_once: true @@ -68,7 +71,7 @@ - fail: msg: "Not sufficient CESIPs are assigned in export_ip_pool for HDFS clusters, please add more CESIP and retry." 
- when: + when: - hdfs_cluster_length|int > export_cesip_length|int delegate_to: localhost run_once: true @@ -116,4 +119,3 @@ - debug: msg: "HDFS Precheck ok" when: scale_hdfs_clusters|length == 1 - diff --git a/roles/scale_hdfs/precheck/tasks/java_home.yml b/roles/hdfs_prepare/tasks/java_home.yml similarity index 73% rename from roles/scale_hdfs/precheck/tasks/java_home.yml rename to roles/hdfs_prepare/tasks/java_home.yml index d9e44391..08a11d62 100644 --- a/roles/scale_hdfs/precheck/tasks/java_home.yml +++ b/roles/hdfs_prepare/tasks/java_home.yml @@ -34,36 +34,36 @@ - name: check | Fetch JAVA_HOME path shell: echo $JAVA_HOME register: java_path - when: ansible_fqdn in scale_hdfs_nodes_list + when: ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: check | Check JAVA_HOME path exist stat: path: "{{ java_path.stdout }}" register: java_path_details - when: ansible_fqdn in scale_hdfs_nodes_list + when: ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: check | Assert JAVA_HOME path exist assert: that: - java_path_details.stat.exists fail_msg: The JAVA_HOME path does not exists ! - when: ansible_fqdn in scale_hdfs_nodes_list + when: ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: check | Set path of JAVA_HOME set_fact: javahome_path: "{{ java_path.stdout }}" when: - - ansible_fqdn in scale_hdfs_nodes_list + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - name: check | verify JAVA command: "ls {{ javahome_path }}/bin/java" register: jvm_list when: - - ansible_fqdn in scale_hdfs_nodes_list + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - javahome_path|length > 0 - fail: msg: "JAVA_HOME not set properly" when: - - ansible_fqdn in scale_hdfs_nodes_list + - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list - jvm_list.rc != 0 \ No newline at end of file diff --git a/roles/scale_hdfs/precheck/tasks/main.yml b/roles/hdfs_prepare/tasks/main.yml similarity index 100% rename from roles/scale_hdfs/precheck/tasks/main.yml rename to roles/hdfs_prepare/tasks/main.yml diff --git a/roles/hdfs_prepare/tasks/prepare_env.yml b/roles/hdfs_prepare/tasks/prepare_env.yml new file mode 100644 index 00000000..ff52a973 --- /dev/null +++ b/roles/hdfs_prepare/tasks/prepare_env.yml @@ -0,0 +1,37 @@ +--- +- name: + set_fact: + transparency_33_enabled: "False" + transparency_322_enabled: "False" + transparency_version_33: "False" + transparency_version_322: "False" + +- name: + shell: "echo $SCALE_HDFS_TRANSPARENCY_VERSION_33_ENABLE" + register: transparency_version_33 + delegate_to: localhost + run_once: true + +- name: + shell: "echo $SCALE_HDFS_TRANSPARENCY_VERSION_322_ENABLE" + register: transparency_version_322 + delegate_to: localhost + run_once: true + +- name: + set_fact: + transparency_33_enabled: "{{ transparency_version_33.stdout|bool }}" + when: + - transparency_version_33.stdout is defined + - transparency_version_33.stdout|bool + delegate_to: localhost + run_once: true + +- name: + set_fact: + transparency_322_enabled: "{{ transparency_version_322.stdout|bool }}" + when: + - transparency_version_322.stdout is defined + - transparency_version_322.stdout|bool + delegate_to: localhost + run_once: true diff --git a/roles/scale_hdfs/precheck/vars/main.yml b/roles/hdfs_prepare/vars/main.yml similarity index 75% rename from roles/scale_hdfs/precheck/vars/main.yml rename to 
roles/hdfs_prepare/vars/main.yml index 9ca79643..77948826 100644 --- a/roles/scale_hdfs/precheck/vars/main.yml +++ b/roles/hdfs_prepare/vars/main.yml @@ -1,6 +1,6 @@ --- # vars file for precheck ## Supported HDFS os distrubution -hdfs_os_distribution: +scale_hdfs_os_distribution: - RedHat - CentOS diff --git a/roles/hdfs_upgrade/README.md b/roles/hdfs_upgrade/README.md new file mode 120000 index 00000000..0c5f1b61 --- /dev/null +++ b/roles/hdfs_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.HDFS.md \ No newline at end of file diff --git a/roles/scale_hdfs/upgrade/defaults/main.yml b/roles/hdfs_upgrade/defaults/main.yml similarity index 62% rename from roles/scale_hdfs/upgrade/defaults/main.yml rename to roles/hdfs_upgrade/defaults/main.yml index cfdecd12..74bea331 100644 --- a/roles/scale_hdfs/upgrade/defaults/main.yml +++ b/roles/hdfs_upgrade/defaults/main.yml @@ -13,16 +13,19 @@ scale_hdfs_packages: scale_install_localpkg_tmpdir_path: /tmp ## Flag to install hdfs debug package -install_debuginfo: true +scale_hdfs_install_debuginfo: true # Directory to install 3.1.1.x hdfs package -hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' +scale_hdfs_rhel_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' # Directory to install 3.3.x hdfs package -hdfs_rhel_version_path_33: 'hdfs_rpms/rhel/hdfs_3.3.x/' +scale_hdfs_rhel_version_path_33: 'hdfs_rpms/rhel/hdfs_3.3.x/' + +# Directory to install 3.2.2.x hdfs package +scale_hdfs_rhel_version_path_322: 'hdfs_rpms/rhel/hdfs_3.2.2.x/' # Directory to install 3.1.1.x hdfs package -hdfs_sles_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' +scale_hdfs_sles_version_path: 'hdfs_rpms/rhel/hdfs_3.1.1.x/' # Directory to install 3.3.x hdfs package -hdfs_ubuntu_version_path: 'hdfs_debs/ubuntu/hdfs_3.1.1.x/' +scale_hdfs_ubuntu_version_path: 'hdfs_debs/ubuntu/hdfs_3.1.1.x/' diff --git a/roles/scale_hdfs/upgrade/handlers/mail.yml b/roles/hdfs_upgrade/handlers/mail.yml similarity index 100% rename from roles/scale_hdfs/upgrade/handlers/mail.yml rename to roles/hdfs_upgrade/handlers/mail.yml diff --git a/roles/hdfs_upgrade/meta/main.yml b/roles/hdfs_upgrade/meta/main.yml new file mode 100644 index 00000000..d32d632b --- /dev/null +++ b/roles/hdfs_upgrade/meta/main.yml @@ -0,0 +1,20 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.core_common diff --git a/roles/scale_hdfs/upgrade/tasks/main.yml b/roles/hdfs_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/main.yml rename to roles/hdfs_upgrade/tasks/main.yml diff --git a/roles/hdfs_upgrade/tasks/prepare_env.yml b/roles/hdfs_upgrade/tasks/prepare_env.yml new file mode 100644 index 00000000..e58acfef --- /dev/null +++ b/roles/hdfs_upgrade/tasks/prepare_env.yml @@ -0,0 +1,37 @@ +--- +- name: + set_fact: + transparency_33_enabled: "False" + transparency_322_enabled: "False" + transparency_version_33: "False" + transparency_version_322: "False" + +- name: + shell: "echo $SCALE_HDFS_TRANSPARENCY_VERSION_33_ENABLE" + register: transparency_version_33 + delegate_to: localhost + run_once: true + +- name: + shell: "echo $SCALE_HDFS_TRANSPARENCY_VERSION_322_ENABLE" + register: transparency_version_322 + delegate_to: localhost + run_once: true + +- name: + set_fact: + transparency_33_enabled: "{{ 
transparency_version_33.stdout|bool }}" + when: + - transparency_version_33.stdout is defined + - transparency_version_33.stdout|bool + delegate_to: localhost + run_once: true + +- name: + set_fact: + transparency_322_enabled: "{{ transparency_version_322.stdout|bool }}" + when: + - transparency_version_322.stdout is defined + - transparency_version_322.stdout|bool + delegate_to: localhost + run_once: true diff --git a/roles/scale_hdfs/upgrade/tasks/upgrade.yml b/roles/hdfs_upgrade/tasks/upgrade.yml similarity index 87% rename from roles/scale_hdfs/upgrade/tasks/upgrade.yml rename to roles/hdfs_upgrade/tasks/upgrade.yml index 3a7b2b71..82008c5c 100644 --- a/roles/scale_hdfs/upgrade/tasks/upgrade.yml +++ b/roles/hdfs_upgrade/tasks/upgrade.yml @@ -51,7 +51,7 @@ - name: upgrade | Set the extracted package directory path set_fact: hdfs_extracted_path: "{{ scale_extracted_path }}" - hdfs_version_path_selection_rhel: "{{ hdfs_rhel_version_path }}" + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path }}" - name: upgrade | Stat extracted packages directory stat: @@ -63,12 +63,17 @@ - block: - name: set_fact: - hdfs_version_path_selection_rhel: "{{ hdfs_rhel_version_path_33 }}" + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path_33 }}" when: transparency_33_enabled|bool + - name: + set_fact: + hdfs_version_path_selection_rhel: "{{ scale_hdfs_rhel_version_path_322 }}" + when: transparency_322_enabled|bool + - name: upgrade | Fetch hdfs rpm dir path for rhel set_fact: - hdfs_rpm_path_rhel: "{{ hdfs_version_path_selection_rhel }}" + hdfs_rpm_path_rhel: "{{ hdfs_version_path_selection_rhel }}" run_once: true delegate_to: localhost diff --git a/roles/scale_hdfs/upgrade/tasks/upgrade_dir_pkg.yml b/roles/hdfs_upgrade/tasks/upgrade_dir_pkg.yml similarity index 96% rename from roles/scale_hdfs/upgrade/tasks/upgrade_dir_pkg.yml rename to roles/hdfs_upgrade/tasks/upgrade_dir_pkg.yml index 95af2da2..2bb76618 100644 --- a/roles/scale_hdfs/upgrade/tasks/upgrade_dir_pkg.yml +++ b/roles/hdfs_upgrade/tasks/upgrade_dir_pkg.yml @@ -87,5 +87,5 @@ - name: remove debuginfo from packages set_fact: scale_upgrade_all_packages: "{{ scale_upgrade_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_hdfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/scale_hdfs/upgrade/tasks/upgrade_local_pkg.yml b/roles/hdfs_upgrade/tasks/upgrade_local_pkg.yml similarity index 97% rename from roles/scale_hdfs/upgrade/tasks/upgrade_local_pkg.yml rename to roles/hdfs_upgrade/tasks/upgrade_local_pkg.yml index ef1762fd..5e1c089c 100644 --- a/roles/scale_hdfs/upgrade/tasks/upgrade_local_pkg.yml +++ b/roles/hdfs_upgrade/tasks/upgrade_local_pkg.yml @@ -154,5 +154,5 @@ - name: remove debuginfo from packages set_fact: scale_upgrade_all_packages: "{{ scale_upgrade_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_hdfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/scale_hdfs/upgrade/tasks/upgrade_remote_pkg.yml b/roles/hdfs_upgrade/tasks/upgrade_remote_pkg.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/upgrade_remote_pkg.yml rename to roles/hdfs_upgrade/tasks/upgrade_remote_pkg.yml diff --git a/roles/scale_hdfs/upgrade/tasks/upgrade_repository.yml b/roles/hdfs_upgrade/tasks/upgrade_repository.yml similarity index
95% rename from roles/scale_hdfs/upgrade/tasks/upgrade_repository.yml rename to roles/hdfs_upgrade/tasks/upgrade_repository.yml index e04047e9..e1933fed 100644 --- a/roles/scale_hdfs/upgrade/tasks/upgrade_repository.yml +++ b/roles/hdfs_upgrade/tasks/upgrade_repository.yml @@ -24,6 +24,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Add GPFS hdfs packages to list diff --git a/roles/scale_hdfs/upgrade/tasks/yum/upgrade.yml b/roles/hdfs_upgrade/tasks/yum/upgrade.yml similarity index 100% rename from roles/scale_hdfs/upgrade/tasks/yum/upgrade.yml rename to roles/hdfs_upgrade/tasks/yum/upgrade.yml diff --git a/roles/scale_hdfs/upgrade/vars/main.yml b/roles/hdfs_upgrade/vars/main.yml similarity index 100% rename from roles/scale_hdfs/upgrade/vars/main.yml rename to roles/hdfs_upgrade/vars/main.yml diff --git a/roles/scale_hdfs/precheck/.travis.yml b/roles/hdfs_verify/.travis.yml similarity index 100% rename from roles/scale_hdfs/precheck/.travis.yml rename to roles/hdfs_verify/.travis.yml diff --git a/roles/hdfs_verify/README.md b/roles/hdfs_verify/README.md new file mode 120000 index 00000000..0c5f1b61 --- /dev/null +++ b/roles/hdfs_verify/README.md @@ -0,0 +1 @@ +../../docs/README.HDFS.md \ No newline at end of file diff --git a/roles/scale_hdfs/postcheck/defaults/main.yml b/roles/hdfs_verify/defaults/main.yml similarity index 100% rename from roles/scale_hdfs/postcheck/defaults/main.yml rename to roles/hdfs_verify/defaults/main.yml diff --git a/roles/scale_hdfs/postcheck/meta/main.yml b/roles/hdfs_verify/meta/main.yml similarity index 70% rename from roles/scale_hdfs/postcheck/meta/main.yml rename to roles/hdfs_verify/meta/main.yml index 5d862b46..dab8063f 100644 --- a/roles/scale_hdfs/postcheck/meta/main.yml +++ b/roles/hdfs_verify/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: scale_hdfs author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,10 +14,6 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: [] diff --git a/roles/scale_hdfs/postcheck/tasks/check.yml b/roles/hdfs_verify/tasks/check.yml similarity index 100% rename from roles/scale_hdfs/postcheck/tasks/check.yml rename to roles/hdfs_verify/tasks/check.yml diff --git a/roles/scale_hdfs/postcheck/tasks/main.yml b/roles/hdfs_verify/tasks/main.yml similarity index 100% rename from roles/scale_hdfs/postcheck/tasks/main.yml rename to roles/hdfs_verify/tasks/main.yml diff --git a/roles/scale_hdfs/postcheck/vars/main.yml b/roles/hdfs_verify/vars/main.yml similarity index 100% rename from roles/scale_hdfs/postcheck/vars/main.yml rename to roles/hdfs_verify/vars/main.yml diff --git a/roles/nfs/cluster/meta/main.yml b/roles/nfs/cluster/meta/main.yml deleted file mode 100644 index 4e4eeb35..00000000 --- a/roles/nfs/cluster/meta/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -galaxy_info: - role_name: nfs_cluster - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - 
nfs/precheck - - nfs/common - diff --git a/roles/nfs/node/meta/main.yml b/roles/nfs/node/meta/main.yml deleted file mode 100644 index c68a66d3..00000000 --- a/roles/nfs/node/meta/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -galaxy_info: - role_name: nfs_node - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - core/common - - nfs/precheck - diff --git a/roles/nfs/postcheck/meta/main.yml b/roles/nfs/postcheck/meta/main.yml deleted file mode 100644 index 86cfa371..00000000 --- a/roles/nfs/postcheck/meta/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -galaxy_info: - role_name: nfs_postcheck - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: [] - diff --git a/roles/nfs/precheck/meta/main.yml b/roles/nfs/precheck/meta/main.yml deleted file mode 100644 index 44ea4611..00000000 --- a/roles/nfs/precheck/meta/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -galaxy_info: - role_name: nfs_precheck - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: [] - diff --git a/roles/nfs_configure/README.md b/roles/nfs_configure/README.md new file mode 120000 index 00000000..6a3df305 --- /dev/null +++ b/roles/nfs_configure/README.md @@ -0,0 +1 @@ +../../docs/README.NFS.md \ No newline at end of file diff --git a/roles/nfs/cluster/defaults/main.yml b/roles/nfs_configure/defaults/main.yml similarity index 100% rename from roles/nfs/cluster/defaults/main.yml rename to roles/nfs_configure/defaults/main.yml diff --git a/roles/scale_hdfs/cluster/meta/main.yml b/roles/nfs_configure/meta/main.yml similarity index 67% rename from roles/scale_hdfs/cluster/meta/main.yml rename to roles/nfs_configure/meta/main.yml index b6cf2dcb..f2018326 100644 --- a/roles/scale_hdfs/cluster/meta/main.yml +++ b/roles/nfs_configure/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: scale_hdfs author: IBM Corporation description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) company: IBM + license: Apache-2.0 - min_ansible_version: 2.4 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,11 +14,8 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] dependencies: - - nfs/common + - ibm.spectrum_scale.nfs_prepare + - ibm.spectrum_scale.ces_common diff --git a/roles/nfs/cluster/tasks/configure.yml b/roles/nfs_configure/tasks/configure.yml similarity index 100% rename from roles/nfs/cluster/tasks/configure.yml rename to roles/nfs_configure/tasks/configure.yml diff --git a/roles/nfs/cluster/tasks/main.yml b/roles/nfs_configure/tasks/main.yml similarity index 100% rename from roles/nfs/cluster/tasks/main.yml rename to roles/nfs_configure/tasks/main.yml diff --git a/roles/nfs/cluster/vars/main.yml b/roles/nfs_configure/vars/main.yml similarity index 100% 
rename from roles/nfs/cluster/vars/main.yml rename to roles/nfs_configure/vars/main.yml diff --git a/roles/nfs_install/README.md b/roles/nfs_install/README.md new file mode 120000 index 00000000..6a3df305 --- /dev/null +++ b/roles/nfs_install/README.md @@ -0,0 +1 @@ +../../docs/README.NFS.md \ No newline at end of file diff --git a/roles/nfs/node/defaults/main.yml b/roles/nfs_install/defaults/main.yml similarity index 94% rename from roles/nfs/node/defaults/main.yml rename to roles/nfs_install/defaults/main.yml index 69b23edd..22279143 100644 --- a/roles/nfs/node/defaults/main.yml +++ b/roles/nfs_install/defaults/main.yml @@ -23,10 +23,10 @@ scale_nfs_debs: ## pm ganesha package for nfs performance monitoring scale_pm_package: - - gpfs.pm-ganesha + - gpfs.pm-ganesha ## Temporary directory to copy installation package to ## (local package installation method) scale_install_localpkg_tmpdir_path: /tmp ## Flag to install ganesha debug package -install_debuginfo: true +scale_nfs_install_debuginfo: true diff --git a/roles/scale_ece/upgrade/handlers/main.yml b/roles/nfs_install/handlers/main.yml similarity index 100% rename from roles/scale_ece/upgrade/handlers/main.yml rename to roles/nfs_install/handlers/main.yml diff --git a/roles/nfs_install/meta/main.yml b/roles/nfs_install/meta/main.yml new file mode 100644 index 00000000..a451c3b9 --- /dev/null +++ b/roles/nfs_install/meta/main.yml @@ -0,0 +1,21 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.nfs_prepare diff --git a/roles/nfs/node/tasks/apt/install.yml b/roles/nfs_install/tasks/apt/install.yml similarity index 57% rename from roles/nfs/node/tasks/apt/install.yml rename to roles/nfs_install/tasks/apt/install.yml index 3d7b82b1..7859fae0 100644 --- a/roles/nfs/node/tasks/apt/install.yml +++ b/roles/nfs_install/tasks/apt/install.yml @@ -3,12 +3,12 @@ package: name: "{{ scale_install_all_packages }}" state: present - when: scale_install_repository_url is defined and ansible_fqdn in scale_nfs_nodes_list + when: scale_install_repository_url is defined and inventory_hostname in scale_nfs_nodes_list - name: install| Install GPFS NFS deb apt: deb: "{{ item }}" state: present - when: scale_install_repository_url is not defined and ansible_fqdn in scale_nfs_nodes_list + when: scale_install_repository_url is not defined and inventory_hostname in scale_nfs_nodes_list with_items: - "{{ scale_install_all_packages }}" diff --git a/roles/nfs/node/tasks/install.yml b/roles/nfs_install/tasks/install.yml similarity index 97% rename from roles/nfs/node/tasks/install.yml rename to roles/nfs_install/tasks/install.yml index d405db94..07809c57 100644 --- a/roles/nfs/node/tasks/install.yml +++ b/roles/nfs_install/tasks/install.yml @@ -72,4 +72,4 @@ package: name: rpcbind state: present - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list diff --git a/roles/nfs/node/tasks/install_dir_pkg.yml b/roles/nfs_install/tasks/install_dir_pkg.yml similarity index 98% rename from roles/nfs/node/tasks/install_dir_pkg.yml rename to roles/nfs_install/tasks/install_dir_pkg.yml index e8d17207..17ad0e4e 100644 --- a/roles/nfs/node/tasks/install_dir_pkg.yml +++ b/roles/nfs_install/tasks/install_dir_pkg.yml @@ -231,7 +231,7 @@ 
- name: install | remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/nfs/node/tasks/install_local_pkg.yml b/roles/nfs_install/tasks/install_local_pkg.yml similarity index 91% rename from roles/nfs/node/tasks/install_local_pkg.yml rename to roles/nfs_install/tasks/install_local_pkg.yml index 80265dd8..8a0236da 100644 --- a/roles/nfs/node/tasks/install_local_pkg.yml +++ b/roles/nfs_install/tasks/install_local_pkg.yml @@ -107,6 +107,11 @@ scale_nfs_url: 'ganesha_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | nfs path set_fact: scale_nfs_url: 'ganesha_debs/ubuntu16/' @@ -137,6 +142,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' @@ -155,7 +165,7 @@ - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' - name: install | smb path set_fact: @@ -172,6 +182,30 @@ scale_smb_url: 'smb_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + # Find nfs rpms - block: ## when: host is defined as a protocol node @@ -303,7 +337,6 @@ assert: that: scale_install_gpfs_nfs_pm.matched > 0 msg: "No GPFS utils (gpfs.pm-ganesha) package found {{ nfs_extracted_path }}/{{ scale_zimon_url }}gpfs.pm-ganesha*" - when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_ubuntu_distribution - block: ## when: host is defined as a protocol node @@ -327,6 +360,7 @@ with_items: - "{{ scale_install_gpfs_nfs_python.files }}" - "{{ scale_install_gpfs_nfs_doc.files }}" + - "{{ scale_install_gpfs_nfs_gpfs.files }}" when: ansible_distribution in scale_ubuntu_distribution - name: install | Add GPFS package to list @@ -336,7 +370,6 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}" 
with_items: - "{{ scale_install_gpfs_nfs_pm.files }}" - when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_ubuntu_distribution - block: - name: initialize @@ -352,7 +385,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/nfs/node/tasks/install_remote_pkg.yml b/roles/nfs_install/tasks/install_remote_pkg.yml similarity index 90% rename from roles/nfs/node/tasks/install_remote_pkg.yml rename to roles/nfs_install/tasks/install_remote_pkg.yml index 60196221..cb929943 100644 --- a/roles/nfs/node/tasks/install_remote_pkg.yml +++ b/roles/nfs_install/tasks/install_remote_pkg.yml @@ -81,6 +81,11 @@ scale_nfs_url: 'ganesha_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | nfs path set_fact: scale_nfs_url: 'ganesha_debs/ubuntu16/' @@ -111,6 +116,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' @@ -129,7 +139,7 @@ - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' - name: install | smb path set_fact: @@ -146,6 +156,30 @@ scale_smb_url: 'smb_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + - block: ## when: host is defined as a protocol node - name: install | Find gpfs.smb (gpfs.smb) package @@ -324,7 +358,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - name: List all GPFS package to be installed debug: diff --git a/roles/nfs/node/tasks/install_repository.yml 
b/roles/nfs_install/tasks/install_repository.yml similarity index 81% rename from roles/nfs/node/tasks/install_repository.yml rename to roles/nfs_install/tasks/install_repository.yml index 1a1422b1..987fef61 100644 --- a/roles/nfs/node/tasks/install_repository.yml +++ b/roles/nfs_install/tasks/install_repository.yml @@ -9,6 +9,11 @@ scale_nfs_url: 'ganesha_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | nfs path set_fact: scale_nfs_url: 'ganesha_debs/ubuntu16/' @@ -44,6 +49,30 @@ scale_smb_url: 'smb_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/rhel7/' @@ -54,6 +83,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' @@ -72,7 +106,7 @@ - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' - name: install|configure nfs YUM repository yum_repository: @@ -86,6 +120,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure nfs APT repository @@ -99,6 +134,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure smb APT repository @@ -112,6 +148,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure nfs zypper repository @@ -123,6 +160,7 @@ disable_gpg_check: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install|configure pm-ganesha YUM repository @@ -137,6 +175,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure pm-ganesha APT repository @@ -150,6 +189,7 @@ 
mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure pm-ganesha zypper repository @@ -162,6 +202,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Add GPFS nfs packages to list diff --git a/roles/nfs/node/tasks/main.yml b/roles/nfs_install/tasks/main.yml similarity index 100% rename from roles/nfs/node/tasks/main.yml rename to roles/nfs_install/tasks/main.yml diff --git a/roles/nfs/node/tasks/yum/install.yml b/roles/nfs_install/tasks/yum/install.yml similarity index 76% rename from roles/nfs/node/tasks/yum/install.yml rename to roles/nfs_install/tasks/yum/install.yml index d23447df..73ee4575 100644 --- a/roles/nfs/node/tasks/yum/install.yml +++ b/roles/nfs_install/tasks/yum/install.yml @@ -4,4 +4,4 @@ name: "{{ scale_install_all_packages }}" state: present disable_gpg_check: "{{ scale_disable_gpgcheck }}" - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list diff --git a/roles/nfs/node/tasks/zypper/install.yml b/roles/nfs_install/tasks/zypper/install.yml similarity index 100% rename from roles/nfs/node/tasks/zypper/install.yml rename to roles/nfs_install/tasks/zypper/install.yml diff --git a/roles/nfs/node/vars/main.yml b/roles/nfs_install/vars/main.yml similarity index 100% rename from roles/nfs/node/vars/main.yml rename to roles/nfs_install/vars/main.yml diff --git a/roles/nfs_prepare/README.md b/roles/nfs_prepare/README.md new file mode 120000 index 00000000..6a3df305 --- /dev/null +++ b/roles/nfs_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.NFS.md \ No newline at end of file diff --git a/roles/nfs_prepare/meta/main.yml b/roles/nfs_prepare/meta/main.yml new file mode 100644 index 00000000..dab8063f --- /dev/null +++ b/roles/nfs_prepare/meta/main.yml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: [] diff --git a/roles/nfs/precheck/tasks/check.yml b/roles/nfs_prepare/tasks/check.yml similarity index 82% rename from roles/nfs/precheck/tasks/check.yml rename to roles/nfs_prepare/tasks/check.yml index 4dd769a8..9651102a 100644 --- a/roles/nfs/precheck/tasks/check.yml +++ b/roles/nfs_prepare/tasks/check.yml @@ -11,8 +11,8 @@ - name: check | Collect all nfs nodes set_fact: - scale_nfs_nodes_list: "{{ scale_nfs_nodes_list + [hostvars[item]['ansible_fqdn']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + scale_nfs_nodes_list: "{{ scale_nfs_nodes_list + [hostvars[item]['inventory_hostname']] }}" + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost @@ -31,7 +31,7 @@ shell: cmd: systemctl status nfs-server register: scale_nfs_status - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list ignore_errors: true failed_when: false @@ -41,14 +41,14 @@ - scale_nfs_status.rc > 0 fail_msg: "Service nfs found running on {{ ansible_hostname }}. Which conflicts with the installation of NFS. 
SUGGESTTED ACTION- Run commands to stop (systemctl stop nfs) and disable (systemctl disable nfs) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list any_errors_fatal: true - name: check | Collect status of service nfs-kernel-server shell: cmd: systemctl status nfs-kernel-server register: scale_nfs_status - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list ignore_errors: true failed_when: false @@ -59,14 +59,14 @@ SUGGESTTED ACTION- Run commands to stop (systemctl stop nfs) and disable (system fail_msg: "Service nfs-kernel-server found running on {{ ansible_hostname }}. Which conflicts with the installation of NFS. SUGGESTTED ACTION Run commands to stop (systemctl stop nfs-kernel-server) and disable (systemctl disable nfs-kernel-server) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list any_errors_fatal: true - name: check | Collect status of service knfs-server shell: cmd: systemctl status knfs-server register: scale_nfs_status - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list ignore_errors: true failed_when: false @@ -77,5 +77,5 @@ this service on node {{ ansible_hostname }}" fail_msg: "Service knfs-kernel-server found running on {{ ansible_hostname }}. Which conflicts with the installation of NFS. SUGGESTTED ACTION Run commands to stop (systemctl stop knfs-server) and disable (systemctl disable knfs-server) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list any_errors_fatal: true diff --git a/roles/nfs/precheck/tasks/main.yml b/roles/nfs_prepare/tasks/main.yml similarity index 100% rename from roles/nfs/precheck/tasks/main.yml rename to roles/nfs_prepare/tasks/main.yml diff --git a/roles/nfs_upgrade/README.md b/roles/nfs_upgrade/README.md new file mode 120000 index 00000000..6a3df305 --- /dev/null +++ b/roles/nfs_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.NFS.md \ No newline at end of file diff --git a/roles/nfs/upgrade/defaults/main.yml b/roles/nfs_upgrade/defaults/main.yml similarity index 96% rename from roles/nfs/upgrade/defaults/main.yml rename to roles/nfs_upgrade/defaults/main.yml index a4f272fd..2737c4e8 100644 --- a/roles/nfs/upgrade/defaults/main.yml +++ b/roles/nfs_upgrade/defaults/main.yml @@ -28,4 +28,4 @@ scale_pm_package: scale_install_localpkg_tmpdir_path: /tmp ## Flag to install ganesha debug package -install_debuginfo: true +scale_nfs_install_debuginfo: true diff --git a/roles/scale_hdfs/node/handlers/main.yml b/roles/nfs_upgrade/handlers/main.yml similarity index 100% rename from roles/scale_hdfs/node/handlers/main.yml rename to roles/nfs_upgrade/handlers/main.yml diff --git a/roles/nfs_upgrade/meta/main.yml b/roles/nfs_upgrade/meta/main.yml new file mode 100644 index 00000000..d32d632b --- /dev/null +++ b/roles/nfs_upgrade/meta/main.yml @@ -0,0 +1,20 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.core_common diff --git a/roles/nfs/upgrade/tasks/apt/install.yml b/roles/nfs_upgrade/tasks/apt/install.yml similarity index 100% 
rename from roles/nfs/upgrade/tasks/apt/install.yml rename to roles/nfs_upgrade/tasks/apt/install.yml diff --git a/roles/nfs/upgrade/tasks/install.yml b/roles/nfs_upgrade/tasks/install.yml similarity index 100% rename from roles/nfs/upgrade/tasks/install.yml rename to roles/nfs_upgrade/tasks/install.yml diff --git a/roles/nfs/upgrade/tasks/install_dir_pkg.yml b/roles/nfs_upgrade/tasks/install_dir_pkg.yml similarity index 98% rename from roles/nfs/upgrade/tasks/install_dir_pkg.yml rename to roles/nfs_upgrade/tasks/install_dir_pkg.yml index 1371923d..37ba19b1 100644 --- a/roles/nfs/upgrade/tasks/install_dir_pkg.yml +++ b/roles/nfs_upgrade/tasks/install_dir_pkg.yml @@ -231,7 +231,7 @@ - name: upgrade | remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/nfs/upgrade/tasks/install_local_pkg.yml b/roles/nfs_upgrade/tasks/install_local_pkg.yml similarity index 99% rename from roles/nfs/upgrade/tasks/install_local_pkg.yml rename to roles/nfs_upgrade/tasks/install_local_pkg.yml index 4ddf3c58..d095eb26 100644 --- a/roles/nfs/upgrade/tasks/install_local_pkg.yml +++ b/roles/nfs_upgrade/tasks/install_local_pkg.yml @@ -318,4 +318,4 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/nfs/upgrade/tasks/install_remote_pkg.yml b/roles/nfs_upgrade/tasks/install_remote_pkg.yml similarity index 99% rename from roles/nfs/upgrade/tasks/install_remote_pkg.yml rename to roles/nfs_upgrade/tasks/install_remote_pkg.yml index a3ee191e..b96c0651 100644 --- a/roles/nfs/upgrade/tasks/install_remote_pkg.yml +++ b/roles/nfs_upgrade/tasks/install_remote_pkg.yml @@ -324,4 +324,4 @@ - name: upgrade | remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_nfs_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/nfs/upgrade/tasks/install_repository.yml b/roles/nfs_upgrade/tasks/install_repository.yml similarity index 81% rename from roles/nfs/upgrade/tasks/install_repository.yml rename to roles/nfs_upgrade/tasks/install_repository.yml index 90a52e57..7fbc70c4 100644 --- a/roles/nfs/upgrade/tasks/install_repository.yml +++ b/roles/nfs_upgrade/tasks/install_repository.yml @@ -9,6 +9,11 @@ scale_nfs_url: 'ganesha_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: upgrade | nfs path + set_fact: + scale_nfs_url: 'ganesha_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: upgrade | nfs path set_fact: scale_nfs_url: 'ganesha_debs/ubuntu/' @@ -34,6 +39,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: upgrade | zimon path + set_fact: + 
scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: upgrade | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' @@ -52,9 +62,23 @@ - name: upgrade | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' + +- block: + - name: upgrade | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: upgrade | nfs path + set_fact: + scale_nfs_url: 'ganesha_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" -- name: install|configure nfs YUM repository +- name: upgrade | configure nfs YUM repository yum_repository: name: spectrum-scale-nfs-rpms description: IBM Spectrum Scale (NFS RPMS) @@ -66,6 +90,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure nfs APT repository @@ -79,6 +104,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure nfs zypper repository @@ -90,6 +116,7 @@ disable_gpg_check: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install|configure pm-ganesha YUM repository @@ -104,6 +131,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure pm-ganesha APT repository @@ -117,6 +145,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure pm-ganesha zypper repository @@ -129,6 +158,7 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Add GPFS nfs packages to list diff --git a/roles/nfs/upgrade/tasks/main.yml b/roles/nfs_upgrade/tasks/main.yml similarity index 100% rename from roles/nfs/upgrade/tasks/main.yml rename to roles/nfs_upgrade/tasks/main.yml diff --git a/roles/nfs/upgrade/tasks/yum/install.yml b/roles/nfs_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/nfs/upgrade/tasks/yum/install.yml rename to roles/nfs_upgrade/tasks/yum/install.yml diff --git a/roles/nfs/upgrade/tasks/zypper/install.yml b/roles/nfs_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/nfs/upgrade/tasks/zypper/install.yml rename to roles/nfs_upgrade/tasks/zypper/install.yml diff --git a/roles/nfs/upgrade/vars/main.yml b/roles/nfs_upgrade/vars/main.yml similarity index 100% rename from roles/nfs/upgrade/vars/main.yml rename to roles/nfs_upgrade/vars/main.yml diff --git a/roles/nfs_verify/README.md b/roles/nfs_verify/README.md new file mode 120000 index 00000000..6a3df305 --- /dev/null +++ b/roles/nfs_verify/README.md @@ -0,0 +1 @@ +../../docs/README.NFS.md \ No newline at end of file diff --git a/roles/nfs/postcheck/defaults/main.yml 
b/roles/nfs_verify/defaults/main.yml similarity index 100% rename from roles/nfs/postcheck/defaults/main.yml rename to roles/nfs_verify/defaults/main.yml diff --git a/roles/nfs_verify/meta/main.yml b/roles/nfs_verify/meta/main.yml new file mode 100644 index 00000000..dab8063f --- /dev/null +++ b/roles/nfs_verify/meta/main.yml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: [] diff --git a/roles/nfs/postcheck/tasks/check.yml b/roles/nfs_verify/tasks/check.yml similarity index 75% rename from roles/nfs/postcheck/tasks/check.yml rename to roles/nfs_verify/tasks/check.yml index 17f2a5f7..058ed741 100644 --- a/roles/nfs/postcheck/tasks/check.yml +++ b/roles/nfs_verify/tasks/check.yml @@ -3,7 +3,7 @@ shell: cmd: "{{ scale_command_path }}mmces service list|grep NFS" register: scale_nfs_status - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list failed_when: false - name: postcheck | Check if NFS is running @@ -11,4 +11,4 @@ that: - scale_nfs_status.rc == 0 fail_msg: "NFS is not active on {{ ansible_hostname }}" - when: ansible_fqdn in scale_nfs_nodes_list + when: inventory_hostname in scale_nfs_nodes_list diff --git a/roles/nfs/postcheck/tasks/main.yml b/roles/nfs_verify/tasks/main.yml similarity index 100% rename from roles/nfs/postcheck/tasks/main.yml rename to roles/nfs_verify/tasks/main.yml diff --git a/roles/nfs/postcheck/vars/main.yml b/roles/nfs_verify/vars/main.yml similarity index 100% rename from roles/nfs/postcheck/vars/main.yml rename to roles/nfs_verify/vars/main.yml diff --git a/roles/obj_configure/README.md b/roles/obj_configure/README.md new file mode 120000 index 00000000..4f37f9be --- /dev/null +++ b/roles/obj_configure/README.md @@ -0,0 +1 @@ +../../docs/README.OBJ.md \ No newline at end of file diff --git a/roles/scale_object/cluster/defaults/main.yml b/roles/obj_configure/defaults/main.yml similarity index 100% rename from roles/scale_object/cluster/defaults/main.yml rename to roles/obj_configure/defaults/main.yml diff --git a/roles/obj_configure/meta/main.yml b/roles/obj_configure/meta/main.yml new file mode 100644 index 00000000..b4409a0f --- /dev/null +++ b/roles/obj_configure/meta/main.yml @@ -0,0 +1,21 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.obj_prepare + - ibm.spectrum_scale.ces_common diff --git a/roles/scale_object/cluster/tasks/configure.yml b/roles/obj_configure/tasks/configure.yml similarity index 91% rename from roles/scale_object/cluster/tasks/configure.yml rename to roles/obj_configure/tasks/configure.yml index 3872c807..e969ecf8 100644 --- a/roles/scale_object/cluster/tasks/configure.yml +++ b/roles/obj_configure/tasks/configure.yml @@ -48,21 +48,21 @@ - name: configure | Set configuration parameter to configure OBJ set_fact: - obj_param: "-g {{ scale_protocols.mountpoint }} -o {{ scale_ces_obj.object_fileset }} --cluster-hostname {{ scale_obj_nodes_list.0 }} --pwd-file {{ scale_ces_obj.pwd_file }}" + obj_param: "-g {{ scale_protocols.mountpoint }} -o {{ 
scale_ces_obj.object_fileset }} --cluster-hostname {{ hostvars[scale_obj_nodes_list.0]['scale_daemon_nodename'] }} --pwd-file {{ scale_ces_obj.pwd_file }}" delegate_to: "{{ scale_obj_nodes_list.0 }}" when: - not scale_ces_dynamic_obj|bool run_once: True -- name: configure | Check local-keystone is defined +- name: configure | Check local-keystone is defined set_fact: - obj_param: "{{ obj_param }} --local-keystone" - when: + obj_param: "{{ obj_param }} --local-keystone" + when: - scale_ces_obj.local_keystone is defined and scale_ces_obj.local_keystone|bool - not scale_ces_dynamic_obj|bool delegate_to: "{{ scale_obj_nodes_list.0 }}" run_once: True - + - name: configure | Check enable-s3 is defined set_fact: obj_param: "{{ obj_param }} --enable-s3" @@ -80,7 +80,7 @@ - not scale_ces_dynamic_obj|bool delegate_to: "{{ scale_obj_nodes_list.0 }}" run_once: true - + # # Configure Object # @@ -104,8 +104,8 @@ register: scale_ces_enable_obj_service - name: configure | Show OBJ Service is enabled - debug: - var: scale_ces_enable_obj_service.stdout_lines + debug: + var: scale_ces_enable_obj_service.stdout_lines # Start Object on CES - name: configure | Start OBJ Service @@ -124,4 +124,4 @@ when: obj_enabled is defined and not obj_enabled delegate_to: "{{ scale_obj_nodes_list.0 }}" - run_once: true + run_once: true diff --git a/roles/scale_object/cluster/tasks/configure_pmswift.yml b/roles/obj_configure/tasks/configure_pmswift.yml similarity index 100% rename from roles/scale_object/cluster/tasks/configure_pmswift.yml rename to roles/obj_configure/tasks/configure_pmswift.yml diff --git a/roles/scale_object/cluster/tasks/main.yml b/roles/obj_configure/tasks/main.yml similarity index 100% rename from roles/scale_object/cluster/tasks/main.yml rename to roles/obj_configure/tasks/main.yml diff --git a/roles/scale_object/cluster/templates/obj_passwd.j2 b/roles/obj_configure/templates/obj_passwd.j2 similarity index 100% rename from roles/scale_object/cluster/templates/obj_passwd.j2 rename to roles/obj_configure/templates/obj_passwd.j2 diff --git a/roles/scale_object/cluster/vars/main.yml b/roles/obj_configure/vars/main.yml similarity index 100% rename from roles/scale_object/cluster/vars/main.yml rename to roles/obj_configure/vars/main.yml diff --git a/roles/obj_install/README.md b/roles/obj_install/README.md new file mode 120000 index 00000000..4f37f9be --- /dev/null +++ b/roles/obj_install/README.md @@ -0,0 +1 @@ +../../docs/README.OBJ.md \ No newline at end of file diff --git a/roles/scale_object/node/defaults/main.yml b/roles/obj_install/defaults/main.yml similarity index 100% rename from roles/scale_object/node/defaults/main.yml rename to roles/obj_install/defaults/main.yml diff --git a/roles/scale_object/node/handlers/main.yml b/roles/obj_install/handlers/main.yml similarity index 100% rename from roles/scale_object/node/handlers/main.yml rename to roles/obj_install/handlers/main.yml diff --git a/roles/obj_install/meta/main.yml b/roles/obj_install/meta/main.yml new file mode 100644 index 00000000..ba337e84 --- /dev/null +++ b/roles/obj_install/meta/main.yml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.obj_prepare diff --git a/roles/scale_object/node/tasks/install.yml b/roles/obj_install/tasks/install.yml 
similarity index 100% rename from roles/scale_object/node/tasks/install.yml rename to roles/obj_install/tasks/install.yml diff --git a/roles/scale_object/node/tasks/install_dir_pkg.yml b/roles/obj_install/tasks/install_dir_pkg.yml similarity index 98% rename from roles/scale_object/node/tasks/install_dir_pkg.yml rename to roles/obj_install/tasks/install_dir_pkg.yml index d29a6d69..712c7374 100644 --- a/roles/scale_object/node/tasks/install_dir_pkg.yml +++ b/roles/obj_install/tasks/install_dir_pkg.yml @@ -84,5 +84,4 @@ msg: "No Scale object (spectrum-scale-object) package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: when: ansible_fqdn in scale_protocol_node_list - + when: inventory_hostname in scale_protocol_node_list diff --git a/roles/scale_object/node/tasks/install_local_pkg.yml b/roles/obj_install/tasks/install_local_pkg.yml similarity index 96% rename from roles/scale_object/node/tasks/install_local_pkg.yml rename to roles/obj_install/tasks/install_local_pkg.yml index 8ca34192..39e1d326 100644 --- a/roles/scale_object/node/tasks/install_local_pkg.yml +++ b/roles/obj_install/tasks/install_local_pkg.yml @@ -99,13 +99,13 @@ # Find object rpms -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: install | obj path set_fact: scale_obj_url: 'object_rpms/rhel8' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' - - name: install | Find all packages + - name: install | Find all packages find: paths: "{{ obj_extracted_path }}/{{ scale_obj_url }}" patterns: "*.rpm" @@ -132,5 +132,4 @@ msg: "No Scale object spectrum-scale-object package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/scale_object/node/tasks/install_pmswift.yml b/roles/obj_install/tasks/install_pmswift.yml similarity index 77% rename from roles/scale_object/node/tasks/install_pmswift.yml rename to roles/obj_install/tasks/install_pmswift.yml index ec93d44b..7f8f2fbc 100644 --- a/roles/scale_object/node/tasks/install_pmswift.yml +++ b/roles/obj_install/tasks/install_pmswift.yml @@ -5,24 +5,34 @@ # Add pmswift rpm -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: install | pmswift path set_fact: scale_zimon_url: 'zimon_rpms/rhel8' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' + - name: install | pmswift path + set_fact: + scale_gpg_key_path: + - "{{ scale_gpgKey_repository_obj_src }}" + - "{{ scale_gpgKey_repository_src }}" + when: scale_version >= "5.1.2.2" + - name: install | Configure ZIMon YUM repository yum_repository: name: spectrum-scale-zimon description: IBM Spectrum Scale (ZIMon) baseurl: "{{ scale_install_repository_url }}{{ scale_zimon_url }}" gpgcheck: "{{ scale_install_gpgcheck }}" + gpgkey: "{{ scale_gpg_key_path }}" repo_gpgcheck: no state: present + sslverify: false notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Find pmswift packages @@ -45,5 +55,4 @@ scale_install_all_packages: "{{ scale_install_all_packages + pmswift_package }}" when: scale_install_repository_url is undefined - when: ansible_fqdn in 
scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/scale_object/node/tasks/install_remote_pkg.yml b/roles/obj_install/tasks/install_remote_pkg.yml similarity index 96% rename from roles/scale_object/node/tasks/install_remote_pkg.yml rename to roles/obj_install/tasks/install_remote_pkg.yml index cfc4ae82..14aee865 100644 --- a/roles/scale_object/node/tasks/install_remote_pkg.yml +++ b/roles/obj_install/tasks/install_remote_pkg.yml @@ -73,7 +73,7 @@ # Find object rpms -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: install | obj path set_fact: scale_obj_url: 'object_rpms/rhel8' @@ -106,5 +106,4 @@ msg: "No Scale object spectrum-scale-object package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/scale_object/node/tasks/install_repository.yml b/roles/obj_install/tasks/install_repository.yml similarity index 76% rename from roles/scale_object/node/tasks/install_repository.yml rename to roles/obj_install/tasks/install_repository.yml index 92c05e2e..2e286789 100644 --- a/roles/scale_object/node/tasks/install_repository.yml +++ b/roles/obj_install/tasks/install_repository.yml @@ -4,18 +4,27 @@ scale_obj_url: 'object_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | pmswift path + set_fact: + scale_gpg_key_path: + - "{{ scale_gpgKey_repository_obj_src }}" + - "{{ scale_gpgKey_repository_src }}" + when: scale_version >= "5.1.2.2" + - name: install | configure object YUM repository yum_repository: name: spectrum-scale-object-rpms description: IBM Spectrum Scale (object RPMS) baseurl: "{{ scale_install_repository_url }}{{ scale_obj_url }}" gpgcheck: "{{ scale_install_gpgcheck }}" + gpgkey: "{{ scale_gpg_key_path }}" repo_gpgcheck: no sslverify: no state: present notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Add GPFS object packages to list diff --git a/roles/scale_object/node/tasks/main.yml b/roles/obj_install/tasks/main.yml similarity index 100% rename from roles/scale_object/node/tasks/main.yml rename to roles/obj_install/tasks/main.yml diff --git a/roles/scale_object/node/tasks/yum/install.yml b/roles/obj_install/tasks/yum/install.yml similarity index 78% rename from roles/scale_object/node/tasks/yum/install.yml rename to roles/obj_install/tasks/yum/install.yml index 59506d16..d21e99c3 100644 --- a/roles/scale_object/node/tasks/yum/install.yml +++ b/roles/obj_install/tasks/yum/install.yml @@ -4,12 +4,12 @@ name: "{{ scale_install_all_packages }}" state: present disable_gpg_check: "{{ scale_disable_gpgcheck }}" - when: ansible_fqdn in scale_obj_nodes_list + when: inventory_hostname in scale_obj_nodes_list - name: install | Get installed spectrum-scale-object shell: rpm -qa | grep spectrum-scale-object register: scale_package_status - when: ansible_fqdn in scale_obj_nodes_list + when: inventory_hostname in scale_obj_nodes_list ignore_errors: true args: warn: false @@ -19,5 +19,4 @@ that: - scale_package_status.rc == 0 fail_msg: "spectrum-scale-object is not installed on {{ ansible_hostname }}" - when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff 
--git a/roles/smb/node/vars/main.yml b/roles/obj_install/vars/main.yml similarity index 71% rename from roles/smb/node/vars/main.yml rename to roles/obj_install/vars/main.yml index 5a6e9c01..e3f1da5d 100644 --- a/roles/smb/node/vars/main.yml +++ b/roles/obj_install/vars/main.yml @@ -8,3 +8,7 @@ scale_rpmversion: "{{ scale_version | regex_replace('^([0-9.]+)\\.([0-9])$', '\\ ## Default scale extraction path scale_extracted_default_path: "/usr/lpp/mmfs" scale_extracted_path: "{{ scale_extracted_default_path }}/{{ scale_version }}" + +scale_gpg_key_path: "{{ scale_gpgKey_repository_src }}" + +scale_gpgKey_repository_obj_src: "{{ scale_install_repository_url }}Public_Keys/RPM-GPG-KEY-redhat-release" diff --git a/roles/obj_prepare/README.md b/roles/obj_prepare/README.md new file mode 120000 index 00000000..4f37f9be --- /dev/null +++ b/roles/obj_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.OBJ.md \ No newline at end of file diff --git a/roles/scale_object/precheck/default/main.yml b/roles/obj_prepare/defaults/main.yml similarity index 100% rename from roles/scale_object/precheck/default/main.yml rename to roles/obj_prepare/defaults/main.yml diff --git a/roles/obj_prepare/meta/main.yml b/roles/obj_prepare/meta/main.yml new file mode 100644 index 00000000..4e20e076 --- /dev/null +++ b/roles/obj_prepare/meta/main.yml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.core_common diff --git a/roles/scale_object/precheck/tasks/check.yml b/roles/obj_prepare/tasks/check.yml similarity index 80% rename from roles/scale_object/precheck/tasks/check.yml rename to roles/obj_prepare/tasks/check.yml index fe80e7b1..009f47e4 100644 --- a/roles/scale_object/precheck/tasks/check.yml +++ b/roles/obj_prepare/tasks/check.yml @@ -12,8 +12,8 @@ - name: check | Collect all object nodes set_fact: - scale_obj_nodes_list: "{{ scale_obj_nodes_list + [hostvars[item]['ansible_fqdn']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + scale_obj_nodes_list: "{{ scale_obj_nodes_list + [hostvars[item]['inventory_hostname']] }}" + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost @@ -34,6 +34,6 @@ Object is only supported for Rhel 8 and higher! 
with_items: - "{{ ansible_play_hosts }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool delegate_to: localhost run_once: true diff --git a/roles/scale_object/precheck/tasks/inventory_check.yml b/roles/obj_prepare/tasks/inventory_check.yml similarity index 100% rename from roles/scale_object/precheck/tasks/inventory_check.yml rename to roles/obj_prepare/tasks/inventory_check.yml diff --git a/roles/scale_object/precheck/tasks/main.yml b/roles/obj_prepare/tasks/main.yml similarity index 100% rename from roles/scale_object/precheck/tasks/main.yml rename to roles/obj_prepare/tasks/main.yml diff --git a/roles/obj_upgrade/README.md b/roles/obj_upgrade/README.md new file mode 120000 index 00000000..4f37f9be --- /dev/null +++ b/roles/obj_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.OBJ.md \ No newline at end of file diff --git a/roles/scale_object/upgrade/defaults/main.yml b/roles/obj_upgrade/defaults/main.yml similarity index 100% rename from roles/scale_object/upgrade/defaults/main.yml rename to roles/obj_upgrade/defaults/main.yml diff --git a/roles/scale_object/upgrade/handlers/main.yml b/roles/obj_upgrade/handlers/main.yml similarity index 100% rename from roles/scale_object/upgrade/handlers/main.yml rename to roles/obj_upgrade/handlers/main.yml diff --git a/roles/obj_upgrade/meta/main.yml b/roles/obj_upgrade/meta/main.yml new file mode 100644 index 00000000..4e20e076 --- /dev/null +++ b/roles/obj_upgrade/meta/main.yml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.core_common diff --git a/roles/scale_object/upgrade/tasks/install.yml b/roles/obj_upgrade/tasks/install.yml similarity index 100% rename from roles/scale_object/upgrade/tasks/install.yml rename to roles/obj_upgrade/tasks/install.yml diff --git a/roles/scale_object/upgrade/tasks/install_dir_pkg.yml b/roles/obj_upgrade/tasks/install_dir_pkg.yml similarity index 98% rename from roles/scale_object/upgrade/tasks/install_dir_pkg.yml rename to roles/obj_upgrade/tasks/install_dir_pkg.yml index 52fc41f6..77244733 100644 --- a/roles/scale_object/upgrade/tasks/install_dir_pkg.yml +++ b/roles/obj_upgrade/tasks/install_dir_pkg.yml @@ -84,5 +84,4 @@ msg: "No Scale object (spectrum-scale-object) package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: when: ansible_fqdn in scale_protocol_node_list - + when: inventory_hostname in scale_protocol_node_list diff --git a/roles/scale_object/upgrade/tasks/install_local_pkg.yml b/roles/obj_upgrade/tasks/install_local_pkg.yml similarity index 96% rename from roles/scale_object/upgrade/tasks/install_local_pkg.yml rename to roles/obj_upgrade/tasks/install_local_pkg.yml index c06510a2..843fd8cf 100644 --- a/roles/scale_object/upgrade/tasks/install_local_pkg.yml +++ b/roles/obj_upgrade/tasks/install_local_pkg.yml @@ -101,13 +101,13 @@ # Find object rpms -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: upgrade | obj path set_fact: scale_obj_url: 'object_rpms/rhel8' when: ansible_distribution in scale_rhel_distribution and 
ansible_distribution_major_version == '8' - - name: upgrade | Find all packages + - name: upgrade | Find all packages find: paths: "{{ obj_extracted_path }}/{{ scale_obj_url }}" patterns: "*.rpm" @@ -134,5 +134,4 @@ msg: "No Scale object spectrum-scale-object package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/scale_object/upgrade/tasks/install_pmswift.yml b/roles/obj_upgrade/tasks/install_pmswift.yml similarity index 90% rename from roles/scale_object/upgrade/tasks/install_pmswift.yml rename to roles/obj_upgrade/tasks/install_pmswift.yml index e0ae0e3b..1a145f3c 100644 --- a/roles/scale_object/upgrade/tasks/install_pmswift.yml +++ b/roles/obj_upgrade/tasks/install_pmswift.yml @@ -5,7 +5,7 @@ # Add pmswift rpm -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname - name: upgrade | pmswift path set_fact: scale_obj_url: 'zimon_rpms/rhel8' @@ -24,7 +24,7 @@ - "{{ scale_obj_sensors_packages }}" when: scale_install_repository_url is defined - - name: upgrade | Add pmswift package name + - name: upgrade | Add pmswift package name vars: pmswift_package: "{{ object_package.files | map(attribute='path') | list}}" set_fact: diff --git a/roles/scale_object/upgrade/tasks/install_remote_pkg.yml b/roles/obj_upgrade/tasks/install_remote_pkg.yml similarity index 96% rename from roles/scale_object/upgrade/tasks/install_remote_pkg.yml rename to roles/obj_upgrade/tasks/install_remote_pkg.yml index 0620ae7c..f632383a 100644 --- a/roles/scale_object/upgrade/tasks/install_remote_pkg.yml +++ b/roles/obj_upgrade/tasks/install_remote_pkg.yml @@ -73,7 +73,7 @@ # Find object rpms -- block: ## when: ansible_fqdn in scale_obj_nodes_list +- block: ## when: inventory_hostname in scale_obj_nodes_list - name: upgrade | obj path set_fact: scale_obj_url: 'object_rpms/rhel8' @@ -106,5 +106,4 @@ msg: "No Scale object spectrum-scale-object package found {{ obj_extracted_path }}/{{ scale_obj_url }}/spectrum-scale-object*" run_once: true - when: ansible_fqdn in scale_obj_nodes_list - + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/scale_object/upgrade/tasks/install_repository.yml b/roles/obj_upgrade/tasks/install_repository.yml similarity index 76% rename from roles/scale_object/upgrade/tasks/install_repository.yml rename to roles/obj_upgrade/tasks/install_repository.yml index 202baa2b..c553d4e8 100644 --- a/roles/scale_object/upgrade/tasks/install_repository.yml +++ b/roles/obj_upgrade/tasks/install_repository.yml @@ -4,18 +4,27 @@ scale_obj_url: 'object_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | pmswift path + set_fact: + scale_gpg_key_path: + - "{{ scale_gpgKey_repository_obj_src }}" + - "{{ scale_gpgKey_repository_src }}" + when: scale_version >= "5.1.2.2" + - name: upgrade | configure object YUM repository yum_repository: name: spectrum-scale-object-rpms description: IBM Spectrum Scale (object RPMS) baseurl: "{{ scale_install_repository_url }}{{ scale_obj_url }}" gpgcheck: "{{ scale_install_gpgcheck }}" + gpgkey: "{{ scale_gpg_key_path }}" repo_gpgcheck: no sslverify: no state: present notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Add GPFS object packages to list diff 
--git a/roles/scale_object/upgrade/tasks/main.yml b/roles/obj_upgrade/tasks/main.yml similarity index 100% rename from roles/scale_object/upgrade/tasks/main.yml rename to roles/obj_upgrade/tasks/main.yml diff --git a/roles/scale_object/upgrade/tasks/yum/install.yml b/roles/obj_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/scale_object/upgrade/tasks/yum/install.yml rename to roles/obj_upgrade/tasks/yum/install.yml diff --git a/roles/smb/upgrade/vars/main.yml b/roles/obj_upgrade/vars/main.yml similarity index 71% rename from roles/smb/upgrade/vars/main.yml rename to roles/obj_upgrade/vars/main.yml index 5a6e9c01..e3f1da5d 100644 --- a/roles/smb/upgrade/vars/main.yml +++ b/roles/obj_upgrade/vars/main.yml @@ -8,3 +8,7 @@ scale_rpmversion: "{{ scale_version | regex_replace('^([0-9.]+)\\.([0-9])$', '\\ ## Default scale extraction path scale_extracted_default_path: "/usr/lpp/mmfs" scale_extracted_path: "{{ scale_extracted_default_path }}/{{ scale_version }}" + +scale_gpg_key_path: "{{ scale_gpgKey_repository_src }}" + +scale_gpgKey_repository_obj_src: "{{ scale_install_repository_url }}Public_Keys/RPM-GPG-KEY-redhat-release" diff --git a/roles/obj_verify/README.md b/roles/obj_verify/README.md new file mode 120000 index 00000000..4f37f9be --- /dev/null +++ b/roles/obj_verify/README.md @@ -0,0 +1 @@ +../../docs/README.OBJ.md \ No newline at end of file diff --git a/roles/obj_verify/meta/main.yml b/roles/obj_verify/meta/main.yml new file mode 100644 index 00000000..dab8063f --- /dev/null +++ b/roles/obj_verify/meta/main.yml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: [] diff --git a/roles/scale_object/postcheck/tasks/check.yml b/roles/obj_verify/tasks/check.yml similarity index 76% rename from roles/scale_object/postcheck/tasks/check.yml rename to roles/obj_verify/tasks/check.yml index a59a5427..1729c381 100644 --- a/roles/scale_object/postcheck/tasks/check.yml +++ b/roles/obj_verify/tasks/check.yml @@ -4,7 +4,7 @@ shell: cmd: "{{ scale_command_path }}mmces service list|grep OBJ" register: scale_obj_status - when: ansible_fqdn in scale_obj_nodes_list + when: inventory_hostname in scale_obj_nodes_list failed_when: false - name: postcheck | Check if OBJ is running @@ -12,4 +12,4 @@ that: - scale_obj_status.rc == 0 fail_msg: "OBJ is not active on {{ ansible_hostname }}" - when: ansible_fqdn in scale_obj_nodes_list + when: inventory_hostname in scale_obj_nodes_list diff --git a/roles/scale_object/postcheck/tasks/main.yml b/roles/obj_verify/tasks/main.yml similarity index 100% rename from roles/scale_object/postcheck/tasks/main.yml rename to roles/obj_verify/tasks/main.yml diff --git a/roles/scale_object/postcheck/vars/main.yml b/roles/obj_verify/vars/main.yml similarity index 100% rename from roles/scale_object/postcheck/vars/main.yml rename to roles/obj_verify/vars/main.yml diff --git a/roles/perfmon_configure/README.md b/roles/perfmon_configure/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/perfmon_configure/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/zimon/cluster/defaults/main.yml b/roles/perfmon_configure/defaults/main.yml similarity index 100% rename from roles/zimon/cluster/defaults/main.yml rename to 
roles/perfmon_configure/defaults/main.yml diff --git a/roles/zimon/upgrade/meta/main.yml b/roles/perfmon_configure/meta/main.yml similarity index 59% rename from roles/zimon/upgrade/meta/main.yml rename to roles/perfmon_configure/meta/main.yml index bc8ff453..b74ad2a1 100644 --- a/roles/zimon/upgrade/meta/main.yml +++ b/roles/perfmon_configure/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: zimon_node author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,14 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common diff --git a/roles/zimon/cluster/tasks/configure.yml b/roles/perfmon_configure/tasks/configure.yml similarity index 94% rename from roles/zimon/cluster/tasks/configure.yml rename to roles/perfmon_configure/tasks/configure.yml index e900ca73..04a620c8 100644 --- a/roles/zimon/cluster/tasks/configure.yml +++ b/roles/perfmon_configure/tasks/configure.yml @@ -60,7 +60,7 @@ #TODO: added a check for output, but are having problems using the ( collector_nodes | join(',') ) to use when adding nodes. - name: configure | Initialize performance collection vars: - collector_nodes: "{{ groups['scale_zimon_collectors'] | list }}" + collector_nodes: "{{ groups['scale_zimon_collectors'] | map('extract', hostvars, 'scale_daemon_nodename') | list }}" command: /usr/lpp/mmfs/bin/mmperfmon config generate --collectors {{ collector_nodes | join(',') }} register: scale_zimon_conf_pmcollector when: @@ -73,7 +73,7 @@ - name: configure | update performance collection for new node vars: - collector_nodes_new: "{{ groups['scale_zimon_collectors'] | list }}" + collector_nodes_new: "{{ groups['scale_zimon_collectors'] | map('extract', hostvars, 'scale_daemon_nodename') | list }}" command: /usr/lpp/mmfs/bin/mmperfmon config update --collectors "{{ collector_nodes_new | join(',') }}" register: scale_zimon_update_pmcollector when: @@ -88,7 +88,7 @@ - name: configure | Check before enable nodes for performance collection #TODO: Only checks first node for perfmon. vars: sensor_nodes: "{{ ansible_play_hosts | list }}" - shell: "/usr/lpp/mmfs/bin/mmlscluster -Y | grep -v HEADER | grep clusterNode |grep {{ sensor_nodes | first }} | cut -d ':' -f 14" + shell: "/usr/lpp/mmfs/bin/mmlscluster -Y | grep -v HEADER | grep clusterNode | grep {{ sensor_nodes | map('extract', hostvars, 'scale_daemon_nodename') | first }} | cut -d ':' -f 14" register: scale_zimon_conf_perfmon_check run_once: true failed_when: false @@ -109,7 +109,7 @@ - name: configure | Enable nodes for performance collection #TODO discuss: should it be dependent on scale_zimon_collector? 
vars: sensor_nodes: "{{ ansible_play_hosts | list }}" - command: /usr/lpp/mmfs/bin/mmchnode --perfmon -N {{ sensor_nodes | join(',') }} + command: /usr/lpp/mmfs/bin/mmchnode --perfmon -N {{ sensor_nodes | map('extract', hostvars, 'scale_daemon_nodename') | join(',') }} async: 45 poll: 5 register: scale_zimon_conf_enable_node_perfmon diff --git a/roles/zimon/cluster/tasks/main.yml b/roles/perfmon_configure/tasks/main.yml similarity index 100% rename from roles/zimon/cluster/tasks/main.yml rename to roles/perfmon_configure/tasks/main.yml diff --git a/roles/perfmon_install/README.md b/roles/perfmon_install/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/perfmon_install/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/zimon/node/defaults/main.yml b/roles/perfmon_install/defaults/main.yml similarity index 100% rename from roles/zimon/node/defaults/main.yml rename to roles/perfmon_install/defaults/main.yml diff --git a/roles/zimon/node/meta/main.yml b/roles/perfmon_install/meta/main.yml similarity index 59% rename from roles/zimon/node/meta/main.yml rename to roles/perfmon_install/meta/main.yml index bc8ff453..b74ad2a1 100644 --- a/roles/zimon/node/meta/main.yml +++ b/roles/perfmon_install/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: zimon_node author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,14 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common diff --git a/roles/zimon/node/tasks/apt/install.yml b/roles/perfmon_install/tasks/apt/install.yml similarity index 100% rename from roles/zimon/node/tasks/apt/install.yml rename to roles/perfmon_install/tasks/apt/install.yml diff --git a/roles/zimon/node/tasks/install.yml b/roles/perfmon_install/tasks/install.yml similarity index 100% rename from roles/zimon/node/tasks/install.yml rename to roles/perfmon_install/tasks/install.yml diff --git a/roles/zimon/node/tasks/install_dir_pkg.yml b/roles/perfmon_install/tasks/install_dir_pkg.yml similarity index 100% rename from roles/zimon/node/tasks/install_dir_pkg.yml rename to roles/perfmon_install/tasks/install_dir_pkg.yml diff --git a/roles/zimon/node/tasks/install_local_pkg.yml b/roles/perfmon_install/tasks/install_local_pkg.yml similarity index 91% rename from roles/zimon/node/tasks/install_local_pkg.yml rename to roles/perfmon_install/tasks/install_local_pkg.yml index e4ae8fa8..ba944101 100644 --- a/roles/zimon/node/tasks/install_local_pkg.yml +++ b/roles/perfmon_install/tasks/install_local_pkg.yml @@ -121,6 +121,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/ubuntu16/' @@ -136,6 +141,19 @@ scale_zimon_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu20/' + when: + - 
ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu22/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' diff --git a/roles/zimon/node/tasks/install_remote_pkg.yml b/roles/perfmon_install/tasks/install_remote_pkg.yml similarity index 90% rename from roles/zimon/node/tasks/install_remote_pkg.yml rename to roles/perfmon_install/tasks/install_remote_pkg.yml index 2113459e..b1881d17 100644 --- a/roles/zimon/node/tasks/install_remote_pkg.yml +++ b/roles/perfmon_install/tasks/install_remote_pkg.yml @@ -95,6 +95,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/ubuntu16/' @@ -110,6 +115,19 @@ scale_zimon_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu22/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' diff --git a/roles/zimon/node/tasks/install_repository.yml b/roles/perfmon_install/tasks/install_repository.yml similarity index 74% rename from roles/zimon/node/tasks/install_repository.yml rename to roles/perfmon_install/tasks/install_repository.yml index 66ec7112..6ced865b 100644 --- a/roles/zimon/node/tasks/install_repository.yml +++ b/roles/perfmon_install/tasks/install_repository.yml @@ -7,6 +7,7 @@ - name: Initialize set_fact: scale_zimon_url: "" + scale_zimon_collector_url: "" - name: install | zimon path set_fact: @@ -18,6 +19,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/ubuntu16/' @@ -33,6 +39,19 @@ scale_zimon_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu22/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' + - name: install | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' @@ -43,6 +62,16 @@ 
scale_zimon_url: 'zimon_rpms/sles15/' when: ansible_distribution in scale_sles_distribution and ansible_distribution_major_version == '15' +- name: install | remove existing zimon APT repository + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/apt/sources.list.d/spectrum-scale-pm-ganesha-debs.list + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + - name: install | Configure ZIMon YUM repository yum_repository: name: spectrum-scale-zimon @@ -55,12 +84,13 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure zimon APT repository apt_repository: - filename: spectrum-scale-zimon-debs - repo: "deb [trusted=yes] {{ scale_install_repository_url }}{{ scale_zimon_url }} ./" + filename: "{{ item.key }}" + repo: "deb [trusted=yes] {{ scale_install_repository_url }}{{ item.value }} ./" validate_certs: no state: present update_cache: yes @@ -68,7 +98,10 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' + with_dict: + spectrum-scale-zimon-debs: "{{ scale_zimon_url }}" - name: install | Configure ZIMon repository zypper_repository: @@ -80,12 +113,14 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined + - scale_install_repository_url != 'existing' - name: install | package methods set_fact: scale_zimon_sensors_packages: "{{ scale_zimon_sensors_packages }}" scale_zimon_collector_packages: "{{ scale_zimon_collector_packages }}" - when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' - name: install | package methods set_fact: diff --git a/roles/zimon/node/tasks/main.yml b/roles/perfmon_install/tasks/main.yml similarity index 100% rename from roles/zimon/node/tasks/main.yml rename to roles/perfmon_install/tasks/main.yml diff --git a/roles/zimon/node/tasks/yum/install.yml b/roles/perfmon_install/tasks/yum/install.yml similarity index 100% rename from roles/zimon/node/tasks/yum/install.yml rename to roles/perfmon_install/tasks/yum/install.yml diff --git a/roles/zimon/node/tasks/zypper/install.yml b/roles/perfmon_install/tasks/zypper/install.yml similarity index 100% rename from roles/zimon/node/tasks/zypper/install.yml rename to roles/perfmon_install/tasks/zypper/install.yml diff --git a/roles/zimon/node/vars/main.yml b/roles/perfmon_install/vars/main.yml similarity index 100% rename from roles/zimon/node/vars/main.yml rename to roles/perfmon_install/vars/main.yml diff --git a/roles/perfmon_prepare/README.md b/roles/perfmon_prepare/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/perfmon_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/zimon/precheck/defaults/main.yml b/roles/perfmon_prepare/defaults/main.yml similarity index 100% rename from roles/zimon/precheck/defaults/main.yml rename to roles/perfmon_prepare/defaults/main.yml diff --git a/roles/zimon/precheck/meta/main.yml b/roles/perfmon_prepare/meta/main.yml similarity index 58% rename from roles/zimon/precheck/meta/main.yml rename to roles/perfmon_prepare/meta/main.yml index b75780c9..b74ad2a1 100644 --- a/roles/zimon/precheck/meta/main.yml +++ b/roles/perfmon_prepare/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: zimon_precheck 
author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,14 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common diff --git a/roles/zimon/precheck/tasks/main.yml b/roles/perfmon_prepare/tasks/main.yml similarity index 100% rename from roles/zimon/precheck/tasks/main.yml rename to roles/perfmon_prepare/tasks/main.yml diff --git a/roles/zimon/precheck/vars/main.yml b/roles/perfmon_prepare/vars/main.yml similarity index 100% rename from roles/zimon/precheck/vars/main.yml rename to roles/perfmon_prepare/vars/main.yml diff --git a/roles/perfmon_upgrade/README.md b/roles/perfmon_upgrade/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/perfmon_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/zimon/upgrade/defaults/main.yml b/roles/perfmon_upgrade/defaults/main.yml similarity index 100% rename from roles/zimon/upgrade/defaults/main.yml rename to roles/perfmon_upgrade/defaults/main.yml diff --git a/roles/zimon/cluster/meta/main.yml b/roles/perfmon_upgrade/meta/main.yml similarity index 57% rename from roles/zimon/cluster/meta/main.yml rename to roles/perfmon_upgrade/meta/main.yml index b57e4c95..b74ad2a1 100644 --- a/roles/zimon/cluster/meta/main.yml +++ b/roles/perfmon_upgrade/meta/main.yml @@ -1,11 +1,12 @@ --- galaxy_info: - role_name: zimon_cluster author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Zimon company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL @@ -13,15 +14,7 @@ galaxy_info: - 7 - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui - - zimon + galaxy_tags: [] dependencies: - - core/common + - ibm.spectrum_scale.core_common diff --git a/roles/zimon/upgrade/tasks/apt/install.yml b/roles/perfmon_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/zimon/upgrade/tasks/apt/install.yml rename to roles/perfmon_upgrade/tasks/apt/install.yml diff --git a/roles/zimon/upgrade/tasks/install.yml b/roles/perfmon_upgrade/tasks/install.yml similarity index 100% rename from roles/zimon/upgrade/tasks/install.yml rename to roles/perfmon_upgrade/tasks/install.yml diff --git a/roles/zimon/upgrade/tasks/install_dir_pkg.yml b/roles/perfmon_upgrade/tasks/install_dir_pkg.yml similarity index 100% rename from roles/zimon/upgrade/tasks/install_dir_pkg.yml rename to roles/perfmon_upgrade/tasks/install_dir_pkg.yml diff --git a/roles/zimon/upgrade/tasks/install_local_pkg.yml b/roles/perfmon_upgrade/tasks/install_local_pkg.yml similarity index 100% rename from roles/zimon/upgrade/tasks/install_local_pkg.yml rename to roles/perfmon_upgrade/tasks/install_local_pkg.yml diff --git a/roles/zimon/upgrade/tasks/install_remote_pkg.yml b/roles/perfmon_upgrade/tasks/install_remote_pkg.yml similarity index 100% rename from roles/zimon/upgrade/tasks/install_remote_pkg.yml rename to roles/perfmon_upgrade/tasks/install_remote_pkg.yml diff --git a/roles/zimon/upgrade/tasks/install_repository.yml b/roles/perfmon_upgrade/tasks/install_repository.yml similarity index 80% rename from roles/zimon/upgrade/tasks/install_repository.yml rename to 
roles/perfmon_upgrade/tasks/install_repository.yml index dacc7c87..667ff21a 100644 --- a/roles/zimon/upgrade/tasks/install_repository.yml +++ b/roles/perfmon_upgrade/tasks/install_repository.yml @@ -7,6 +7,7 @@ - name: Initialize set_fact: scale_zimon_url: "" + scale_zimon_collector_url: "" is_scale_collector_pkg_installed: false is_scale_pmswift_pkg_installed: false @@ -20,6 +21,11 @@ scale_zimon_url: 'zimon_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: upgrade | zimon path + set_fact: + scale_zimon_url: 'zimon_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: upgrade | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/ubuntu16/' @@ -31,10 +37,28 @@ when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '18' - name: upgrade | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' + +- name: install | zimon path set_fact: scale_zimon_url: 'zimon_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu20/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + - scale_version >= "5.1.4.0" + +- name: install | zimon path + set_fact: + scale_zimon_url: 'zimon_debs/ubuntu/ubuntu22/' + when: + - ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '22' + - name: upgrade | zimon path set_fact: scale_zimon_url: 'zimon_rpms/sles12/' @@ -45,6 +69,16 @@ scale_zimon_url: 'zimon_rpms/sles15/' when: ansible_distribution in scale_sles_distribution and ansible_distribution_major_version == '15' +- name: install | remove existing zimon APT repository + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/apt/sources.list.d/spectrum-scale-pm-ganesha-debs.list + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + - name: upgrade | Configure ZIMon YUM repository yum_repository: name: spectrum-scale-zimon @@ -57,12 +91,13 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure zimon APT repository apt_repository: - filename: spectrum-scale-zimon-debs - repo: "deb [trusted=yes] {{ scale_install_repository_url }}{{ scale_zimon_url }} ./" + filename: "{{ item.key }}" + repo: "deb [trusted=yes] {{ scale_install_repository_url }}{{ item.value }} ./" validate_certs: no state: present update_cache: yes @@ -70,7 +105,10 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' + with_dict: + spectrum-scale-zimon-debs: "{{ scale_zimon_url }}" - name: upgrade | Configure ZIMon repository zypper_repository: @@ -82,6 +120,8 @@ overwrite_multiple: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined + - scale_install_repository_url != 'existing' - name: upgrade | package methods set_fact: @@ -188,6 +228,8 @@ scale_install_all_packages: "{{ scale_install_all_packages + [ item ] }}" with_items: - "{{ scale_obj_sensors_packages }}" - when: (is_scale_pmswift_pkg_installed 
| bool) + when: + - is_scale_pmswift_pkg_installed | bool + - scale_zimon_offline_upgrade is undefined when: - ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' diff --git a/roles/zimon/upgrade/tasks/main.yml b/roles/perfmon_upgrade/tasks/main.yml similarity index 100% rename from roles/zimon/upgrade/tasks/main.yml rename to roles/perfmon_upgrade/tasks/main.yml diff --git a/roles/zimon/upgrade/tasks/yum/install.yml b/roles/perfmon_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/zimon/upgrade/tasks/yum/install.yml rename to roles/perfmon_upgrade/tasks/yum/install.yml diff --git a/roles/zimon/upgrade/tasks/zypper/install.yml b/roles/perfmon_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/zimon/upgrade/tasks/zypper/install.yml rename to roles/perfmon_upgrade/tasks/zypper/install.yml diff --git a/roles/zimon/upgrade/vars/main.yml b/roles/perfmon_upgrade/vars/main.yml similarity index 100% rename from roles/zimon/upgrade/vars/main.yml rename to roles/perfmon_upgrade/vars/main.yml diff --git a/roles/perfmon_verify/README.md b/roles/perfmon_verify/README.md new file mode 120000 index 00000000..c653cb1f --- /dev/null +++ b/roles/perfmon_verify/README.md @@ -0,0 +1 @@ +../../docs/README.GUI.md \ No newline at end of file diff --git a/roles/zimon/postcheck/defaults/main.yml b/roles/perfmon_verify/defaults/main.yml similarity index 100% rename from roles/zimon/postcheck/defaults/main.yml rename to roles/perfmon_verify/defaults/main.yml diff --git a/roles/zimon/postcheck/meta/main.yml b/roles/perfmon_verify/meta/main.yml similarity index 62% rename from roles/zimon/postcheck/meta/main.yml rename to roles/perfmon_verify/meta/main.yml index 3e1c6229..3f570dcd 100644 --- a/roles/zimon/postcheck/meta/main.yml +++ b/roles/perfmon_verify/meta/main.yml @@ -1,24 +1,18 @@ --- galaxy_info: - role_name: zimon_postcheck author: IBM Corporation description: Role for installing and configuring IBM Spectrum Scale (GPFS) Graphical User Interface (GUI) company: IBM + license: Apache-2.0 - min_ansible_version: 2.8 + + min_ansible_version: 2.9 platforms: - name: EL versions: - 7 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - - graphical - - interface - - gui + galaxy_tags: [] dependencies: [] diff --git a/roles/zimon/postcheck/tasks/main.yml b/roles/perfmon_verify/tasks/main.yml similarity index 100% rename from roles/zimon/postcheck/tasks/main.yml rename to roles/perfmon_verify/tasks/main.yml diff --git a/roles/remote_mount/tasks/mount_filesystems.yml b/roles/remote_mount/tasks/mount_filesystems.yml deleted file mode 100644 index 793fc9b2..00000000 --- a/roles/remote_mount/tasks/mount_filesystems.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -- name: Step 7 - Configure and Mount filesystems - debug: - msg: "Check if remotefileystem '{{ filesystem_loop.scale_remotemount_client_filesystem_name }}' is already defined on Client Cluster" - run_once: True - -- name: Client Cluster (access) | Check if the remotefilesystem is already defined - uri: - validate_certs: "{{ validate_certs_uri }}" - force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/remotefilesystems/{{ filesystem_loop.scale_remotemount_client_filesystem_name }} - method: GET - user: "{{ scale_remotemount_client_gui_username }}" - password: "{{ scale_remotemount_client_gui_password }}" - body_format: json - status_code: - - 200 - register: remote_filesystem_results - ignore_errors: 
true - run_once: True - -- name: block - block: - - name: Step 8 - debug: - msg: "Add the remotefileystem '{{ filesystem_loop.scale_remotemount_client_filesystem_name }}' and mount it on the Client Cluster (access)" - run_once: True - - - name: Client Cluster (access) | Create the remotefs and then mount the filesystem - uri: - validate_certs: "{{ validate_certs_uri }}" - force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/remotefilesystems - method: POST - user: "{{ scale_remotemount_client_gui_username }}" - password: "{{ scale_remotemount_client_gui_password }}" - body_format: json - body: | - { - "remoteFilesystem": "{{ filesystem_loop.scale_remotemount_client_filesystem_name }}", - "owningFilesystem": "{{ filesystem_loop.scale_remotemount_storage_filesystem_name }}", - "owningCluster": "{{ owning_cluster_name }}", - "remoteMountPath": "{{ filesystem_loop.scale_remotemount_client_remotemount_path | realpath }}", - "mountOptions": "{{ filesystem_loop.scale_remotemount_access_mount_attributes | default('rw') }}", - "automount": "{{ filesystem_loop.scale_remotemount_client_mount_fs | default('yes') }}", - "mountOnNodes": "all" - } - status_code: - - 202 - register: send_key - run_once: True - - - name: "Client Cluster (access) | Check the result of adding the remotefs and mounting the filesystem (JOB: {{ send_key.json.jobs[0].jobId }})" - uri: - validate_certs: "{{ validate_certs_uri }}" - force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} - method: GET - user: "{{ scale_remotemount_client_gui_username }}" - password: "{{ scale_remotemount_client_gui_password }}" - register: completed_check - until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" - run_once: True - - when: (remote_filesystem_results.status == 400) \ No newline at end of file diff --git a/roles/remote_mount/.yamllint b/roles/remotemount_configure/.yamllint similarity index 100% rename from roles/remote_mount/.yamllint rename to roles/remotemount_configure/.yamllint diff --git a/roles/remotemount_configure/README.md b/roles/remotemount_configure/README.md new file mode 120000 index 00000000..d978b78e --- /dev/null +++ b/roles/remotemount_configure/README.md @@ -0,0 +1 @@ +../../docs/README.REMOTEMOUNT.md \ No newline at end of file diff --git a/roles/remote_mount/defaults/main.yml b/roles/remotemount_configure/defaults/main.yml similarity index 51% rename from roles/remote_mount/defaults/main.yml rename to roles/remotemount_configure/defaults/main.yml index faa2dc7e..75c0da42 100644 --- a/roles/remote_mount/defaults/main.yml +++ b/roles/remotemount_configure/defaults/main.yml @@ -4,19 +4,20 @@ scale_remotemount_debug: false scale_remotemount_forceRun: false # retries - 2 minutes (40 x 3 seconds) -restapi_retries_count: 40 -restapi_retries_delay: 3 +scale_remotemount_restapi_retries_count: 40 +scale_remotemount_restapi_retries_delay: 3 -client_cluster_gui_port: 443 -storage_cluster_gui_port: 443 +scale_remotemount_client_cluster_gui_port: 443 +scale_remotemount_storage_cluster_gui_port: 443 -scalemgmt_endpoint: "scalemgmt/v2" -remote_mount_endpoint: "{{ scalemgmt_endpoint }}/remotemount" +scale_remotemount_scalemgmt_endpoint: "scalemgmt/v2" +scale_remotemount_endpoint: "{{ scale_remotemount_scalemgmt_endpoint }}/remotemount" 
-validate_certs_uri: 'no' + +scale_remotemount_validate_certs_uri: 'no' # Temporary Storage for Public Key, Only used when debuging -scale_remote_mount_client_access_key: /tmp/client_cluster.pub +scale_remotemount_client_access_key: /tmp/client_cluster.pub # Sets the security mode for communications between the current cluster and the remote cluster # Encyption can have performance effect and increased CPU usage @@ -28,7 +29,7 @@ scale_remote_mount_client_access_key: /tmp/client_cluster.pub # AES256-SHA256 # AES128-SHA', 'AES256-SHA' , AUTHONLY -remotecluster_chipers: "AUTHONLY" +scale_remotemount_remotecluster_chipers: "AUTHONLY" # Storage filesystem # scale_remotemount_access_mount_attributes: "rw" @@ -58,4 +59,32 @@ scale_remote_mount_client_access_key: /tmp/client_cluster.pub # Spectrum Scale uses the Deamon Node Name and the IP Attach to connect and run Cluster traffic. in most cases the admin network and deamon network is the same. # In case you have different AdminNode address and DeamonNode address and for some reason you want to use admin network, then you can set the variable: scale_remotemount_storage_adminnodename: true # Default = DeamonNodeName -#scale_remotemount_storage_adminnodename: false \ No newline at end of file +scale_remotemount_storage_adminnodename: false + + +# Added check that the GPFS daemon is started on the GUI node; it will check the first server in NodeClass GUI_MGMT_SERVERS +# The check can be disabled by changing the flag to false. +scale_remotemount_gpfsdemon_check: true + +# By default it will try to mount the filesystem on all client cluster (accessing) nodes; here you can replace this with a comma-separated list of servers. +# scale1-test,scale2-test +# scale_remotemount_client_mount_on_nodes: all + +# When we are adding the storage cluster on the client cluster we need to specify which nodes should be used, and in normal cases all nodes would be fine. +# In case we have AFM Gateway nodes, or Cloud nodes (TFCT), we want to use the RESTAPI filter to remove those nodes so they are not used. +# Example and the default below is to only list all servers that have (AFM) gatewayNode=false. +scale_remotemount_storage_contactnodes_filter: '?fields=roles.gatewayNode%2Cnetwork.daemonNodeName&filter=roles.gatewayNode%3Dfalse' +# Examples: +# NO AFM and CloudGateway: ?fields=roles.gatewayNode%2Cnetwork.daemonNodeName%2Croles.cloudGatewayNode&filter=roles.gatewayNode%3Dfalse%2Croles.cloudGatewayNode%3Dfalse +# to create your own filter, go to the API Explorer on Spectrum Scale GUI. 
https://IP-TO-GUI-NODE/ibm/api/explorer/#!/Spectrum_Scale_REST_API_v2/nodesGetv2 +# Roles in version 5.1.1.3 +# "roles": { +# "cesNode": false, +# "cloudGatewayNode": false, +# "cnfsNode": false, +# "designation": "quorum", +# "gatewayNode": false, +# "managerNode": false, +# "otherNodeRoles": "perfmonNode", +# "quorumNode": true, +# "snmpNode": false \ No newline at end of file diff --git a/roles/remote_mount/handlers/main.yml b/roles/remotemount_configure/handlers/main.yml similarity index 100% rename from roles/remote_mount/handlers/main.yml rename to roles/remotemount_configure/handlers/main.yml diff --git a/roles/remote_mount/meta/main.yml b/roles/remotemount_configure/meta/main.yml similarity index 70% rename from roles/remote_mount/meta/main.yml rename to roles/remotemount_configure/meta/main.yml index fdb3fff5..cfd80fb5 100644 --- a/roles/remote_mount/meta/main.yml +++ b/roles/remotemount_configure/meta/main.yml @@ -1,9 +1,10 @@ galaxy_info: - role_name: remote_mount author: IBM Corporation description: IBM Spectrum Scale (GPFS) ansible role to configure remote mount company: IBM + license: Apache-2.0 + min_ansible_version: 2.9 platforms: @@ -13,11 +14,6 @@ galaxy_info: - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs + galaxy_tags: [] -dependencies: - [] +dependencies: [] diff --git a/roles/remote_mount/molecule/default/INSTALL.rst b/roles/remotemount_configure/molecule/default/INSTALL.rst similarity index 100% rename from roles/remote_mount/molecule/default/INSTALL.rst rename to roles/remotemount_configure/molecule/default/INSTALL.rst diff --git a/roles/remote_mount/molecule/default/converge.yml b/roles/remotemount_configure/molecule/default/converge.yml similarity index 100% rename from roles/remote_mount/molecule/default/converge.yml rename to roles/remotemount_configure/molecule/default/converge.yml diff --git a/roles/remote_mount/molecule/default/molecule.yml b/roles/remotemount_configure/molecule/default/molecule.yml similarity index 100% rename from roles/remote_mount/molecule/default/molecule.yml rename to roles/remotemount_configure/molecule/default/molecule.yml diff --git a/roles/remote_mount/molecule/default/verify.yml b/roles/remotemount_configure/molecule/default/verify.yml similarity index 100% rename from roles/remote_mount/molecule/default/verify.yml rename to roles/remotemount_configure/molecule/default/verify.yml diff --git a/roles/remote_mount/tasks/cleanup_filesystem_api_cli.yml b/roles/remotemount_configure/tasks/cleanup_filesystem_api_cli.yml similarity index 100% rename from roles/remote_mount/tasks/cleanup_filesystem_api_cli.yml rename to roles/remotemount_configure/tasks/cleanup_filesystem_api_cli.yml diff --git a/roles/remote_mount/tasks/cleanup_filesystems.yml b/roles/remotemount_configure/tasks/cleanup_filesystems.yml similarity index 52% rename from roles/remote_mount/tasks/cleanup_filesystems.yml rename to roles/remotemount_configure/tasks/cleanup_filesystems.yml index 203851ff..b26b6789 100644 --- a/roles/remote_mount/tasks/cleanup_filesystems.yml +++ b/roles/remotemount_configure/tasks/cleanup_filesystems.yml @@ -1,26 +1,27 @@ --- - name: "Cleanup | Client Cluster (access) | Check if the remotefilesystem is already defined {{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint 
}}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_filesystem_results ignore_errors: true run_once: True - name: "Cleanup | Client Cluster (access) | Remove defined filesystem {{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}" block: - - name: "Client Cluster (access) | Unmount the filesystem | PUT {{ scalemgmt_endpoint }}/filesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}/unmount" + - name: "Client Cluster (access) | Unmount the filesystem | PUT {{ scale_remotemount_scalemgmt_endpoint }}/filesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}/unmount" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/filesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}/unmount + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/filesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}/unmount method: PUT user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -36,22 +37,22 @@ - name: "Checking results from the job: {{ umount_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ umount_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ umount_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" - - name: "Client Cluster (access) | Delete the filesystem | DELETE {{ remote_mount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}?force=yes" + - name: "Client Cluster (access) | Delete the filesystem | DELETE {{ scale_remotemount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}?force=yes" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}?force=yes + url: https://{{ 
scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remotefilesystems/{{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}?force=yes method: DELETE user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -61,15 +62,22 @@ - name: "Checking results from the job: {{ delete_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" - when: not remote_filesystem_results.failed + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" + when: remote_filesystem_results.json.status.code == 200 run_once: True + +- name: "Cleanup | Client Cluster (access) | Output from remove defined filesystem {{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}" + run_once: True + debug: + msg: "The is no filesystem named ({{ cleanup_filesystem_loop.scale_remotemount_client_filesystem_name }}) - Message from Restapi: {{ remote_filesystem_results.json.status.message }}" + when: + - remote_filesystem_results.json.status.code == 400 \ No newline at end of file diff --git a/roles/remote_mount/tasks/cleanup_remote_mount.yml b/roles/remotemount_configure/tasks/cleanup_remote_mount.yml similarity index 68% rename from roles/remote_mount/tasks/cleanup_remote_mount.yml rename to roles/remotemount_configure/tasks/cleanup_remote_mount.yml index 351e0b06..3275549c 100644 --- a/roles/remote_mount/tasks/cleanup_remote_mount.yml +++ b/roles/remotemount_configure/tasks/cleanup_remote_mount.yml @@ -3,9 +3,9 @@ # - name: Cleanup | Storage Cluster (owner) | GET the Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -23,9 +23,9 @@ - name: Cleanup | Client Cluster (access) | GET the Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ 
scale_remotemount_client_gui_password }}" @@ -69,16 +69,16 @@ - name: Cleanup | Client Cluster (access) | List the remote cluster already defined uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: remote_clusters_result run_once: True -- name: Cleanup | scale_remotemount_debug | Print out the remote clusters +- name: Cleanup | Client Cluster (access) | scale_remotemount_debug | Print out the remote clusters message code from RestAPI. debug: msg: "{{ remote_clusters_result.json }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -86,7 +86,7 @@ # The remote_clusters_results is in an array, so looping here incase there are multiple remote clusters # We want to delete the one where the owningCluster name matches what we are trying to do a remote mount on -- name: Cleanup | Delete the clusters on a loop... +- name: Cleanup | Client Cluster (access) | Delete the Remote Mount/clusters connection on a loop. include_tasks: delete_remote_cluster.yml when: item.owningCluster == owning_cluster_name loop: "{{ remote_clusters_result.json.owningClusters }}" @@ -98,26 +98,27 @@ - name: "Cleanup | Storage Cluster (owner) | Check if the Client Cluster ('{{ access_cluster_name }}') is already defined" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_clusters_results ignore_errors: true run_once: True - name: Cleanup | Storage Cluster (owner) | Delete the Client Cluster, if it exists block: - - name: "DELETE: {{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}" + - name: "DELETE: {{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: DELETE user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -127,15 +128,23 @@ - name: "Cleanup | Checking results from the job: {{ delete_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" 
force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" - when: not remote_clusters_results.failed - run_once: True \ No newline at end of file + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" + #when: not remote_clusters_results.failed + when: remote_clusters_results.json.status.code == 200 + run_once: True + +- name: "Cleanup | Storage Cluster (owner) | Output from delete the Client Cluster, ('{{ access_cluster_name }}')" + run_once: True + debug: + msg: "The is no Client/Accessing cluster named: ({{ access_cluster_name }}) - Message from RestAPI: {{ remote_clusters_results.json.status.message }}" + when: + - remote_clusters_results.json.status.code == 400 \ No newline at end of file diff --git a/roles/remote_mount/tasks/cleanup_remote_mount_api_cli.yml b/roles/remotemount_configure/tasks/cleanup_remote_mount_api_cli.yml similarity index 82% rename from roles/remote_mount/tasks/cleanup_remote_mount_api_cli.yml rename to roles/remotemount_configure/tasks/cleanup_remote_mount_api_cli.yml index a72791f8..cc880850 100644 --- a/roles/remote_mount/tasks/cleanup_remote_mount_api_cli.yml +++ b/roles/remotemount_configure/tasks/cleanup_remote_mount_api_cli.yml @@ -3,9 +3,9 @@ # - name: Cleanup Remote Mount - API-CLI | Storage Cluster (owner) | GET the Owning Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -125,15 +125,16 @@ - name: "Cleanup Remote Mount - API-CLI | Storage Cluster (owner) | Check if the Client Cluster ('{{ access_cluster_name }}') is already defined" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_clusters_results ignore_errors: true run_once: True @@ -141,11 +142,11 @@ - name: Cleanup Remote Mount - API-CLI | Storage Cluster (owner) | Delete the Client Cluster, if it exists. 
block: - - name: "DELETE: {{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}" + - name: "DELETE: {{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: DELETE user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -155,15 +156,22 @@ - name: "Cleanup Remote Mount - API-CLI | Checking results from the job: {{ delete_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" - when: not remote_clusters_results.failed - run_once: True \ No newline at end of file + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" + when: remote_clusters_results.json.status.code == 200 + run_once: True + +- name: "Cleanup Remote Mount - API-CLI | Storage Cluster (owner) | Output from delete the Client Cluster, ('{{ access_cluster_name }}')" + run_once: True + debug: + msg: "The is no Client/Accessing cluster named: ({{ access_cluster_name }}) - Message from RestAPI: {{ remote_clusters_results.json.status.message }}" + when: + - remote_clusters_results.json.status.code == 400 \ No newline at end of file diff --git a/roles/remote_mount/tasks/delete_remote_cluster.yml b/roles/remotemount_configure/tasks/delete_remote_cluster.yml similarity index 69% rename from roles/remote_mount/tasks/delete_remote_cluster.yml rename to roles/remotemount_configure/tasks/delete_remote_cluster.yml index 3c81da4a..2979ed13 100644 --- a/roles/remote_mount/tasks/delete_remote_cluster.yml +++ b/roles/remotemount_configure/tasks/delete_remote_cluster.yml @@ -4,9 +4,9 @@ # Only users with role 'Administrator' or 'CNSS Operator' have permission to for this REST endpoint. Read also the documentation of CLI command 'mmremotecluster delete'. - name: Client Cluster (access) | DELETE The remotecluster {{ owning_cluster_name }} ... 
uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters/{{ owning_cluster_name }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters/{{ owning_cluster_name }} method: DELETE user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -16,13 +16,13 @@ - name: Client Cluster (access) | Check the results from the DELETE uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" diff --git a/roles/remote_mount/tasks/main.yml b/roles/remotemount_configure/tasks/main.yml similarity index 50% rename from roles/remote_mount/tasks/main.yml rename to roles/remotemount_configure/tasks/main.yml index fb30c85f..17519f26 100644 --- a/roles/remote_mount/tasks/main.yml +++ b/roles/remotemount_configure/tasks/main.yml @@ -36,9 +36,9 @@ - block: # RESTAPI - when: scale_remotemount_client_no_gui == false - name: Main | Storage Cluster (owner) | Check Connectivity to Storage Cluster GUI uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -73,9 +73,9 @@ - name: Main | Client Cluster (access) | Check Connectivity to Client Cluster GUI uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -108,6 +108,78 @@ when: - access_cluster_status.status == 401 + - name: Main | Client Cluster (access) | Check status of GPFS deamon (Nodeclass GUI_MGMT_SERVERS) + uri: + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" + force_basic_auth: true + url: https://{{ 
scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C + method: GET + user: "{{ scale_remotemount_client_gui_username }}" + password: "{{ scale_remotemount_client_gui_password }}" + body_format: json + return_content: yes + status_code: + - 200 + - 400 + register: clientcluster_gpfs_deamon_status + run_once: True + when: scale_remotemount_gpfsdemon_check | bool + + - name: Main | Client Cluster (access) | Print status of GPFS daemon on GUI_MGMT_SERVERS - Debug + run_once: True + ignore_errors: true + debug: + msg: "Status of GPFS daemon: {{ clientcluster_gpfs_deamon_status.json.states[0].state }} - Rest API status message: {{ clientcluster_gpfs_deamon_status.json.status.message }}" + when: + - scale_remotemount_gpfsdemon_check | bool + - scale_remotemount_debug is defined + - scale_remotemount_debug | bool + + - name: Main | Client Cluster (access) | GPFS daemon on Client Cluster (NodeClass GUI_MGMT_SERVERS) + run_once: True + assert: + that: + - "clientcluster_gpfs_deamon_status.json.states[0].state in ['HEALTHY', 'DEGRADED']" + fail_msg: "GPFS daemon is NOT started on NodeClass GUI_MGMT_SERVERS" + success_msg: "GPFS daemon is started on NodeClass GUI_MGMT_SERVERS" + when: scale_remotemount_gpfsdemon_check | bool + + - name: Main | Storage Cluster (owning) | Check status of GPFS daemon on GUI_MGMT_SERVERS + uri: + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" + force_basic_auth: true + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C + method: GET + user: "{{ scale_remotemount_storage_gui_username }}" + password: "{{ scale_remotemount_storage_gui_password }}" + body_format: json + return_content: yes + status_code: + - 200 + - 400 + register: storagecluster_gpfs_deamon_status + run_once: True + when: scale_remotemount_gpfsdemon_check | bool + + - name: Main | Storage Cluster (owning) | Print status of GPFS daemon on GUI_MGMT_SERVERS - Debug + run_once: True + ignore_errors: true + debug: + msg: "{{ storagecluster_gpfs_deamon_status.json.states[0].state }}" + when: + - scale_remotemount_gpfsdemon_check | bool + - scale_remotemount_debug is defined + - scale_remotemount_debug | bool + + - name: Main | Storage Cluster (owning) | GPFS daemon on Storage Cluster (NodeClass GUI_MGMT_SERVERS) + run_once: True + assert: + that: + - "storagecluster_gpfs_deamon_status.json.states[0].state in ['HEALTHY', 'DEGRADED']" + fail_msg: "GPFS daemon is NOT started on NodeClass GUI_MGMT_SERVERS" + success_msg: "GPFS daemon is started on NodeClass GUI_MGMT_SERVERS" + when: scale_remotemount_gpfsdemon_check | bool + - name: msg debug: msg: "Force Run was passed in, attempting to run remote_mount role regardless of whether the filesystem is configured." 
@@ -125,9 +197,9 @@ - block: # RESTAPI-CLI when: scale_remotemount_client_no_gui == true - name: Main | API-CLI | Storage Cluster (owner) | Check Connectivity to Storage Cluster GUI uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -160,16 +232,51 @@ when: - storage_cluster_status.status == 401 + + - name: Main | API-CLI | Storage Cluster (owning) | Check status of GPFS daemon on GUI_MGMT_SERVERS + uri: + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" + force_basic_auth: true + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/GUI_MGMT_SERVERS/health/states?fields=component%2Cstate&filter=component%3DGPFS%2C + method: GET + user: "{{ scale_remotemount_storage_gui_username }}" + password: "{{ scale_remotemount_storage_gui_password }}" + body_format: json + return_content: yes + status_code: + - 200 + register: storagecluster_gpfs_deamon_status + run_once: True + when: scale_remotemount_gpfsdemon_check | bool + + - name: Main | API-CLI | Storage Cluster (owning) | Print status of GPFS daemon on GUI_MGMT_SERVERS - Debug + run_once: True + ignore_errors: true + debug: + msg: "{{ storagecluster_gpfs_deamon_status.json.states[0].state }}" + when: + - scale_remotemount_gpfsdemon_check | bool + - scale_remotemount_debug is defined + - scale_remotemount_debug | bool + + - name: Main | API-CLI | Storage Cluster (owning) | GPFS daemon on Storage Cluster (NodeClass GUI_MGMT_SERVERS) + run_once: True + assert: + that: + - "storagecluster_gpfs_deamon_status.json.states[0].state in ['HEALTHY', 'DEGRADED']" + fail_msg: "GPFS daemon is NOT started on NodeClass GUI_MGMT_SERVERS" + success_msg: "GPFS daemon is started on NodeClass GUI_MGMT_SERVERS" + when: scale_remotemount_gpfsdemon_check | bool + - name: Main | API-CLI | Force Run debug: - msg: "Force Run was passed in, attempting to run remote_mount role regardless of whether the filesystem is configured." 
+ msg: "Force Run was passed in, attempting to run remote_mount role regardless of whether the filesystem is configured" when: scale_remotemount_forceRun | bool - name: Main | API-CLI | Configure Remote Cluster include_tasks: remotecluster_api_cli.yml run_once: True - - name: Main | API-CLI | Remote mount the filesystem's + - name: Main | API-CLI | Remote Mount the filesystems include_tasks: mount_filesystem_api_cli.yml run_once: True diff --git a/roles/remote_mount/tasks/mount_filesystem_api_cli.yml b/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml similarity index 86% rename from roles/remote_mount/tasks/mount_filesystem_api_cli.yml rename to roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml index 2d90caa0..aef036b6 100644 --- a/roles/remote_mount/tasks/mount_filesystem_api_cli.yml +++ b/roles/remotemount_configure/tasks/mount_filesystem_api_cli.yml @@ -9,9 +9,9 @@ - name: Mount Filesystem - Rest-API | Storage Cluster (owner) | GET the Cluster Information uri: - validate_certs: no + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -52,14 +52,14 @@ - name: Step 5 - Mount Filesystem - Rest-API debug: - msg: "On Storage Cluster, Check if filesystems is allready accessible for Client Cluster" + msg: "On Storage Cluster, Check if filesystems is already accessible for Client Cluster" run_once: True -- name: "Mount Filesystem - Rest-API | Storage Cluster (owner) | Check if filesystems is allready accessible for Client Cluster ('{{ access_cluster_name }}')" +- name: "Mount Filesystem - Rest-API | Storage Cluster (owner) | Check if filesystems is already accessible for Client Cluster ('{{ access_cluster_name }}')" uri: - validate_certs: no + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -92,9 +92,9 @@ - name: Mount Filesystem - Rest-API | Storage Cluster (owning) | Allow and Set the client cluster filesystem access attributes on the Storage Cluster uri: - validate_certs: no + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}/access/{{ item.scale_remotemount_storage_filesystem_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}/access/{{ item.scale_remotemount_storage_filesystem_name }} method: POST user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ 
-121,16 +121,16 @@ - name: Mount Filesystem - Rest-API | Storage Cluster (owning) | Check the result of setting the access attributes on the Storage Cluster ##"{{ completed_check.json.jobs[0].jobId }}" uri: - validate_certs: no + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ item.json.jobs.0['jobId'] }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ item.json.jobs.0['jobId'] }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True loop: "{{ uri_result.results }}" when: @@ -168,7 +168,7 @@ debug: msg: "Add the remotefileystem and mount it on the Client Side" -- name: Mount Filesystem - Rest-API | Client Cluster (access) | Add remote filesystem +- name: Mount Filesystem - Rest-API | Client Cluster (access) | Add remote filesystem - Output is from the check task. run_once: True shell: | /usr/lpp/mmfs/bin/mmremotefs add {{ item.item.scale_remotemount_client_filesystem_name }} -f {{ item.item.scale_remotemount_storage_filesystem_name }} -C {{ owning_cluster_name }} -T {{ item.item.scale_remotemount_client_remotemount_path }} -o {{ item.item.scale_remotemount_access_mount_attributes | default ('rw') }} -A {{ item.item.scale_remotemount_client_mount_fs | default ('yes') }} --mount-priority {{ item.item.scale_remotemount_client_mount_priority | default ('0') }} @@ -212,14 +212,13 @@ fail: msg: "Scale/GPFS deamon is NOT running on one or serveral of your client cluster node. Check and run mmount manually" when: "'down' in gpfs_deamon_state.stdout" - ignore_errors: true run_once: true -# Not adding any check here, run only when when mmremotefs add task is also run. +# Not adding any check here; this runs only when the mmremotefs add task is also run. -- name: Client Cluster (access) | Mount remote filesystem on all client nodes +- name: Client Cluster (access) | Mount remote filesystem on all client nodes - Output is from the previous task, which checks if the filesystems are already mounted run_once: True - command: /usr/lpp/mmfs/bin/mmmount {{ item.item.scale_remotemount_client_filesystem_name }} -N {{ accessing_nodes_name }} + command: /usr/lpp/mmfs/bin/mmmount {{ item.item.scale_remotemount_client_filesystem_name }} -N {{ scale_remotemount_client_mount_on_nodes | default('all') }} loop: "{{ remote_filesystem_results_cli.results }}" when: - item.rc != 0 or scale_remotemount_forceRun | bool @@ -231,7 +230,7 @@ # Adding a stdout from previous as the stdout from the loop abow can be confusing when several loops. -- name: Client Cluster (access) | Mount remote filesystem on all client nodes - Show stdout from the previous task. +- name: Client Cluster (access) | Mount remote filesystem on all client nodes - Shows stdout from the previous task. 
debug: msg: "Message from mount remote filesystem task: {{ item }}" loop: "{{ client_cluster_mount_remotefs | json_query('results[*].stdout') }}" diff --git a/roles/remotemount_configure/tasks/mount_filesystems.yml b/roles/remotemount_configure/tasks/mount_filesystems.yml new file mode 100644 index 00000000..8873e968 --- /dev/null +++ b/roles/remotemount_configure/tasks/mount_filesystems.yml @@ -0,0 +1,115 @@ +--- + + +- name: Step 7 - Check status of GPFS deamon on all nodes before mounting filesystem. + debug: + msg: "Check status of GPFS deamon on all nodes before mounting filesystem " + run_once: True +# +# Cheking that GPFS deamon is started on all nodes, else the adding and mounting of filesystem fails. +# RestAPI filters for GPFS deamon on all nodes with the state FAILED. +# +- name: Client Cluster (access) | Check status of GPFS deamon on all nodes before mounting filesystem. + uri: + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" + force_basic_auth: true + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/%3Aall%3A/health/states?fields=component%2Cstate&filter=component%3DGPFS%2Cstate%3DFAILED + method: GET + user: "{{ scale_remotemount_client_gui_username }}" + password: "{{ scale_remotemount_client_gui_password }}" + body_format: json + return_content: yes + status_code: + - 200 + register: clientcluster_gpfs_deamon_all_status + run_once: True + when: scale_remotemount_gpfsdemon_check | bool + +- name: Client Cluster (access) | Print status of GPFS deamon - Debug + run_once: True + ignore_errors: true + debug: + msg: "{{ clientcluster_gpfs_deamon_all_status.json.states }}" + when: + - scale_remotemount_gpfsdemon_check | bool + - scale_remotemount_debug is defined + - scale_remotemount_debug | bool + +- name: Client Cluster (access) | One or more GPFS Deamon on Client Cluster is down. 
+ run_once: True + assert: + that: + - "{{ clientcluster_gpfs_deamon_all_status.json.states|length == 0 }}" + fail_msg: "'GPFS Deamon is NOT started on all nodes, so mounting of filesystem will fail " + success_msg: "'GPFS Deamon is started on all nodes" + when: + - scale_remotemount_gpfsdemon_check | bool + +- name: Step 8 - Configure and Mount filesystems + debug: + msg: "Check if remotefileystem '{{ filesystem_loop.scale_remotemount_client_filesystem_name }}' is already defined on Client Cluster" + run_once: True + +- name: Client Cluster (access) | Check if the remotefilesystem is already defined + uri: + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" + force_basic_auth: yes + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remotefilesystems/{{ filesystem_loop.scale_remotemount_client_filesystem_name }} + method: GET + user: "{{ scale_remotemount_client_gui_username }}" + password: "{{ scale_remotemount_client_gui_password }}" + body_format: json + status_code: + - 200 + - 400 + - 404 + register: remote_filesystem_results + ignore_errors: true + run_once: True + +- name: block + block: + - name: Step 9 + debug: + msg: "Add the remotefileystem '{{ filesystem_loop.scale_remotemount_client_filesystem_name }}' and mount it on the Client Cluster (access)" + run_once: True + + - name: Client Cluster (access) | Create the remotefs and then mount the filesystem + uri: + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" + force_basic_auth: true + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remotefilesystems + method: POST + user: "{{ scale_remotemount_client_gui_username }}" + password: "{{ scale_remotemount_client_gui_password }}" + body_format: json + body: | + { + "remoteFilesystem": "{{ filesystem_loop.scale_remotemount_client_filesystem_name }}", + "owningFilesystem": "{{ filesystem_loop.scale_remotemount_storage_filesystem_name }}", + "owningCluster": "{{ owning_cluster_name }}", + "remoteMountPath": "{{ filesystem_loop.scale_remotemount_client_remotemount_path | realpath }}", + "mountOptions": "{{ filesystem_loop.scale_remotemount_access_mount_attributes | default('rw') }}", + "automount": "{{ filesystem_loop.scale_remotemount_client_mount_fs | default('yes') }}", + "mountOnNodes": "{{ scale_remotemount_client_mount_on_nodes | default('all') }}" + } + status_code: + - 202 + register: send_key + run_once: True + + - name: "Client Cluster (access) | Check the result of adding the remotefs and mounting the filesystem (JOB: {{ send_key.json.jobs[0].jobId }})" + uri: + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" + force_basic_auth: true + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} + method: GET + user: "{{ scale_remotemount_client_gui_username }}" + password: "{{ scale_remotemount_client_gui_password }}" + register: completed_check + until: completed_check.json.jobs[0].status == "COMPLETED" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" + run_once: True + + when: (remote_filesystem_results.status == 400) or (remote_filesystem_results.status == 404) diff --git a/roles/remote_mount/tasks/precheck.yml b/roles/remotemount_configure/tasks/precheck.yml similarity 
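# Illustrative sketch, not part of this patch: mount_filesystems.yml refers to a
# 'filesystem_loop' item, so it is presumably included once per filesystem definition.
# The calling task is not shown in this hunk; a plausible include could look like this.
- name: Main | Configure and mount each remote filesystem
  include_tasks: mount_filesystems.yml
  loop: "{{ scale_remotemount_filesystem_name }}"
  loop_control:
    loop_var: filesystem_loop
  run_once: true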
index 52% rename from roles/remote_mount/tasks/precheck.yml rename to roles/remotemount_configure/tasks/precheck.yml index 94924018..b789d7fb 100644 --- a/roles/remote_mount/tasks/precheck.yml +++ b/roles/remotemount_configure/tasks/precheck.yml @@ -64,3 +64,62 @@ msg: "item.scale_remotemount_storage_filesystem_name is not defined" when: item.scale_remotemount_storage_filesystem_name is undefined loop: "{{ scale_remotemount_filesystem_name }}" + + +# This block is for systems with Scale GUI/RESTAPI on both Accessing cluster and Remote Cluster. +# +- block: # RESTAPI - when: scale_remotemount_client_no_gui == false + - name: Main | Storage Cluster (owner) | Check Connectivity to Storage Cluster GUI + uri: + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" + force_basic_auth: yes + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster + method: GET + user: "{{ scale_remotemount_storage_gui_username }}" + password: "{{ scale_remotemount_storage_gui_password }}" + body_format: json + status_code: + - 200 + register: storage_cluster_status + until: + - storage_cluster_status.status == 200 + retries: 15 + delay: 30 + changed_when: false + + + - name: Main | Storage Cluster (owner) | Conenction Refused Storage Cluster + run_once: True + fail: + msg: "There is issues connection to GUI/RestAPI, http return code: {{ storage_cluster_status.status }}" + when: + - storage_cluster_status.status != 200 + + - name: Main | Client Cluster (access) | Check Connectivity to Client Cluster GUI + uri: + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" + force_basic_auth: yes + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster + method: GET + user: "{{ scale_remotemount_client_gui_username }}" + password: "{{ scale_remotemount_client_gui_password }}" + body_format: json + status_code: + - 200 + register: access_cluster_status + until: + - access_cluster_status.status == 200 + retries: 15 + delay: 30 + changed_when: false + + - name: Main | Client Cluster (access) | Conenction Refused Client Cluster + run_once: True + fail: + msg: "There is issues connection to GUI/RestAPI, http return code: {{ access_cluster_status.status }}" + when: + - access_cluster_status.status != 200 + + when: + - scale_remotemount_client_no_gui == false + diff --git a/roles/remote_mount/tasks/remotecluster.yml b/roles/remotemount_configure/tasks/remotecluster.yml similarity index 75% rename from roles/remote_mount/tasks/remotecluster.yml rename to roles/remotemount_configure/tasks/remotecluster.yml index bc2367d4..4da9dba6 100644 --- a/roles/remote_mount/tasks/remotecluster.yml +++ b/roles/remotemount_configure/tasks/remotecluster.yml @@ -9,9 +9,9 @@ - name: Storage Cluster (owner) | GET the Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -29,9 +29,9 @@ - name: Client Cluster (access) | GET the Cluster 
Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -76,26 +76,29 @@ # - name: "Storage Cluster (owner) | Check if the Client Cluster ('{{ access_cluster_name }}') is already defined" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_clusters_results ignore_errors: true run_once: True # -# TODO: there is no Check if the Storage Cluster (Owner) is allready defined on Client Cluster +# TODO: there is no Check if the Storage Cluster (Owner) is already defined on Client Cluster, so in some cases where storage cluster have connection to client cluster (mmauth) but the client cluster don't have, the playbook will fail +# as the owningcluster is in a array, we need to loop over or make list of the array to be able to use when: +# - name: Client Cluster (access) | List the remote cluster already defined uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -124,11 +127,11 @@ - name: Storage Cluster (owner) | Delete the Client Cluster, if it exists block: - - name: "DELETE: {{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}" + - name: "DELETE: {{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: DELETE user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -138,16 +141,16 @@ - name: "Checking results from the job: {{ delete_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ 
scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" when: not remote_clusters_results.failed and scale_remotemount_forceRun | bool run_once: True @@ -160,9 +163,9 @@ - name: Client Cluster (access) | Get the Public Key uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/authenticationkey + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/authenticationkey method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -180,9 +183,9 @@ - name: Storage Cluster (owner) | Send the Public Key of the Client Cluster (access) uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters method: POST user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -190,6 +193,7 @@ body: | { "remoteCluster": "{{ access_cluster_name }}", + "ciphers": ["{{ scale_remotemount_remotecluster_chipers }}"], "key": {{ accesskey_result.json.key | trim | replace(", ", ",") }} } status_code: @@ -199,16 +203,16 @@ - name: "Storage Cluster (owner) | Check the result of adding the Client Cluster {{ send_key.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status != "FAILED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True # @@ -217,9 +221,9 @@ - name: Storage Cluster (owner) | Get the Public Key uri: - validate_certs: "{{ 
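# Illustrative sketch, not part of this patch: the remoteclusters POST body above now takes its
# cipher list from a variable. A caller could pin it explicitly; AUTHONLY is a valid GPFS
# cipherList value shown here as an assumed example, not necessarily the role default.
scale_remotemount_remotecluster_chipers: AUTHONLY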
validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/authenticationkey + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/authenticationkey method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -242,9 +246,9 @@ - name: Client Cluster (access) | List the remote cluster already defined uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -262,11 +266,11 @@ # # This section is to gather the nodenames and adminNodeName # - - name: "Storage Cluster (owning) | GET AdminNodeNames Info - GET {{ scalemgmt_endpoint }}/nodes" + - name: "Storage Cluster (owning) | GET AdminNodeNames Info - GET {{ scale_remotemount_scalemgmt_endpoint }}/nodes" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes{{ scale_remotemount_storage_contactnodes_filter }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -288,11 +292,11 @@ # # This Section is when using daemonNodeName # - - name: "Storage Cluster (owner) | GET daemonNodeName Info - GET {{ scalemgmt_endpoint }}/nodes/" + - name: "Storage Cluster (owner) | GET daemonNodeName Info - GET {{ scale_remotemount_scalemgmt_endpoint }}/nodes/" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes/{{item}} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/{{item}} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -315,7 +319,7 @@ # # adminNodeName section # - - name: scale_remotemount_debug | Print out the array storing the nodes in the Storage Cluster (owning) + - name: scale_remotemount_debug | Print out the array storing the adminNodeNames from the Storage Cluster (owning) debug: msg: "{{ owning_nodes_name }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -323,9 +327,9 @@ - name: Client Cluster (access) | Add Storage Cluster as a Remote Cluster with adminNodeName uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ 
scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters method: POST user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -344,22 +348,22 @@ - name: "Client Cluster (access) | Check the result of adding the remote Storage Cluster with adminNodeName (JOB: {{ adminnode_uri_result.json.jobs[0].jobId }})" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ adminnode_uri_result.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ adminnode_uri_result.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True when: scale_remotemount_storage_adminnodename is defined and scale_remotemount_storage_adminnodename | bool # # deamonNodeName section # - - name: scale_remotemount_debug | Print out the array storing the nodes in the Storage Cluster (owning) + - name: scale_remotemount_debug | Print out the array storing the DeamonNodeNames from the Storage Cluster (owning) debug: msg: "{{ owning_daemon_nodes_name }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -367,9 +371,9 @@ - name: Client Cluster (access) | Add Storage Cluster as a Remote Cluster with DeamonNodeName uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ remote_mount_endpoint }}/owningclusters + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/owningclusters method: POST user: "{{ scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" @@ -384,35 +388,35 @@ - 202 register: daemonnodesname_uri_result run_once: True - when: scale_remotemount_storage_adminnodename is not true + when: not scale_remotemount_storage_adminnodename - name: "Client Cluster (access) | Check the result of adding the remote Storage Cluster with DeamonNodeName (JOB: {{ daemonnodesname_uri_result.json.jobs[0].jobId }})" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_client_gui_hostname }}:{{ client_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ daemonnodesname_uri_result.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_client_gui_hostname }}:{{ scale_remotemount_client_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ daemonnodesname_uri_result.json.jobs[0].jobId }} method: GET user: "{{ 
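# Illustrative sketch, not part of this patch: the adminNodeName and daemonNodeName branches
# above are selected by a boolean, and the /nodes lookup can be narrowed by an optional string
# appended to the URL. Both values below are assumptions for illustration.
scale_remotemount_storage_adminnodename: false   # false selects the daemonNodeName branch
scale_remotemount_storage_contactnodes_filter: ""   # optional; a '?filter=...' expression could restrict which nodes become contact nodes (assumed usage)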
scale_remotemount_client_gui_username }}" password: "{{ scale_remotemount_client_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True - when: scale_remotemount_storage_adminnodename is not true + when: not scale_remotemount_storage_adminnodename when: - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - name: Step 5 - Configure and Mount filesystems debug: - msg: "On Storage Cluster, Check if filesystems is allready accessible for Client Cluster" + msg: "On Storage Cluster, Check if filesystems is already accessible for Client Cluster" run_once: True -- name: "Mount Filesystem | Storage Cluster (owner) | Check if filesystems is allready accessible for Client Cluster ('{{ access_cluster_name }}')" +- name: "Mount Filesystem | Storage Cluster (owner) | Check if filesystems is already accessible for Client Cluster ('{{ access_cluster_name }}')" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -447,9 +451,9 @@ - name: Mount Filesystem| Storage Cluster (owning) | Allow and Set the client cluster filesystem access attributes on the Storage Cluster uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}/access/{{ item.scale_remotemount_storage_filesystem_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}/access/{{ item.scale_remotemount_storage_filesystem_name }} method: POST user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -474,18 +478,18 @@ when: - 'item.item.scale_remotemount_storage_filesystem_name not in current_scale_remotemount_storage_filesystem_name' -- name: Mount Filesystem | Storage Cluster (owning) | Check the result of setting the access attributes on the Storage Cluster "{{ item.json.jobs.0['jobId'] }}" +- name: Mount Filesystem | Storage Cluster (owning) | Check the result of setting the access attributes on the Storage Cluster ##"{{ item.json.jobs.0['jobId'] }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ item.json.jobs.0['jobId'] }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ 
item.json.jobs.0['jobId'] }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True loop: "{{ uri_result.results }}" when: diff --git a/roles/remote_mount/tasks/remotecluster_api_cli.yml b/roles/remotemount_configure/tasks/remotecluster_api_cli.yml similarity index 69% rename from roles/remote_mount/tasks/remotecluster_api_cli.yml rename to roles/remotemount_configure/tasks/remotecluster_api_cli.yml index 214cf149..c7a14c53 100644 --- a/roles/remote_mount/tasks/remotecluster_api_cli.yml +++ b/roles/remotemount_configure/tasks/remotecluster_api_cli.yml @@ -9,9 +9,9 @@ - name: Remote Cluster Config - API-CLI | Storage Cluster (owner) | GET the Cluster Information uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/cluster + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/cluster method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -21,7 +21,7 @@ register: owning_cluster_info run_once: True -- name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Storage Cluster (owner) | Print the Cluster Information +- name: Remote Cluster Config - API-CLI | Storage Cluster (owner) | scale_remotemount_debug | Print the Cluster Information debug: msg: "{{ owning_cluster_info }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -34,7 +34,7 @@ failed_when: false run_once: True -- name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Client Cluster (access) | Print the Cluster Information +- name: Remote Cluster Config - API-CLI | Client Cluster (access) | scale_remotemount_debug | Print the Cluster Information debug: msg: "{{ access_cluster_info }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -53,19 +53,20 @@ - name: Step 2 - Remote Cluster Config - API-CLI debug: - msg: "Check if the Remote Cluster is allready configured" + msg: "Check if the Remote Cluster is already configured" - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | Check if the Client Cluster ('{{ access_cluster_name }}') is already defined" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" body_format: json status_code: - 200 + - 400 register: remote_clusters_results ignore_errors: true run_once: true @@ -80,11 +81,11 @@ - name: Remote Cluster Config - API-CLI | Storage Cluster 
(owner) | Delete the Client Cluster, if it exists block: - - name: "DELETE: {{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }}" + - name: "DELETE: {{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters/{{ access_cluster_name }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters/{{ access_cluster_name }} method: DELETE user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -95,16 +96,16 @@ - name: "Checking results from the job: {{ delete_call.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ delete_call.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status == "COMPLETED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True when: - not remote_clusters_results.failed and scale_remotemount_forceRun | bool @@ -116,33 +117,77 @@ when: - (remote_clusters_results.status == 200) or (scale_remotemount_forceRun | bool) +# Get node names and check if gpfs deamon is running. +- name: Remote Cluster Config - API-CLI | Client Cluster (access) | GET the cluster nodes name information + shell: /usr/lpp/mmfs/bin/mmlscluster -Y | grep -v HEADER | grep clusterNode | cut -d ':' -f 8 + register: access_node_names + changed_when: false + failed_when: false + run_once: True + +- name: Remote Cluster Config - API-CLI | Client Cluster (access) | scale_remotemount_debug | Print the Cluster Information + debug: + msg: "{{ access_node_names.stdout_lines | join(',') }}" + when: scale_remotemount_debug is defined and scale_remotemount_debug | bool + run_once: True + +- set_fact: + accessing_nodes_name: [] + run_once: True + +- set_fact: + accessing_nodes_name: "{{ access_node_names.stdout_lines | join(',') }}" + run_once: True + +- name: Remote Cluster Config - API-CLI | Client Cluster (access) | Check if GPFS deamon is started + shell: /usr/lpp/mmfs/bin/mmgetstate -Y -N {{ accessing_nodes_name }} | grep -v HEADER | cut -d ':' -f 9 + register: gpfs_deamon_state + changed_when: false + run_once: true + +- name: Remote Cluster Config - API-CLI | Client Cluster (access) | Fail if GPFS deamon is not started + fail: + msg: "Scale/GPFS deamon is NOT running on one or serveral of your client cluster node. Check and run mmount manually" + when: "'down' in gpfs_deamon_state.stdout" + ignore_errors: true + run_once: true + +# +# Section for doing the configuration of remote cluster. 
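# Illustrative sketch, not part of this patch: the key-exchange tasks below only run when the
# remote cluster is not yet defined (HTTP 400 from the lookup) or when a re-run is forced.
# Forcing can be done by setting the variable at call time, for example via --extra-vars;
# the value shown is an assumption about typical usage.
scale_remotemount_forceRun: true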
+# - name: Remote Cluster Config - API-CLI | Exchange the keys between Storage and Client Clusters (access) block: - name: Step 3 - Remote Cluster Config - API-CLI debug: msg: "Configure remote Cluster connection between Storage Cluster (owner) and Client Cluster (access)" run_once: True + when: + - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - name: Remote Cluster Config - API-CLI | Remote Cluster connection status debug: msg: "Remote Cluster connection to ('{{ access_cluster_name }}') is not configured, procceding with configuration" run_once: True + when: + - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - name: Remote Cluster Config - API-CLI | Client Cluster (Access) | Get the Public key from CLI and register shell: "cat /var/mmfs/ssl/id_rsa_committed.pub" register: accesskey_result run_once: True + when: + - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the Client Cluster (access) Public Key results + - name: Remote Cluster Config - API-CLI | Client Cluster (accesing) | scale_remotemount_debug | Print out the Client Cluster (access) Public Key results debug: msg: "{{ accesskey_result }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool run_once: True - - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the Client Cluster (access) Public Key results to file ("{{ scale_remote_mount_client_access_key }}") + - name: Remote Cluster Config - API-CLI | Client Cluster (accesing) | scale_remotemount_debug | Print out the Client Cluster (access) Public Key results to file ("{{ scale_remotemount_client_access_key }}") copy: - dest: "{{ scale_remote_mount_client_access_key }}" + dest: "{{ scale_remotemount_client_access_key }}" content: "{{ accesskey_result }}\n" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool run_once: True @@ -152,9 +197,9 @@ - name: Remote Cluster Config - API-CLI | Storage Cluster (owner) | Send the Public Key of the Client Cluster (access) to Storage Cluster (Owner) uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/remoteclusters + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/remoteclusters method: POST user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -162,33 +207,37 @@ body: | { "remoteCluster": "{{ access_cluster_name }}", - "ciphers": ["{{ remotecluster_chipers }}"], + "ciphers": ["{{ scale_remotemount_remotecluster_chipers }}"], "key": {{ accesskey_result.stdout_lines }} } status_code: - 202 register: send_key run_once: True + when: + - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | Check the result of adding the Client Cluster {{ send_key.json.jobs[0].jobId }}" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: true - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} + url: 
https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/jobs/{{ send_key.json.jobs[0].jobId }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" register: completed_check until: completed_check.json.jobs[0].status != "FAILED" - retries: "{{ restapi_retries_count }}" - delay: "{{ restapi_retries_delay }}" + retries: "{{ scale_remotemount_restapi_retries_count }}" + delay: "{{ scale_remotemount_restapi_retries_delay }}" run_once: True + when: + - (remote_clusters_results.status == 400) or (scale_remotemount_forceRun | bool) - name: Remote Cluster Config - API-CLI | Storage Cluster (owner) | Get the Public Key uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ remote_mount_endpoint }}/authenticationkey/ + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_endpoint }}/authenticationkey/ method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -225,11 +274,11 @@ # # This Section is gather the nodenames and adminNodeName # - - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | GET adminNodeName Info - GET {{ scalemgmt_endpoint }}/nodes" + - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | GET adminNodeName Info - GET {{ scale_remotemount_scalemgmt_endpoint }}/nodes" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes{{ scale_remotemount_storage_contactnodes_filter }} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -251,11 +300,11 @@ # # This Section is when using daemonNodeName # - - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | GET daemonNodeName Info - GET {{ scalemgmt_endpoint }}/nodes/" + - name: "Remote Cluster Config - API-CLI | Storage Cluster (owner) | GET daemonNodeName Info - GET {{ scale_remotemount_scalemgmt_endpoint }}/nodes/" uri: - validate_certs: "{{ validate_certs_uri }}" + validate_certs: "{{ scale_remotemount_validate_certs_uri }}" force_basic_auth: yes - url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ storage_cluster_gui_port }}/{{ scalemgmt_endpoint }}/nodes/{{item}} + url: https://{{ scale_remotemount_storage_gui_hostname }}:{{ scale_remotemount_storage_cluster_gui_port }}/{{ scale_remotemount_scalemgmt_endpoint }}/nodes/{{item}} method: GET user: "{{ scale_remotemount_storage_gui_username }}" password: "{{ scale_remotemount_storage_gui_password }}" @@ -280,7 +329,7 @@ # # adminNodeName section # - - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the array storing the nodes in the Storage Cluster (owning) + - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the array storing the AdminNodeNames from the Storage Cluster (owning) debug: msg: "{{ 
owning_nodes_name }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -297,7 +346,7 @@ # # deamonNodeName section # - - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the array storing the nodes in the Storage Cluster (owning) + - name: Remote Cluster Config - API-CLI | scale_remotemount_debug | Print out the array storing the DeamonNodeNames from the Storage Cluster (owning) debug: msg: "{{ owning_daemon_nodes_name }}" when: scale_remotemount_debug is defined and scale_remotemount_debug | bool @@ -310,7 +359,7 @@ register: remote_cluster_add_ssh failed_when: - "remote_cluster_add_ssh.rc != 0 and 'is already defined' not in remote_cluster_add_ssh.stderr" - when: scale_remotemount_storage_adminnodename is not true + when: not scale_remotemount_storage_adminnodename - name: Remote Cluster Config - API-CLI | Client Cluster (Access) | Cleanup temporary keys. file: diff --git a/roles/scale_fileauditlogging/upgrade/tests/inventory b/roles/remotemount_configure/tests/inventory similarity index 100% rename from roles/scale_fileauditlogging/upgrade/tests/inventory rename to roles/remotemount_configure/tests/inventory diff --git a/roles/remote_mount/tests/test.yml b/roles/remotemount_configure/tests/test.yml similarity index 100% rename from roles/remote_mount/tests/test.yml rename to roles/remotemount_configure/tests/test.yml diff --git a/roles/remote_mount/vars/main.yml b/roles/remotemount_configure/vars/main.yml similarity index 100% rename from roles/remote_mount/vars/main.yml rename to roles/remotemount_configure/vars/main.yml diff --git a/roles/scale_auth/upgrade/meta/main.yml b/roles/scale_auth/upgrade/meta/main.yml deleted file mode 100644 index fa7f21fe..00000000 --- a/roles/scale_auth/upgrade/meta/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -galaxy_info: - role_name: scale_auth - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - core/common diff --git a/roles/scale_hdfs/node/meta/main.yml b/roles/scale_hdfs/node/meta/main.yml deleted file mode 100644 index 480edff7..00000000 --- a/roles/scale_hdfs/node/meta/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -galaxy_info: - role_name: scale_hdfs - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - core/common - - scale_hdfs/precheck - diff --git a/roles/scale_hdfs/node/tasks/yum/install.yml b/roles/scale_hdfs/node/tasks/yum/install.yml deleted file mode 100644 index 654a4d97..00000000 --- a/roles/scale_hdfs/node/tasks/yum/install.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: install | Install GPFS HDFS packages - yum: - name: "{{ scale_install_all_packages }}" - state: present - disable_gpg_check: "{{ scale_disable_gpgcheck }}" - when: ansible_fqdn in scale_hdfs_nodes_list or ansible_fqdn in scale_protocol_nodes_list - diff --git a/roles/scale_hdfs/precheck/tasks/prepare_env.yml b/roles/scale_hdfs/precheck/tasks/prepare_env.yml deleted file mode 100644 index 4b52c076..00000000 --- a/roles/scale_hdfs/precheck/tasks/prepare_env.yml +++ /dev/null @@ 
-1,20 +0,0 @@ ---- -- name: - set_fact: - transparency_33_enabled: "False" - transparency_version: "False" - -- name: - shell: "echo $SCALE_HDFS_TRANSPARENCY_VERSION_33_ENABLE" - register: transparency_version - delegate_to: localhost - run_once: true - -- name: - set_fact: - transparency_33_enabled: "{{ transparency_version.stdout|bool }}" - when: - - transparency_version.stdout is defined - - transparency_version.stdout|bool - delegate_to: localhost - run_once: true diff --git a/roles/scale_hdfs/upgrade/meta/main.yml b/roles/scale_hdfs/upgrade/meta/main.yml deleted file mode 100644 index 477ef8a5..00000000 --- a/roles/scale_hdfs/upgrade/meta/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -galaxy_info: - role_name: scale_hdfs - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - core/common \ No newline at end of file diff --git a/roles/scale_hdfs/upgrade/tasks/prepare_env.yml b/roles/scale_hdfs/upgrade/tasks/prepare_env.yml deleted file mode 100644 index 4b52c076..00000000 --- a/roles/scale_hdfs/upgrade/tasks/prepare_env.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: - set_fact: - transparency_33_enabled: "False" - transparency_version: "False" - -- name: - shell: "echo $SCALE_HDFS_TRANSPARENCY_VERSION_33_ENABLE" - register: transparency_version - delegate_to: localhost - run_once: true - -- name: - set_fact: - transparency_33_enabled: "{{ transparency_version.stdout|bool }}" - when: - - transparency_version.stdout is defined - - transparency_version.stdout|bool - delegate_to: localhost - run_once: true diff --git a/roles/scale_object/cluster/meta/main.yml b/roles/scale_object/cluster/meta/main.yml deleted file mode 100644 index 621ddd1c..00000000 --- a/roles/scale_object/cluster/meta/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -galaxy_info: - role_name: obj_protocol - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - scale_object/precheck - - nfs/common - diff --git a/roles/scale_object/node/meta/main.yml b/roles/scale_object/node/meta/main.yml deleted file mode 100644 index d1d3bda5..00000000 --- a/roles/scale_object/node/meta/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -galaxy_info: - role_name: obj_node - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - scale_object/precheck - - nfs/common - diff --git a/roles/scale_object/postcheck/meta/main.yml b/roles/scale_object/postcheck/meta/main.yml deleted file mode 100644 index 69417584..00000000 --- a/roles/scale_object/postcheck/meta/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -galaxy_info: - role_name: obj_postcheck - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - 
versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: [] - diff --git a/roles/scale_object/precheck/meta/main.yml b/roles/scale_object/precheck/meta/main.yml deleted file mode 100644 index 828b184c..00000000 --- a/roles/scale_object/precheck/meta/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -galaxy_info: - role_name: obj_precheck - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - core/common diff --git a/roles/scale_object/upgrade/meta/main.yml b/roles/scale_object/upgrade/meta/main.yml deleted file mode 100644 index 289c1eb1..00000000 --- a/roles/scale_object/upgrade/meta/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -galaxy_info: - role_name: obj_upgrade - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - core/common - - nfs/common diff --git a/roles/smb/node/meta/main.yml b/roles/smb/node/meta/main.yml deleted file mode 100644 index ec26b7e2..00000000 --- a/roles/smb/node/meta/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -galaxy_info: - role_name: nfs_node - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - core/common - - smb/precheck - diff --git a/roles/smb/upgrade/meta/main.yml b/roles/smb/upgrade/meta/main.yml deleted file mode 100644 index 07ad9f01..00000000 --- a/roles/smb/upgrade/meta/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -galaxy_info: - role_name: nfs_node - author: IBM Corporation - description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) - company: IBM - license: Apache-2.0 - min_ansible_version: 2.4 - - platforms: - - name: EL - versions: - - 7 - - 8 - - galaxy_tags: - - ibm - - spectrum - - scale - - gpfs - -dependencies: - - core/common diff --git a/roles/smb_configure/README.md b/roles/smb_configure/README.md new file mode 120000 index 00000000..51c46d54 --- /dev/null +++ b/roles/smb_configure/README.md @@ -0,0 +1 @@ +../../docs/README.SMB.md \ No newline at end of file diff --git a/roles/smb/cluster/defaults/main.yml b/roles/smb_configure/defaults/main.yml similarity index 100% rename from roles/smb/cluster/defaults/main.yml rename to roles/smb_configure/defaults/main.yml diff --git a/roles/smb_configure/meta/main.yml b/roles/smb_configure/meta/main.yml new file mode 100644 index 00000000..c42d702c --- /dev/null +++ b/roles/smb_configure/meta/main.yml @@ -0,0 +1,20 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.ces_common diff --git a/roles/smb/cluster/tasks/configure.yml 
b/roles/smb_configure/tasks/configure.yml similarity index 100% rename from roles/smb/cluster/tasks/configure.yml rename to roles/smb_configure/tasks/configure.yml diff --git a/roles/smb/cluster/tasks/main.yml b/roles/smb_configure/tasks/main.yml similarity index 100% rename from roles/smb/cluster/tasks/main.yml rename to roles/smb_configure/tasks/main.yml diff --git a/roles/smb/cluster/vars/main.yml b/roles/smb_configure/vars/main.yml similarity index 100% rename from roles/smb/cluster/vars/main.yml rename to roles/smb_configure/vars/main.yml diff --git a/roles/smb_install/README.md b/roles/smb_install/README.md new file mode 120000 index 00000000..51c46d54 --- /dev/null +++ b/roles/smb_install/README.md @@ -0,0 +1 @@ +../../docs/README.SMB.md \ No newline at end of file diff --git a/roles/smb/upgrade/defaults/main.yml b/roles/smb_install/defaults/main.yml similarity index 95% rename from roles/smb/upgrade/defaults/main.yml rename to roles/smb_install/defaults/main.yml index 4c7c0f48..5d2f651e 100644 --- a/roles/smb/upgrade/defaults/main.yml +++ b/roles/smb_install/defaults/main.yml @@ -16,4 +16,4 @@ scale_smb_packages: scale_install_localpkg_tmpdir_path: /tmp ## Flag to install smb debug package -install_debuginfo: true +scale_smb_install_debuginfo: true diff --git a/roles/smb/node/handlers/main.yml b/roles/smb_install/handlers/main.yml similarity index 100% rename from roles/smb/node/handlers/main.yml rename to roles/smb_install/handlers/main.yml diff --git a/roles/smb_install/meta/main.yml b/roles/smb_install/meta/main.yml new file mode 100644 index 00000000..35961206 --- /dev/null +++ b/roles/smb_install/meta/main.yml @@ -0,0 +1,21 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.core_common + - ibm.spectrum_scale.smb_prepare diff --git a/roles/smb/node/tasks/apt/install.yml b/roles/smb_install/tasks/apt/install.yml similarity index 56% rename from roles/smb/node/tasks/apt/install.yml rename to roles/smb_install/tasks/apt/install.yml index 4e99a32b..439f555f 100644 --- a/roles/smb/node/tasks/apt/install.yml +++ b/roles/smb_install/tasks/apt/install.yml @@ -3,14 +3,13 @@ package: name: "{{ scale_install_all_packages }}" state: present - when: scale_install_repository_url is defined and ansible_fqdn in scale_smb_node_list + when: scale_install_repository_url is defined and inventory_hostname in scale_smb_node_list - name: install| Install GPFS SMB deb apt: deb: "{{ item }}" state: present - when: scale_install_repository_url is not defined and ansible_fqdn in scale_smb_node_list + when: scale_install_repository_url is not defined and inventory_hostname in scale_smb_node_list with_items: - "{{ scale_install_all_packages }}" - diff --git a/roles/smb/node/tasks/install.yml b/roles/smb_install/tasks/install.yml similarity index 100% rename from roles/smb/node/tasks/install.yml rename to roles/smb_install/tasks/install.yml diff --git a/roles/smb/node/tasks/install_dir_pkg.yml b/roles/smb_install/tasks/install_dir_pkg.yml similarity index 97% rename from roles/smb/node/tasks/install_dir_pkg.yml rename to roles/smb_install/tasks/install_dir_pkg.yml index df5431fa..0af856f1 100644 --- a/roles/smb/node/tasks/install_dir_pkg.yml +++ b/roles/smb_install/tasks/install_dir_pkg.yml @@ -101,7 +101,7 @@ - name: remove 
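# Illustrative sketch, not part of this patch: the SMB install tasks now key off
# 'inventory_hostname' and the renamed 'scale_smb_install_debuginfo' flag. Typical overrides
# could be set per host group; the repository URL below is a placeholder assumption.
scale_install_repository_url: http://repo.example.com/spectrum_scale/5.1.4.0/
scale_smb_install_debuginfo: false   # renamed from 'install_debuginfo'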
debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/smb/node/tasks/install_local_pkg.yml b/roles/smb_install/tasks/install_local_pkg.yml similarity index 91% rename from roles/smb/node/tasks/install_local_pkg.yml rename to roles/smb_install/tasks/install_local_pkg.yml index d59dcae6..06912cdd 100644 --- a/roles/smb/node/tasks/install_local_pkg.yml +++ b/roles/smb_install/tasks/install_local_pkg.yml @@ -108,6 +108,11 @@ scale_smb_url: 'smb_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | smb path + set_fact: + scale_smb_url: 'smb_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | smb path set_fact: scale_smb_url: 'smb_rpms/sles12/' @@ -133,6 +138,20 @@ scale_smb_url: 'smb_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + # Find smb rpms - block: ## when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_sles_distribution @@ -209,7 +228,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/smb/node/tasks/install_remote_pkg.yml b/roles/smb_install/tasks/install_remote_pkg.yml similarity index 90% rename from roles/smb/node/tasks/install_remote_pkg.yml rename to roles/smb_install/tasks/install_remote_pkg.yml index aff4c0eb..67cc2e4a 100644 --- a/roles/smb/node/tasks/install_remote_pkg.yml +++ b/roles/smb_install/tasks/install_remote_pkg.yml @@ -81,6 +81,11 @@ scale_smb_url: 'smb_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | smb path + set_fact: + scale_smb_url: 'smb_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | smb path set_fact: scale_smb_url: 'smb_rpms/sles12/' @@ -106,6 +111,20 @@ scale_smb_url: 'smb_debs/ubuntu/' when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" + # Find smb rpms - block: ## when: 
ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_sles_distribution @@ -181,7 +200,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/smb/node/tasks/install_repository.yml b/roles/smb_install/tasks/install_repository.yml similarity index 77% rename from roles/smb/node/tasks/install_repository.yml rename to roles/smb_install/tasks/install_repository.yml index 6baa89f7..f5d5a92b 100644 --- a/roles/smb/node/tasks/install_repository.yml +++ b/roles/smb_install/tasks/install_repository.yml @@ -9,6 +9,11 @@ scale_smb_url: 'smb_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: install | smb path + set_fact: + scale_smb_url: 'smb_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: install | smb path set_fact: scale_smb_url: 'smb_rpms/sles12/' @@ -32,7 +37,21 @@ - name: install | smb path set_fact: scale_smb_url: 'smb_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" - name: install | Configure smb YUM repository yum_repository: @@ -46,6 +65,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure smb zypper repository @@ -57,6 +77,7 @@ disable_gpg_check: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Configure smb APT repository @@ -70,6 +91,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: install | Add GPFS smb packages to list diff --git a/roles/smb/node/tasks/main.yml b/roles/smb_install/tasks/main.yml similarity index 100% rename from roles/smb/node/tasks/main.yml rename to roles/smb_install/tasks/main.yml diff --git a/roles/smb/node/tasks/yum/install.yml b/roles/smb_install/tasks/yum/install.yml similarity index 77% rename from roles/smb/node/tasks/yum/install.yml rename to roles/smb_install/tasks/yum/install.yml index 44aa7ef0..04df3786 100644 --- a/roles/smb/node/tasks/yum/install.yml +++ b/roles/smb_install/tasks/yum/install.yml @@ -4,5 +4,4 @@ name: "{{ scale_install_all_packages }}" state: present disable_gpg_check: "{{ scale_disable_gpgcheck }}" - when: ansible_fqdn in scale_smb_node_list - + when: inventory_hostname in scale_smb_node_list diff --git a/roles/smb/node/tasks/zypper/install.yml b/roles/smb_install/tasks/zypper/install.yml similarity index 74% rename from 
roles/smb/node/tasks/zypper/install.yml rename to roles/smb_install/tasks/zypper/install.yml index 351f4f05..ea0e031d 100644 --- a/roles/smb/node/tasks/zypper/install.yml +++ b/roles/smb_install/tasks/zypper/install.yml @@ -4,4 +4,4 @@ name: "{{ scale_install_all_packages }}" state: present disable_gpg_check: no - when: ansible_fqdn in scale_smb_node_list + when: inventory_hostname in scale_smb_node_list diff --git a/roles/scale_object/node/vars/main.yml b/roles/smb_install/vars/main.yml similarity index 100% rename from roles/scale_object/node/vars/main.yml rename to roles/smb_install/vars/main.yml diff --git a/roles/smb_prepare/README.md b/roles/smb_prepare/README.md new file mode 120000 index 00000000..51c46d54 --- /dev/null +++ b/roles/smb_prepare/README.md @@ -0,0 +1 @@ +../../docs/README.SMB.md \ No newline at end of file diff --git a/roles/smb_prepare/meta/main.yml b/roles/smb_prepare/meta/main.yml new file mode 100644 index 00000000..dab8063f --- /dev/null +++ b/roles/smb_prepare/meta/main.yml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: [] diff --git a/roles/smb/precheck/tasks/check.yml b/roles/smb_prepare/tasks/check.yml similarity index 78% rename from roles/smb/precheck/tasks/check.yml rename to roles/smb_prepare/tasks/check.yml index 63bf7843..3c38df4f 100644 --- a/roles/smb/precheck/tasks/check.yml +++ b/roles/smb_prepare/tasks/check.yml @@ -5,8 +5,8 @@ - name: check | Collect all smb nodes set_fact: - scale_smb_node_list: "{{ scale_smb_node_list + [hostvars[item]['ansible_fqdn']] }}" - when: hostvars[item]['is_protocol_node'] is defined and hostvars[item]['is_protocol_node']|bool + scale_smb_node_list: "{{ scale_smb_node_list + [hostvars[item]['inventory_hostname']] }}" + when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool with_items: - "{{ ansible_play_hosts }}" delegate_to: localhost @@ -30,7 +30,7 @@ that: - ansible_facts.services["smb"].state != "running" fail_msg: "Service smb found running on {{ ansible_hostname }}. Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop smb) and disable (systemctl disable smb) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["smb"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["smb"].state is defined any_errors_fatal: true - name: check | Check if service smbd is running @@ -38,7 +38,7 @@ that: - ansible_facts.services["smbd"].state != "running" fail_msg: "Service smbd found running on {{ ansible_hostname }}. Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop smbd) and disable (systemctl disable smbd) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["smbd"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["smbd"].state is defined any_errors_fatal: true - name: check | Check if service winbind is running @@ -46,7 +46,7 @@ that: - ansible_facts.services["winbind"].state != "running" fail_msg: "Service smb found running on {{ ansible_hostname }}. 
Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop winbind) and disable (systemctl disable winbind) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["winbind"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["winbind"].state is defined any_errors_fatal: true - name: check | Check if service winbindd is running @@ -54,7 +54,7 @@ that: - ansible_facts.services["winbindd"].state != "running" fail_msg: "Service winbindd found running on {{ ansible_hostname }}. Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop winbindd) and disable (systemctl disable winbindd) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["winbindd"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["winbindd"].state is defined any_errors_fatal: true - name: check | Check if service ctdb is running @@ -62,7 +62,7 @@ that: - ansible_facts.services["ctdb"].state != "running" fail_msg: "Service ctdb found running on {{ ansible_hostname }}. Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop ctdb) and disable (systemctl disable ctdb) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["ctdb"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["ctdb"].state is defined any_errors_fatal: true - name: check | Check if service ctdbd is running @@ -70,7 +70,7 @@ that: - ansible_facts.services["ctdbd"].state != "running" fail_msg: "Service ctdbd found running on {{ ansible_hostname }}. 
Which conflicts with the installation of SMB.SUGGESTTED ACTION- Run commands to stop (systemctl stop ctdbd) and disable (systemctl disable ctdbd) this service on node {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list and ansible_facts.services["ctdbd"].state is defined + when: inventory_hostname in scale_smb_node_list and ansible_facts.services["ctdbd"].state is defined any_errors_fatal: true - debug: diff --git a/roles/smb/precheck/tasks/main.yml b/roles/smb_prepare/tasks/main.yml similarity index 100% rename from roles/smb/precheck/tasks/main.yml rename to roles/smb_prepare/tasks/main.yml diff --git a/roles/smb_upgrade/README.md b/roles/smb_upgrade/README.md new file mode 120000 index 00000000..51c46d54 --- /dev/null +++ b/roles/smb_upgrade/README.md @@ -0,0 +1 @@ +../../docs/README.SMB.md \ No newline at end of file diff --git a/roles/smb/node/defaults/main.yml b/roles/smb_upgrade/defaults/main.yml similarity index 95% rename from roles/smb/node/defaults/main.yml rename to roles/smb_upgrade/defaults/main.yml index 4c7c0f48..5d2f651e 100644 --- a/roles/smb/node/defaults/main.yml +++ b/roles/smb_upgrade/defaults/main.yml @@ -16,4 +16,4 @@ scale_smb_packages: scale_install_localpkg_tmpdir_path: /tmp ## Flag to install smb debug package -install_debuginfo: true +scale_smb_install_debuginfo: true diff --git a/roles/smb/upgrade/handlers/main.yml b/roles/smb_upgrade/handlers/main.yml similarity index 100% rename from roles/smb/upgrade/handlers/main.yml rename to roles/smb_upgrade/handlers/main.yml diff --git a/roles/smb_upgrade/meta/main.yml b/roles/smb_upgrade/meta/main.yml new file mode 100644 index 00000000..d32d632b --- /dev/null +++ b/roles/smb_upgrade/meta/main.yml @@ -0,0 +1,20 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: + - ibm.spectrum_scale.core_common diff --git a/roles/smb/upgrade/tasks/apt/install.yml b/roles/smb_upgrade/tasks/apt/install.yml similarity index 100% rename from roles/smb/upgrade/tasks/apt/install.yml rename to roles/smb_upgrade/tasks/apt/install.yml diff --git a/roles/smb/upgrade/tasks/install.yml b/roles/smb_upgrade/tasks/install.yml similarity index 100% rename from roles/smb/upgrade/tasks/install.yml rename to roles/smb_upgrade/tasks/install.yml diff --git a/roles/smb/upgrade/tasks/install_dir_pkg.yml b/roles/smb_upgrade/tasks/install_dir_pkg.yml similarity index 97% rename from roles/smb/upgrade/tasks/install_dir_pkg.yml rename to roles/smb_upgrade/tasks/install_dir_pkg.yml index 934ab38e..faf89d7b 100644 --- a/roles/smb/upgrade/tasks/install_dir_pkg.yml +++ b/roles/smb_upgrade/tasks/install_dir_pkg.yml @@ -101,7 +101,7 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution - debug: msg: "{{ scale_install_all_packages }}" diff --git a/roles/smb/upgrade/tasks/install_local_pkg.yml b/roles/smb_upgrade/tasks/install_local_pkg.yml similarity index 98% rename from roles/smb/upgrade/tasks/install_local_pkg.yml rename to roles/smb_upgrade/tasks/install_local_pkg.yml index 1698707a..ccb69e82 100644 --- 
a/roles/smb/upgrade/tasks/install_local_pkg.yml +++ b/roles/smb_upgrade/tasks/install_local_pkg.yml @@ -217,4 +217,4 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/smb/upgrade/tasks/install_remote_pkg.yml b/roles/smb_upgrade/tasks/install_remote_pkg.yml similarity index 98% rename from roles/smb/upgrade/tasks/install_remote_pkg.yml rename to roles/smb_upgrade/tasks/install_remote_pkg.yml index 633213b4..10c8fdc1 100644 --- a/roles/smb/upgrade/tasks/install_remote_pkg.yml +++ b/roles/smb_upgrade/tasks/install_remote_pkg.yml @@ -150,4 +150,4 @@ - name: remove debuginfo from packages set_fact: scale_install_all_packages: "{{ scale_install_all_packages | difference(debuginfo_package)}}" - when: not install_debuginfo|bool and ansible_distribution in scale_rhel_distribution + when: not scale_smb_install_debuginfo|bool and ansible_distribution in scale_rhel_distribution diff --git a/roles/smb/upgrade/tasks/install_repository.yml b/roles/smb_upgrade/tasks/install_repository.yml similarity index 77% rename from roles/smb/upgrade/tasks/install_repository.yml rename to roles/smb_upgrade/tasks/install_repository.yml index cbfe8dc7..6d1c3aa0 100644 --- a/roles/smb/upgrade/tasks/install_repository.yml +++ b/roles/smb_upgrade/tasks/install_repository.yml @@ -9,6 +9,11 @@ scale_smb_url: 'smb_rpms/rhel8/' when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8' +- name: upgrade | smb path + set_fact: + scale_smb_url: 'smb_rpms/rhel9/' + when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9' + - name: upgrade | smb path set_fact: scale_smb_url: 'smb_rpms/sles12/' @@ -32,7 +37,21 @@ - name: upgrade | smb path set_fact: scale_smb_url: 'smb_debs/ubuntu/' - when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version >= '20' + when: ansible_distribution in scale_ubuntu_distribution and ansible_distribution_major_version == '20' + +- block: + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu20/' + when: ansible_distribution_major_version == '20' + + - name: install | smb path + set_fact: + scale_smb_url: 'smb_debs/ubuntu/ubuntu22/' + when: ansible_distribution_major_version == '22' + when: + - ansible_distribution in scale_ubuntu_distribution + - scale_version >= "5.1.4.0" - name: upgrade | Configure smb YUM repository yum_repository: @@ -46,6 +65,7 @@ notify: yum-clean-metadata when: - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure smb zypper repository @@ -57,6 +77,7 @@ disable_gpg_check: yes when: - ansible_pkg_mgr == 'zypper' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Configure smb APT repository @@ -70,6 +91,7 @@ mode: 0777 when: - ansible_pkg_mgr == 'apt' + - scale_install_repository_url is defined - scale_install_repository_url != 'existing' - name: upgrade | Add GPFS smb packages to list diff --git a/roles/smb/upgrade/tasks/main.yml b/roles/smb_upgrade/tasks/main.yml similarity index 100% rename from roles/smb/upgrade/tasks/main.yml rename to roles/smb_upgrade/tasks/main.yml 
diff --git a/roles/smb/upgrade/tasks/yum/install.yml b/roles/smb_upgrade/tasks/yum/install.yml similarity index 100% rename from roles/smb/upgrade/tasks/yum/install.yml rename to roles/smb_upgrade/tasks/yum/install.yml diff --git a/roles/smb/upgrade/tasks/zypper/install.yml b/roles/smb_upgrade/tasks/zypper/install.yml similarity index 100% rename from roles/smb/upgrade/tasks/zypper/install.yml rename to roles/smb_upgrade/tasks/zypper/install.yml diff --git a/roles/scale_object/upgrade/vars/main.yml b/roles/smb_upgrade/vars/main.yml similarity index 100% rename from roles/scale_object/upgrade/vars/main.yml rename to roles/smb_upgrade/vars/main.yml diff --git a/roles/smb_verify/README.md b/roles/smb_verify/README.md new file mode 120000 index 00000000..51c46d54 --- /dev/null +++ b/roles/smb_verify/README.md @@ -0,0 +1 @@ +../../docs/README.SMB.md \ No newline at end of file diff --git a/roles/smb/postcheck/defaults/main.yml b/roles/smb_verify/defaults/main.yml similarity index 100% rename from roles/smb/postcheck/defaults/main.yml rename to roles/smb_verify/defaults/main.yml diff --git a/roles/smb_verify/meta/main.yml b/roles/smb_verify/meta/main.yml new file mode 100644 index 00000000..dab8063f --- /dev/null +++ b/roles/smb_verify/meta/main.yml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: IBM Corporation + description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS) + company: IBM + + license: Apache-2.0 + + min_ansible_version: 2.9 + + platforms: + - name: EL + versions: + - 7 + - 8 + + galaxy_tags: [] + +dependencies: [] diff --git a/roles/smb/postcheck/tasks/check.yml b/roles/smb_verify/tasks/check.yml similarity index 77% rename from roles/smb/postcheck/tasks/check.yml rename to roles/smb_verify/tasks/check.yml index c665896c..32ff3b09 100644 --- a/roles/smb/postcheck/tasks/check.yml +++ b/roles/smb_verify/tasks/check.yml @@ -3,7 +3,7 @@ shell: cmd: "{{ scale_command_path }}mmces service list|grep SMB" register: scale_smb_status - when: ansible_fqdn in scale_smb_node_list + when: inventory_hostname in scale_smb_node_list ignore_errors: true failed_when: false @@ -12,4 +12,4 @@ that: - scale_smb_status.rc == 0 fail_msg: "SMB is not active on {{ ansible_hostname }}" - when: ansible_fqdn in scale_smb_node_list + when: inventory_hostname in scale_smb_node_list diff --git a/roles/smb/postcheck/tasks/main.yml b/roles/smb_verify/tasks/main.yml similarity index 100% rename from roles/smb/postcheck/tasks/main.yml rename to roles/smb_verify/tasks/main.yml diff --git a/roles/smb/postcheck/vars/main.yml b/roles/smb_verify/vars/main.yml similarity index 100% rename from roles/smb/postcheck/vars/main.yml rename to roles/smb_verify/vars/main.yml diff --git a/samples/hosts b/samples/hosts index 71e20323..30562743 100644 --- a/samples/hosts +++ b/samples/hosts @@ -1,6 +1,6 @@ # hosts: # Sample host file for deploying IBM Spectrum Scale (GPFS) cluster [cluster01] -host-vm1 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=true -host-vm2 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=false -host-vm3 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=false +host-vm1 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false scale_protocol_node=true +host-vm2 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false scale_protocol_node=false +host-vm3 scale_cluster_quorum=true 
scale_cluster_manager=true scale_cluster_gui=false scale_protocol_node=false diff --git a/samples/legacy/daemon_admin_network b/samples/legacy/daemon_admin_network deleted file mode 100644 index a879bcb7..00000000 --- a/samples/legacy/daemon_admin_network +++ /dev/null @@ -1,13 +0,0 @@ -hosts: -# Sample parameter for the host file for deploying IBM Spectrum Scale (GPFS) cluster -# with admin and daemon network. -# -# To allow ssh to the cluster with the defined scale_admin_nodename only, the sshd_config -# needs to be updated. To allow update of sshd_config set the variables -# scale_prepare_enable_ssh_login and scale_prepare_restrict_ssh_address to true -# (see roles/core/precheck/defaults/main.yml). - -[cluster01] -scale01 scale_admin_nodename=scale01 scale_daemon_nodename=scale01d -scale02 scale_admin_nodename=scale02 scale_daemon_nodename=scale02d - diff --git a/samples/legacy/hosts b/samples/legacy/hosts deleted file mode 100644 index c1899ef3..00000000 --- a/samples/legacy/hosts +++ /dev/null @@ -1,6 +0,0 @@ -# hosts: -# Sample host file for deploying IBM Spectrum Scale (GPFS) cluster -[cluster01] -host-vm1 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=true -host-vm2 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=false -host-vm3 scale_cluster_quorum=true scale_cluster_manager=true scale_cluster_gui=false is_protocol_node=false diff --git a/samples/legacy/playbook_aws.yml b/samples/legacy/playbook_aws.yml deleted file mode 100644 index 3b7f0ac8..00000000 --- a/samples/legacy/playbook_aws.yml +++ /dev/null @@ -1,195 +0,0 @@ ---- -# -# samples/playbook_cloud.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using -# inventory in JSON format. 
- -# This file is mandatory to import and it will load inventory variables form -# vars/scale_clusterdefinition.json -- import_playbook: "set_json_variables.yml" - -# Ensure provisioned VMs are up and Passwordless SSH setup -# has been compleated and operational -- name: Check passwordless SSH connection is setup - hosts: scale_node - any_errors_fatal: true - gather_facts: false - connection: local - tasks: - - name: Check passwordless SSH on all scale inventory hosts - shell: ssh -i {{ ansible_ssh_private_key_file }} {{ inventory_hostname }} "echo PASSWDLESS_SSH_ENABLED" - register: result - until: result.stdout.find("PASSWDLESS_SSH_ENABLED") != -1 - retries: 30 - delay: 10 - -# Ensure all provisioned VMs are running the supported OS versions -- name: Check for supported OS - hosts: scale_node - any_errors_fatal: true - gather_facts: true - tasks: - - name: Spectrum Scale Precheck | Check OS Distribution - assert: - that: - - ansible_distribution == "RedHat" - - ansible_distribution_major_version == "7" or ansible_distribution_major_version == "8" - - (ansible_distribution_version is match("7.7") or - ansible_distribution_version is match("7.8") or - ansible_distribution_version is match("8.1") or - ansible_distribution_version is match("8.2")) - fail_msg: "Only instances running RedHat Enterprise Linux version 7.7, 7.8, 8.1 and 8.2 are supported" - -# Setup Spectrum Scale on nodes and create cluster -- hosts: scale_node - any_errors_fatal: true - vars: - - scale_install_directory_pkg_path: /opt/IBM/gpfs_cloud_rpms - roles: - - core/precheck - - core/node - - core/cluster - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck - - # Cloud deployment specific actions after Spectrum Scale - # cluster installation and setup - tasks: - - block: - - name: accept client lisence for compute descriptor node - command: /usr/lpp/mmfs/bin/mmchnode --client -N "computedescnodegrp" - - - name: set filesystem - set_fact: - fs_name: "{{ scale_storage.0.filesystem }}" - when: - - scale_storage is defined - - - name: create empty file on descriptor node - command: /usr/lpp/mmfs/bin/mmdsh -N "computedescnodegrp" touch /var/mmfs/etc/ignoreAnyMount.{{ fs_name }} - - - name: unmount filesystem on descriptor node - command: /usr/lpp/mmfs/bin/mmumount {{ fs_name }} -N "computedescnodegrp" - run_once: true - when: - - scale_sync_replication_config | bool - - - name: Prevent kernel upgrade - lineinfile: - path: /etc/yum.conf - line: exclude=kernel* redhat-release* - -# Configure the Spectrum Scale Pagepool setings -- hosts: scale_node - any_errors_fatal: false - gather_facts: true - tasks: - - block: - - name: Spectrum Scale Config | Find Compute Nodes - add_host: - name: "{{ item }}" - groups: scale_compute_members - when: - - hostvars[item]['scale_nodeclass'] is defined and 'computenodegrp' in hostvars[item]['scale_nodeclass'] - with_items: "{{ ansible_play_hosts }}" - changed_when: false - - - name: Spectrum Scale Config | Find Storage Nodes - add_host: - name: "{{ item }}" - groups: scale_storage_members - when: - - hostvars[item]['scale_nodeclass'] is defined and 'storagenodegrp' in hostvars[item]['scale_nodeclass'] - with_items: "{{ ansible_play_hosts }}" - changed_when: false - - - name: Spectrum Scale Config | Determine Compute Node Total Memory - set_fact: - scale_compute_total_mem: "{{ hostvars[item]['ansible_memtotal_mb'] }}" - when: hostvars[item]['ansible_memtotal_mb'] is defined and hostvars[item]['ansible_memtotal_mb'] 
- with_items: "{{ groups['scale_compute_members'].0 }}" - run_once: true - - - name: Spectrum Scale Config | Determine Storage Node Total Memory - set_fact: - scale_storage_total_mem: "{{ hostvars[item]['ansible_memtotal_mb'] }}" - when: hostvars[item]['ansible_memtotal_mb'] is defined and hostvars[item]['ansible_memtotal_mb'] - with_items: "{{ groups['scale_storage_members'].0 }}" - run_once: true - - - name: Spectrum Scale Config | Determine Compute Node Pagepool Memory - set_fact: - scale_compute_total_mem_per: "{{ ((scale_compute_total_mem | int / 1024) * 0.25) | round(0, 'ceil') | int | abs }}" - when: scale_compute_total_mem is defined - run_once: true - - - name: Spectrum Scale Config | Determine Storage Node Pagepool Memory - set_fact: - scale_storage_total_mem_per: "{{ ((scale_storage_total_mem | int / 1024) * 0.25) | round(0, 'ceil') | int | abs }}" - when: scale_storage_total_mem is defined - run_once: true - - - name: Spectrum Scale Config | Define Compute Raw Pagepool Size - set_fact: - pagepool_compute: "{{ scale_compute_total_mem_per }}" - when: scale_compute_total_mem_per is defined - run_once: true - - - name: Spectrum Scale Config | Define Storage Raw Pagepool Size - set_fact: - pagepool_storage: "{{ scale_storage_total_mem_per }}" - when: scale_storage_total_mem_per is defined - run_once: true - - - name: Spectrum Scale Config | Check Compute Pagepool Floor Value - set_fact: - pagepool_compute: "1" - when: - - pagepool_compute is defined - - pagepool_compute | int < 1 - run_once: true - - - name: Spectrum Scale Config | Check Compute Pagepool Ceiling Value - set_fact: - pagepool_compute: "16" - when: - - pagepool_compute is defined - - pagepool_compute | int > 16 - run_once: true - - - name: Spectrum Scale Config | Check Storage Pagepool Floor Value - set_fact: - pagepool_storage: "1" - when: - - pagepool_storage is defined - - pagepool_storage | int < 1 - run_once: true - - - name: Spectrum Scale Config | Check Storage Pagepool Ceiling Value - set_fact: - pagepool_compute: "16" - when: - - pagepool_storage is defined - - pagepool_storage | int > 16 - run_once: true - - - name: Spectrum Scale Config | Assign Compute Pagepool - command: "/usr/lpp/mmfs/bin/mmchconfig pagepool={{ pagepool_compute }}G -i -N computenodegrp" - when: - - pagepool_compute is defined - run_once: true - - - name: Spectrum Scale Config | Assign Storage Pagepool - command: "/usr/lpp/mmfs/bin/mmchconfig pagepool={{ pagepool_storage }}G -i -N storagenodegrp" - when: - - pagepool_storage is defined - run_once: true - diff --git a/samples/legacy/playbook_callhome.yml b/samples/legacy/playbook_callhome.yml deleted file mode 100644 index 6c833399..00000000 --- a/samples/legacy/playbook_callhome.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -# -# samples/playbook_callhome.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with Call Home -# enabled. 
Additional variables need to be defined for this, it is recommended -# to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/callhome_vars.yml - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - pre_tasks: - - include_vars: callhome_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - callhome/precheck - - callhome/node - - callhome/cluster - - callhome/postcheck diff --git a/samples/legacy/playbook_ces.yml b/samples/legacy/playbook_ces.yml deleted file mode 100644 index d36c67aa..00000000 --- a/samples/legacy/playbook_ces.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# -# samples/playbook_ces.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with Cluster -# Export Services (CES). Additional variables need to be defined for this, it is -# recommended to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/ces_vars.yml - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - pre_tasks: - - include_vars: ces_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - nfs/precheck - - nfs/node - - nfs/cluster - - smb/precheck - - smb/node - - smb/cluster diff --git a/samples/legacy/playbook_ces_hdfs.yml b/samples/legacy/playbook_ces_hdfs.yml deleted file mode 100644 index afdfe2fb..00000000 --- a/samples/legacy/playbook_ces_hdfs.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# -# samples/playbook_ces_hdfs.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with Cluster -# Export Services (CES). Additional variables need to be defined for this, it is -# recommended to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/hdfs_cluster_vars.yml - -- hosts: cluster01 - any_errors_fatal: true - vars: - - scale_version: 5.1.1.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Advanced-5.1.1.0-x86_64-Linux-install - pre_tasks: - - include_vars: hdfs_cluster_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - scale_hdfs/precheck - - scale_hdfs/node - - scale_hdfs/cluster - - scale_hdfs/postcheck \ No newline at end of file diff --git a/samples/legacy/playbook_ces_object.yml b/samples/legacy/playbook_ces_object.yml deleted file mode 100644 index 808a442e..00000000 --- a/samples/legacy/playbook_ces_object.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# -# samples/playbook_ces_object.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with Cluster -# Export Services (CES). 
Additional variables need to be defined for this, it is -# recommended to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/scale_object_vars.yml - -- hosts: cluster01 - vars: - - scale_install_localpkg_path: /root/Spectrum_Scale_Advanced-5.1.1.0-x86_64-Linux-install - pre_tasks: - - include_vars: scale_object_vars.yml - roles: - - scale_object/precheck - - scale_object/node - - scale_object/cluster - - scale_object/postcheck - diff --git a/samples/legacy/playbook_cloud.yml b/samples/legacy/playbook_cloud.yml deleted file mode 100644 index 82c89ddc..00000000 --- a/samples/legacy/playbook_cloud.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# -# samples/playbook_cloud.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using -# inventory in JSON format. - -# This file is mandatory to import and it will load inventory variables form -# vars/scale_clusterdefinition.json -- import_playbook: "set_json_variables.yml" - -# Setup Spectrum Scale on nodes and create cluster -- hosts: scale_node - any_errors_fatal: true - vars: - - scale_install_directory_pkg_path: /opt/IBM/gpfs_cloud_rpms - roles: - - core/precheck - - core/node - - core/cluster - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck diff --git a/samples/legacy/playbook_directory.yml b/samples/legacy/playbook_directory.yml deleted file mode 100644 index 7372a97c..00000000 --- a/samples/legacy/playbook_directory.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# -# samples/playbook_directory.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using the -# directory installation method. You need to keep all required Spectrum Scale -# packages in a single user-provided directory. - -# Note that specifying the variable 'scale_version' is *not* required for this -# installation method. - -- hosts: cluster01 - vars: - - scale_install_directory_pkg_path: /root/spectrum_scale_packages - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/playbook_fileauditlogging.yml b/samples/legacy/playbook_fileauditlogging.yml deleted file mode 100644 index 93f8705d..00000000 --- a/samples/legacy/playbook_fileauditlogging.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# -# samples/playbook_fileauditlogging.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with File -# Audit Logging (FAL) enabled. - -# Sample definitions can be found in samples/vars/fal_vars.yml - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - pre_tasks: - - include_vars: fal_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - scale_fileauditlogging/precheck - - scale_fileauditlogging/node - - scale_fileauditlogging/cluster diff --git a/samples/legacy/playbook_json_ces.yml b/samples/legacy/playbook_json_ces.yml deleted file mode 100644 index 04798032..00000000 --- a/samples/legacy/playbook_json_ces.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -# -# samples/playbook_json_ces.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using -# inventory in JSON format. This sample also contains protocols (NFS & SMB), -# callhome and file audit logging. 
- -# This file is mandatory to import and it will load inventory variables form -# samples/vars/scale_clusterdefinition.json -- import_playbook: set_json_variables.yml - -- hosts: scale_node - any_errors_fatal: true - vars: - - scale_install_localpkg_path: /root/Spectrum_Scale_Advanced-5.1.0.0-x86_64-Linux-install - roles: - - core/precheck - - core/node - - core/cluster - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck - - callhome/precheck - - callhome/cluster - - nfs/precheck - - nfs/node - - nfs/cluster - - smb/node - - smb/cluster - - scale_fileauditlogging/node - - scale_fileauditlogging/cluster diff --git a/samples/legacy/playbook_localpkg.yml b/samples/legacy/playbook_localpkg.yml deleted file mode 100644 index 78b359fd..00000000 --- a/samples/legacy/playbook_localpkg.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# -# samples/playbook_localpkg.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using local -# archive installation method. This means that the self-extracting archive -# containing the Spectrum Scale code is accessible on the Ansible control -# machine running the playbook. Integrity of the archive will be validated by -# comparing checksums with a *.md5 reference file (if present), the archive will -# be copied to each managed node in your cluster -# ('scale_install_localpkg_tmpdir_path'), and subsequently the archive will be -# extracted. Packages will then be installed from the local files on each node. - -- hosts: cluster01 - vars: - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/playbook_nodeclass.yml b/samples/legacy/playbook_nodeclass.yml deleted file mode 100644 index 06931b28..00000000 --- a/samples/legacy/playbook_nodeclass.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -# -# samples/playbook_nodeclass.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with node -# classes and Spectrum Scale configuration attributes. Node classes can be -# defined on a per-node basis by defining the `scale_nodeclass` variable, it is -# recommended to use Ansible host variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-one-machine-host-variables - -# Here is an example definition of these node classes for hosts 'scale01' and -# 'scale02': -# ``` -# # host_vars/scale01: -# --- -# scale_nodeclass: -# - classA -# - classB -# ``` -# ``` -# # host_vars/scale02: -# --- -# scale_nodeclass: -# - classA -# - classC -# ``` - -# These node classes can optionally be used to define Spectrum Scale -# configuration attributes. 
It is recommended to use Ansible group variables for -# this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/config_vars.yml - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - pre_tasks: - - include_vars: config_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/playbook_remote_mount.yml b/samples/legacy/playbook_remote_mount.yml deleted file mode 100644 index 04c92735..00000000 --- a/samples/legacy/playbook_remote_mount.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -# -# samples/playbook_remote_mount.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with Remote_Mount -# enabled. Additional variables need to be defined for this, it is recommended -# to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -- hosts: localhost - vars: - - scale_remotemount_client_gui_username: admin - - scale_remotemount_client_gui_password: Admin@GUI - - scale_remotemount_client_gui_hostname: 10.10.10.10 - - scale_remotemount_storage_gui_username: fs1 - - scale_remotemount_client_remotemount_path: "/mnt/{{ scale_remotemount_client_filesystem_name }}" - - scale_remotemount_storage_gui_username: "{{ scale_remotemount_client_gui_username }}" - - scale_remotemount_storage_gui_password: "{{ scale_remotemount_client_gui_password }}" - - scale_remotemount_storage_gui_hostname: 10.10.10.20 - - scale_remotemount_storage_filesystem_name: gpfs01 - pre_tasks: - roles: - - remote_mount - -# If Accessing/Client Cluster don't have GUI, -# Then change wee need to add variable scale_remotemount_client_no_gui: true and ansible "hosts" need to point to one of the Scale client cluster node diff --git a/samples/legacy/playbook_remotepkg.yml b/samples/legacy/playbook_remotepkg.yml deleted file mode 100644 index 540a523e..00000000 --- a/samples/legacy/playbook_remotepkg.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# -# samples/playbook_remotepkg.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using remote -# archive installation method. This means that the self-extracting archive -# containing the Spectrum Scale code is accessible on each Ansible managed node. -# Integrity of the archive will be validated by comparing checksums with a *.md5 -# reference file (if present), and subsequently the archive will be extracted. -# Packages will then be installed from the local files on each node. - -- hosts: cluster01 - vars: - - scale_install_remotepkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/playbook_repository.yml b/samples/legacy/playbook_repository.yml deleted file mode 100644 index 0de9641b..00000000 --- a/samples/legacy/playbook_repository.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -# -# samples/playbook_repository.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster using -# repository installation method. You will need to provide the URL of an -# (existing) Spectrum Scale YUM repository. Copy the contents of -# /usr/lpp/mmfs/{{ scale_version }}/* to a web server to build your repository. 
-# A YUM repository will be defined on each managed node in your cluster. -# Packages will then be installed from this central repository. - -# Note that specifying the variable 'scale_version' is mandatory for this -# installation method. - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - # Remember the trailing slash `/` in the URL - - scale_install_repository_url: http://server/path/ - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/playbook_storage.yml b/samples/legacy/playbook_storage.yml deleted file mode 100644 index a9b1deed..00000000 --- a/samples/legacy/playbook_storage.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# -# samples/playbook_storage.yml -# - -# Playbook sample for deploying IBM Spectrum Scale (GPFS) cluster with storage -# (NSDs) and file systems. Additional variables need to be defined for this, it -# is recommended to use Ansible group variables for this purpose: -# https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#assigning-a-variable-to-many-machines-group-variables - -# Sample definitions can be found in samples/vars/storage_vars.yml - -- hosts: cluster01 - vars: - - scale_version: 5.0.4.0 - - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install - pre_tasks: - - include_vars: storage_vars.yml - roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck diff --git a/samples/legacy/vars b/samples/legacy/vars deleted file mode 120000 index b11f011a..00000000 --- a/samples/legacy/vars +++ /dev/null @@ -1 +0,0 @@ -../vars/ \ No newline at end of file diff --git a/samples/playbook_aws.yml b/samples/playbook_aws.yml index e04b1c85..c506f37f 100644 --- a/samples/playbook_aws.yml +++ b/samples/playbook_aws.yml @@ -50,18 +50,18 @@ vars: - scale_install_directory_pkg_path: /opt/IBM/gpfs_cloud_rpms roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - gui_prepare + - gui_install + - gui_configure + - gui_verify + - perfmon_prepare + - perfmon_install + - perfmon_configure + - perfmon_verify # Cloud deployment specific actions after Spectrum Scale # cluster installation and setup diff --git a/samples/playbook_callhome.yml b/samples/playbook_callhome.yml index 650186d6..69323801 100644 --- a/samples/playbook_callhome.yml +++ b/samples/playbook_callhome.yml @@ -19,11 +19,11 @@ pre_tasks: - include_vars: callhome_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - callhome/precheck - - callhome/node - - callhome/cluster - - callhome/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - callhome_prepare + - callhome_install + - callhome_configure + - callhome_verify diff --git a/samples/playbook_ces.yml b/samples/playbook_ces.yml index c6438b3d..4d72e3fa 100644 --- a/samples/playbook_ces.yml +++ b/samples/playbook_ces.yml @@ -19,15 +19,15 @@ pre_tasks: - include_vars: ces_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - nfs/precheck - - nfs/node - - nfs/cluster - - nfs/postcheck - - smb/precheck - - smb/node - - smb/cluster - - smb/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - nfs_prepare + - nfs_install + - nfs_configure + - nfs_verify + - smb_prepare + - smb_install + - smb_configure + 
- smb_verify diff --git a/samples/playbook_ces_hdfs.yml b/samples/playbook_ces_hdfs.yml index 36051fb5..05094991 100644 --- a/samples/playbook_ces_hdfs.yml +++ b/samples/playbook_ces_hdfs.yml @@ -19,11 +19,11 @@ pre_tasks: - include_vars: hdfs_cluster_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - scale_hdfs/precheck - - scale_hdfs/node - - scale_hdfs/cluster - - scale_hdfs/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - hdfs_prepare + - hdfs_install + - hdfs_configure + - hdfs_verify diff --git a/samples/playbook_ces_object.yml b/samples/playbook_ces_object.yml index 860e69c9..cce30776 100644 --- a/samples/playbook_ces_object.yml +++ b/samples/playbook_ces_object.yml @@ -18,11 +18,11 @@ pre_tasks: - include_vars: scale_object_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - scale_object/precheck - - scale_object/node - - scale_object/cluster - - scale_object/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - obj_prepare + - obj_install + - obj_configure + - obj_verify diff --git a/samples/playbook_cloud.yml b/samples/playbook_cloud.yml index ee4af81a..3b562203 100644 --- a/samples/playbook_cloud.yml +++ b/samples/playbook_cloud.yml @@ -14,15 +14,15 @@ collections: - ibm.spectrum_scale roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - gui_prepare + - gui_install + - gui_configure + - gui_verify + - perfmon_prepare + - perfmon_install + - perfmon_configure + - perfmon_verify diff --git a/samples/playbook_cloud_remote_mount.yml b/samples/playbook_cloud_remote_mount.yml index 1425c3fe..432a726d 100644 --- a/samples/playbook_cloud_remote_mount.yml +++ b/samples/playbook_cloud_remote_mount.yml @@ -16,7 +16,7 @@ vars: - scale_remotemount_debug: true roles: - - remote_mount + - remotemount_configure # If Accessing/Client Cluster don't have GUI, # Then change wee need to add variable scale_remotemount_client_no_gui: true and ansible "hosts" need to point to one of the Scale client cluster node diff --git a/samples/playbook_directory.yml b/samples/playbook_directory.yml index 41ef48aa..bcac188a 100644 --- a/samples/playbook_directory.yml +++ b/samples/playbook_directory.yml @@ -16,7 +16,7 @@ vars: - scale_install_directory_pkg_path: /root/spectrum_scale_packages roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/playbook_fileauditlogging.yml b/samples/playbook_fileauditlogging.yml index 4faab376..cad4500f 100644 --- a/samples/playbook_fileauditlogging.yml +++ b/samples/playbook_fileauditlogging.yml @@ -17,11 +17,11 @@ pre_tasks: - include_vars: fal_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - scale_fileauditlogging/precheck - - scale_fileauditlogging/node - - scale_fileauditlogging/cluster - - scale_fileauditlogging/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - fal_prepare + - fal_install + - fal_configure + - fal_verify diff --git a/samples/playbook_json_ces.yml b/samples/playbook_json_ces.yml index 0f069367..305d47a5 100644 --- a/samples/playbook_json_ces.yml +++ b/samples/playbook_json_ces.yml @@ -17,31 +17,31 @@ vars: - 
scale_install_localpkg_path: /root/Spectrum_Scale_Advanced-5.1.0.0-x86_64-Linux-install roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck - - gui/precheck - - gui/node - - gui/cluster - - gui/postcheck - - zimon/precheck - - zimon/node - - zimon/cluster - - zimon/postcheck - - callhome/precheck - - callhome/node - - callhome/cluster - - callhome/postcheck - - nfs/precheck - - nfs/node - - nfs/cluster - - nfs/postcheck - - smb/precheck - - smb/node - - smb/cluster - - smb/postcheck - - scale_fileauditlogging/precheck - - scale_fileauditlogging/node - - scale_fileauditlogging/cluster - - scale_fileauditlogging/postcheck + - core_prepare + - core_install + - core_configure + - core_verify + - gui_prepare + - gui_install + - gui_configure + - gui_verify + - perfmon_prepare + - perfmon_install + - perfmon_configure + - perfmon_verify + - callhome_prepare + - callhome_install + - callhome_configure + - callhome_verify + - nfs_prepare + - nfs_install + - nfs_configure + - nfs_verify + - smb_prepare + - smb_install + - smb_configure + - smb_verify + - fal_prepare + - fal_install + - fal_configure + - fal_verify diff --git a/samples/playbook_localpkg.yml b/samples/playbook_localpkg.yml index 3599b6e9..eda29d91 100644 --- a/samples/playbook_localpkg.yml +++ b/samples/playbook_localpkg.yml @@ -18,7 +18,7 @@ vars: - scale_install_localpkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/playbook_nodeclass.yml b/samples/playbook_nodeclass.yml index e5575131..b6b04d1d 100644 --- a/samples/playbook_nodeclass.yml +++ b/samples/playbook_nodeclass.yml @@ -42,7 +42,7 @@ pre_tasks: - include_vars: config_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/playbook_remote_mount.yml b/samples/playbook_remote_mount.yml index 9655b655..1584e652 100644 --- a/samples/playbook_remote_mount.yml +++ b/samples/playbook_remote_mount.yml @@ -22,7 +22,7 @@ - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs2", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables - { scale_remotemount_client_filesystem_name: "fs3", scale_remotemount_client_remotemount_path: "/gpfs/fs3", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } roles: - - remote_mount + - remotemount_configure # If Accessing/Client Cluster don't have GUI, # Then change wee need to add variable scale_remotemount_client_no_gui: true and ansible "hosts" need to point to one of the Scale client cluster node diff --git a/samples/playbook_remote_mount_cli.yml b/samples/playbook_remote_mount_cli.yml index 43414e94..bf250ca8 100644 --- a/samples/playbook_remote_mount_cli.yml +++ b/samples/playbook_remote_mount_cli.yml @@ -23,4 +23,4 @@ - { scale_remotemount_client_filesystem_name: "fs1", scale_remotemount_client_remotemount_path: "/gpfs/fs1", scale_remotemount_storage_filesystem_name: "gpfs01", } # Minimum variables - { scale_remotemount_client_filesystem_name: "fs2", scale_remotemount_client_remotemount_path: "/gpfs/fs1", scale_remotemount_storage_filesystem_name: "gpfs02", scale_remotemount_client_mount_priority: '2', 
scale_remotemount_access_mount_attributes: "rw", scale_remotemount_client_mount_fs: "yes" } roles: - - remote_mount + - remotemount_configure diff --git a/samples/playbook_remotepkg.yml b/samples/playbook_remotepkg.yml index 6b4a94ab..84fad65c 100644 --- a/samples/playbook_remotepkg.yml +++ b/samples/playbook_remotepkg.yml @@ -16,7 +16,7 @@ vars: - scale_install_remotepkg_path: /root/Spectrum_Scale_Standard-5.0.4.0-x86_64-Linux-install roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/playbook_repository.yml b/samples/playbook_repository.yml index e28ec5ad..079164d4 100644 --- a/samples/playbook_repository.yml +++ b/samples/playbook_repository.yml @@ -21,7 +21,7 @@ # Remember the trailing slash `/` in the URL - scale_install_repository_url: http://server/path/ roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/playbook_storage.yml b/samples/playbook_storage.yml index 644bc84a..13c40d4c 100644 --- a/samples/playbook_storage.yml +++ b/samples/playbook_storage.yml @@ -19,7 +19,7 @@ pre_tasks: - include_vars: storage_vars.yml roles: - - core/precheck - - core/node - - core/cluster - - core/postcheck + - core_prepare + - core_install + - core_configure + - core_verify diff --git a/samples/set_json_variables.yml b/samples/set_json_variables.yml index 2b8386af..b550d74c 100644 --- a/samples/set_json_variables.yml +++ b/samples/set_json_variables.yml @@ -36,8 +36,9 @@ scale_cluster_manager: "{{ item.is_manager_node | default(false) }}" scale_cluster_gui: "{{ item.is_gui_server | default(false) }}" scale_zimon_collector: "{{ item.is_collector_node | default(false) }}" + scale_nsd_server: "{{ item.is_nsd_server | default(false) }}" state: "{{ item.state | default('present') }}" - is_admin_node: "{{ item.is_admin_node | default('true') }}" + scale_admin_node: "{{ item.is_admin_node | default('true') }}" scale_nodeclass: "{{ item.scale_nodeclass | default(omit) }}" scale_config: "{{ scale_config | default(omit) }}" ansible_ssh_private_key_file: "{{ item.ansible_ssh_private_key_file | default(omit) }}" @@ -58,7 +59,7 @@ scale_remotemount_storage_gui_hostname: "{{ scale_remotemount.storage_gui_hostname | default(omit) }}" scale_remotemount_storage_filesystem_name: "{{ scale_remotemount.storage_filesystem_name | default(omit) }}" scale_sync_replication_config: "{{ scale_cluster.scale_sync_replication_config | default(false) }}" - is_protocol_node: "{{ item.is_protocol_node | default(false) }}" + scale_protocol_node: "{{ item.is_protocol_node | default(false) }}" scale_callhome_params: "{{ scale_callhome_params | default(omit) }}" scale_protocols: "{{ scale_protocols | default(omit) }}" scale_hdfs_cluster: "{{ scale_hdfs_cluster | default(omit) }}"