Refactor the autoscaling tests into a telemetry_autoscaling role in fvt #3

Draft · wants to merge 17 commits into base: dev_osp18
86 changes: 86 additions & 0 deletions playbooks/autoscaling_osp18.yaml
@@ -0,0 +1,86 @@
---
- hosts: localhost
become: no
name: Run the autoscaling tests
  # NOTE: These vars should eventually be moved into a vars file.
vars:
openstack_cmd: "oc rsh openstackclient openstack"
metrics_backend: "prometheus"
stack_image: "cirros"
stack_flavor: "m1.small"
stack_network: "private"
stack_external_network: "public"
    # this is temporary and should later be replaced by a heat stack or dedicated ansible tasks
fvt_dir: "{{ playbook_dir }}/feature-verification-tests"
tasks:
    # this is temporary
    - name: Clone the feature-verification-tests repository
      ansible.builtin.git:
        repo: https://github.com/infrawatch/feature-verification-tests
dest: "{{ fvt_dir }}"
version: efoley-add_telemetry_autoscaling

    - name: Clone the install_yamls repository
      ansible.builtin.command:
        cmd: git clone https://github.com/openstack-k8s-operators/install_yamls
chdir: "{{ playbook_dir }}"
creates: "{{ playbook_dir }}/install_yamls"
tags:
- setup

    - name: Deploy a test instance via install_yamls devsetup
      community.general.make:
chdir: "{{ playbook_dir }}/install_yamls/devsetup"
target: edpm_deploy_instance
tags:
- setup

    - name: Patch the openstackversions to use the master containers for aodh and heat
      ansible.builtin.shell:
        cmd: |
          oc patch openstackversions openstack-galera-network-isolation --type merge --patch-file patch.yaml
        # patch.yaml is added next to this playbook in this PR
        chdir: "{{ playbook_dir }}"
tags:
- setup

    - name: Install aodhclient and observabilityclient into openstackclient
      ansible.builtin.shell:
        cmd: |
oc exec openstackclient -- python3 -m ensurepip --upgrade
oc exec openstackclient -- python3 -m pip install --upgrade aodhclient
oc exec openstackclient -- python3 -m pip install --upgrade python-observabilityclient
tags:
- setup

    - name: Wait until the oscp has reconciled the changes before continuing
ansible.builtin.shell:
cmd: |
oc get oscp | grep "Setup complete"
retries: 24
      delay: 5
until: output.stdout_lines | length == 1
register: output
tags:
- setup

    # NOTE: the tags are for testing/development; eventually the role will just be imported and main.yml will run through the tasks (see the sketch after this file)
    - name: Run the autoscaling prechecks
      ansible.builtin.import_role:
        name: '{{ fvt_dir }}/roles/telemetry_autoscaling'
        tasks_from: verify_autoscaling
      tags:
        - precheck

    - name: Configure heat for autoscaling
      ansible.builtin.import_role:
        name: '{{ fvt_dir }}/roles/telemetry_autoscaling'
        tasks_from: configure_heat
      tags:
        - create

    - name: Create the autoscaling stack
      ansible.builtin.import_role:
        name: '{{ fvt_dir }}/roles/telemetry_autoscaling'
        tasks_from: creating_stack
      tags:
        - create

    - name: Test autoscaling
      ansible.builtin.import_role:
        name: '{{ fvt_dir }}/roles/telemetry_autoscaling'
        tasks_from: test_autoscaling
      tags:
        - test
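
The two NOTE comments in this playbook point at the intended end state: the inline vars move to a vars file and the per-tasks_from imports collapse into a single role import. A minimal sketch of that final playbook, assuming the role gains a main.yml entry point and a vars/autoscaling.yaml file (both hypothetical; neither exists in this PR):

---
# Hypothetical end state once the development tags are no longer needed.
- hosts: localhost
  become: no
  name: Run the autoscaling tests
  vars:
    fvt_dir: "{{ playbook_dir }}/feature-verification-tests"
  vars_files:
    - vars/autoscaling.yaml   # assumed home for openstack_cmd, stack_image, etc.
  tasks:
    - name: Run the telemetry_autoscaling role end to end
      ansible.builtin.import_role:
        name: '{{ fvt_dir }}/roles/telemetry_autoscaling'
        # no tasks_from: main.yml would chain verify_autoscaling, configure_heat,
        # creating_stack, and test_autoscaling in order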
178 changes: 3 additions & 175 deletions playbooks/configure_heat.yaml
@@ -6,181 +6,9 @@
- hosts: undercloud
become: no
name: Using the heat service for autoscaling
vars:
openstack_cmd: "{{ openstack_cmd if openstack_cmd is defined else 'openstack' }}"
stack_name: "{{ stack_name if stack_name is defined else 'vnf' }}"
tasks:

- name: Create the generic archive policy for autoscaling
shell: |
source ~/overcloudrc;
{{ openstack_cmd }} metric archive-policy create generic \
--back-window 0 \
--definition timespan:'4:00:00',granularity:'0:01:00',points:240 \
--aggregation-method 'rate:mean' \
--aggregation-method 'mean';
register: result
failed_when: result.rc >= 1

- name: Verify that the archive policy was created
shell: |
source ~/overcloudrc;
{{ openstack_cmd }} metric archive-policy show generic;
register: result
failed_when: result.rc >= 1

- name: Create "vnf" directory under templates
shell: |
mkdir -p $HOME/templates/autoscaling/vnf/

- name: Configure heat template for automatically scaling instances
copy:
dest: ~/templates/autoscaling/vnf/instance.yaml
content: |
heat_template_version: wallaby
description: Template to control scaling of VNF instance

parameters:
metadata:
type: json
image:
type: string
description: image used to create instance
default: workload_image_1
flavor:
type: string
description: instance flavor to be used
default: workload_flavor_1
key_name:
type: string
description: keypair to be used
default: workload_key_1
network:
type: string
description: project network to attach instance to
default: workload_internal_net_1
external_network:
type: string
description: network used for floating IPs
default: public

resources:
vnf:
type: OS::Nova::Server
properties:
flavor: {get_param: flavor}
key_name: {get_param: key_name}
image: { get_param: image }
metadata: { get_param: metadata }
networks:
- port: { get_resource: port }

port:
type: OS::Neutron::Port
properties:
network: {get_param: network}
security_groups:
- workload_secgroup_1

floating_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: {get_param: external_network }

floating_ip_assoc:
type: OS::Neutron::FloatingIPAssociation
properties:
floatingip_id: { get_resource: floating_ip }
port_id: { get_resource: port }

- name: Create the resource to reference in the heat template
copy:
dest: ~/templates/autoscaling/vnf/resources.yaml
content: |
resource_registry:
"OS::Nova::Server::VNF": /home/stack/templates/autoscaling/vnf/instance.yaml

- name: Create the deployment template for heat to control instance scaling
copy:
dest: ~/templates/autoscaling/vnf/template.yaml
content: |
heat_template_version: wallaby
description: Example auto scale group, policy and alarm
resources:
scaleup_group:
type: OS::Heat::AutoScalingGroup
properties:
max_size: 3
min_size: 1
#desired_capacity: 1
resource:
type: OS::Nova::Server::VNF
properties:
metadata: {"metering.server_group": {get_param: "OS::stack_id"}}

scaleup_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: scaleup_group }
cooldown: 60
scaling_adjustment: 1

scaledown_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: scaleup_group }
cooldown: 60
scaling_adjustment: -1

cpu_alarm_high:
type: OS::Aodh::GnocchiAggregationByResourcesAlarm
properties:
description: Scale up instance if CPU > 50%
metric: cpu
aggregation_method: rate:mean
granularity: 300
evaluation_periods: 1
threshold: 30000000000.0
resource_type: instance
comparison_operator: gt
alarm_actions:
- str_replace:
template: trust+url
params:
url: {get_attr: [scaleup_policy, signal_url]}
query:
list_join:
- ''
- - {'=': {server_group: {get_param: "OS::stack_id"}}}

cpu_alarm_low:
type: OS::Aodh::GnocchiAggregationByResourcesAlarm
properties:
description: Scale down instance if CPU < 20%
metric: cpu
aggregation_method: rate:mean
granularity: 300
evaluation_periods: 1
threshold: 12000000000.0
resource_type: instance
comparison_operator: lt
alarm_actions:
- str_replace:
template: trust+url
params:
url: {get_attr: [scaledown_policy, signal_url]}
query:
list_join:
- ''
- - {'=': {server_group: {get_param: "OS::stack_id"}}}

outputs:
scaleup_policy_signal_url:
value: {get_attr: [scaleup_policy, alarm_url]}

scaledown_policy_signal_url:
value: {get_attr: [scaledown_policy, alarm_url]}
  - name: Configure heat for autoscaling
    ansible.builtin.import_role:
      name: '../roles/telemetry_autoscaling'
      tasks_from: configure_heat

...
92 changes: 3 additions & 89 deletions playbooks/creating_stack.yaml
@@ -6,95 +6,9 @@
- hosts: undercloud
become: no
name: Creating the stack deployment for autoscaling
vars:
openstack_cmd: "{{ openstack_cmd if openstack_cmd is defined else 'openstack' }}"
stack_name: "{{ stack_name if stack_name is defined else 'vnf' }}"
tasks:

- name: Create the stack
shell: |
source ~/overcloudrc;
{{ openstack_cmd }} stack create \
-t $HOME/templates/autoscaling/vnf/template.yaml \
-e $HOME/templates/autoscaling/vnf/resources.yaml \
{{ stack_name }};
register: result
failed_when: result.rc >= 1

- name: Wait for 60 sec
pause:
minutes: 1

- name: Verify that the stack was created successfully
shell: |
source ~/overcloudrc;
{{ openstack_cmd }} stack show {{ stack_name }} -c id -c stack_status;
register: result
failed_when: '"CREATE_COMPLETE" not in result.stdout'

- name: Verify that the stack resources are created
shell: |
source ~/overcloudrc;
export STACK_ID=$({{ openstack_cmd }} stack show {{ stack_name }} -c id -f value);
{{ openstack_cmd }} stack resource list $STACK_ID;
register: result
failed_when: '"CREATE_COMPLETE" not in result.stdout'

- name: Verify that an instance was launched by the stack creation
shell: |
source ~/overcloudrc;
export STACK_ID=$({{ openstack_cmd }} stack show {{ stack_name }} -c id -f value);
{{ openstack_cmd }} server list --long | grep $STACK_ID;
register: result
failed_when: result.rc >= 1

- name: Verify that the alarms were created for the stack
shell: |
source ~/overcloudrc;
{{ openstack_cmd }} alarm list
register: result
failed_when: result.rc >= 1

- name: Note the physical_resource_id values for the cpu_alarm_low resource
shell: |
source ~/overcloudrc;
export STACK_ID=$({{ openstack_cmd }} stack show {{ stack_name }} -c id -f value);
{{ openstack_cmd }} stack resource list $STACK_ID |grep -i cpu_alarm_low | awk '{print $4}'
register: physical_resource_id_low

- name: Note the physical_resource_id values for the cpu_alarm_high resource
shell: |
source ~/overcloudrc;
export STACK_ID=$({{ openstack_cmd }} stack show {{ stack_name }} -c id -f value);
{{ openstack_cmd }} stack resource list $STACK_ID |grep -i cpu_alarm_high | awk '{print $4}'
register: physical_resource_id_high

- name: Verify the physical_resource_id matches the alarm id for cpu_alarm_low
shell: |
source ~/overcloudrc;
{{ openstack_cmd }} alarm list |grep -i cpu_alarm_low | awk '{print $2}'
register: alarm_id_low
failed_when:
- physical_resource_id_low.stdout != alarm_id_low.stdout

- name: Verify the physical_resource_id matches the alarm id for cpu_alarm_high
shell: |
source ~/overcloudrc;
{{ openstack_cmd }} alarm list |grep -i cpu_alarm_high | awk '{print $2}'
register: alarm_id_high
failed_when:
- physical_resource_id_high.stdout != alarm_id_high.stdout

- name: Verify that metric resources exist for the stack
shell: |
source ~/overcloudrc;
export STACK_ID=$({{ openstack_cmd }} stack show {{ stack_name }} -c id -f value);
{{ openstack_cmd }} metric resource search \
--sort-column launched_at -c id \
-c display_name -c launched_at \
-c deleted_at --type instance \
server_group="$STACK_ID"
register: result
failed_when: result.rc >= 1
  - name: Create the autoscaling stack
    ansible.builtin.include_role:
      name: '../roles/telemetry_autoscaling'
      tasks_from: creating_stack

...
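
With the task bodies moved into the role, the two wrapper playbooks keep their original entry points and can still be run standalone. A usage sketch (the inventory path and extra-vars are illustrative, not part of this PR):

ansible-playbook -i inventory playbooks/configure_heat.yaml
ansible-playbook -i inventory playbooks/creating_stack.yaml -e stack_name=vnf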
9 changes: 9 additions & 0 deletions playbooks/patch.yaml
@@ -0,0 +1,9 @@
spec:
customContainerImages:
aodhAPIImage: quay.io/podified-master-centos9/openstack-aodh-api:current-podified
aodhEvaluatorImage: quay.io/podified-master-centos9/openstack-aodh-evaluator:current-podified
aodhListenerImage: quay.io/podified-master-centos9/openstack-aodh-listener:current-podified
aodhNotifierImage: quay.io/podified-master-centos9/openstack-aodh-notifier:current-podified
heatAPIImage: quay.io/podified-master-centos9/openstack-heat-api:current-podified
heatCfnapiImage: quay.io/podified-master-centos9/openstack-heat-api-cfn:current-podified
heatEngineImage: quay.io/podified-master-centos9/openstack-heat-engine:current-podified
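
This patch file backs the "Patch the openstackversions" task in autoscaling_osp18.yaml above. A quick hand-check that the override landed (a sketch; resource and field names are taken from this diff, and the jsonpath output shape may vary by operator version):

oc patch openstackversions openstack-galera-network-isolation --type merge --patch-file playbooks/patch.yaml
oc get openstackversions openstack-galera-network-isolation -o jsonpath='{.spec.customContainerImages}'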