Baremetal e2e scripts (#248)
Changes for baremetal

Co-authored-by: jdowni000 <[email protected]>
Co-authored-by: Marko Karg <[email protected]>
Co-authored-by: jdowni000 <[email protected]>
Co-authored-by: Raul Sevilla <[email protected]>
5 people authored Oct 28, 2021
1 parent dbe4a1a commit dd11ead
Showing 18 changed files with 606 additions and 25 deletions.
6 changes: 5 additions & 1 deletion utils/benchmark-operator.sh
@@ -4,7 +4,11 @@ install_cli() {
ripsaw_tmp=/tmp/ripsaw-cli
mkdir -p ${ripsaw_tmp}
if [[ ! -f ${ripsaw_tmp}/bin/activate ]]; then
python -m venv ${ripsaw_tmp}
if [[ "${isBareMetal}" == "true" ]]; then
python3.8 -m venv ${ripsaw_tmp}
else
python -m venv ${ripsaw_tmp}
fi
fi
source ${ripsaw_tmp}/bin/activate
pip3 install -U "git+https://github.com/cloud-bulldozer/benchmark-operator.git/#egg=ripsaw-cli&subdirectory=cli"
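A quick sanity check for this change (a minimal sketch, assuming the /tmp/ripsaw-cli path above; not part of this commit): after install_cli runs on a bare-metal cluster, the venv should expose a 3.8 interpreter.

source /tmp/ripsaw-cli/bin/activate
python --version   # expected to report Python 3.8.x when isBareMetal=true
deactivate
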
1 change: 0 additions & 1 deletion workloads/kube-burner/common.sh
@@ -154,4 +154,3 @@ snappy_backup() {
../../utils/snappy-move-results/run_snappy.sh metadata.json $snappy_path
store_on_elastic
}

60 changes: 42 additions & 18 deletions workloads/network-perf/common.sh
@@ -16,13 +16,47 @@ check_cluster_health() {


export_defaults() {
network_type=$(oc get network cluster -o jsonpath='{.status.networkType}' | tr '[:upper:]' '[:lower:]')
export client_server_pairs=(1 2 4)
export CR_NAME=${BENCHMARK:=benchmark}
export baremetalCheck=$(oc get infrastructure cluster -o json | jq .spec.platformSpec.type)
zones=($(oc get nodes -l node-role.kubernetes.io/workload!=,node-role.kubernetes.io/infra!=,node-role.kubernetes.io/worker -o go-template='{{ range .items }}{{ index .metadata.labels "topology.kubernetes.io/zone" }}{{ "\n" }}{{ end }}' | uniq))
platform=$(oc get infrastructure cluster -o jsonpath='{.status.platformStatus.type}' | tr '[:upper:]' '[:lower:]')
log "Platform is found to be: ${platform} "
# If MULTI_AZ we use one node from the two first AZs
if [[ ${platform} == "vsphere" ]]; then

#Check to see if the infrastructure type is baremetal to adjust script as necessary
if [[ "${baremetalCheck}" == '"BareMetal"' ]]; then
log "BareMetal infastructure: setting isBareMetal accordingly"
export isBareMetal=true
else
export isBareMetal=false
fi
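# Sketch (not part of this commit): jq without -r prints the JSON string with its
# quotes, which is why the comparison above matches '"BareMetal"'. An equivalent
# check using raw output would look like:
#   platformType=$(oc get infrastructure cluster -o json | jq -r .spec.platformSpec.type)
#   if [[ "${platformType}" == "BareMetal" ]]; then export isBareMetal=true; else export isBareMetal=false; fi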

# If using baremetal we use a different query to find worker nodes
if [[ "${isBareMetal}" == "true" ]]; then
#Installing python3.8
sudo yum -y install python3.8

nodeCount=$(oc get nodes --no-headers -l node-role.kubernetes.io/worker | wc -l)
if [[ ${nodeCount} -ge 2 ]]; then
serverNumber=$(( $RANDOM %${nodeCount} + 1 ))
clientNumber=$(( $RANDOM %${nodeCount} + 1 ))
while (( $serverNumber == $clientNumber ))
do
clientNumber=$(( $RANDOM %${nodeCount} + 1 ))
done
export server=$(oc get nodes --no-headers -l node-role.kubernetes.io/worker | awk 'NR=='${serverNumber}'{print $1}')
export client=$(oc get nodes --no-headers -l node-role.kubernetes.io/worker | awk 'NR=='${clientNumber}'{print $1}')
else
log "Colocating uperf pods for baremetal, since only one worker node available"
export server=$(oc get nodes --no-headers -l node-role.kubernetes.io/worker | awk 'NR=='1'{print $1}')
export client=$(oc get nodes --no-headers -l node-role.kubernetes.io/worker | awk 'NR=='1'{print $1}')
fi
log "Finished assigning server and client nodes"
log "Server to be scheduled on node: $server"
log "Client to be scheduled on node: $client"
# If multi_az we use one node from the first two AZs
elif [[ ${platform} == "vsphere" ]]; then
nodes=($(oc get nodes -l node-role.kubernetes.io/worker,node-role.kubernetes.io/workload!="",node-role.kubernetes.io/infra!="" -o jsonpath='{range .items[*]}{ .metadata.labels.kubernetes\.io/hostname}{"\n"}{end}'))
if [[ ${#nodes[@]} -lt 2 ]]; then
log "At least 2 worker nodes placed are required"
@@ -52,20 +86,6 @@ export_defaults() {
export client=${nodes[1]}
fi

if [ ${WORKLOAD} == "hostnet" ]
then
export hostnetwork=true
export serviceip=false
elif [ ${WORKLOAD} == "service" ]
then
export _metadata_targeted=false
export hostnetwork=false
export serviceip=true
else
export hostnetwork=false
export serviceip=false
fi

if [[ -z "$GSHEET_KEY_LOCATION" ]]; then
export GSHEET_KEY_LOCATION=$HOME/.secrets/gsheet_key.json
fi
@@ -154,15 +174,18 @@ assign_uuid() {
}

run_benchmark_comparison() {
log "Begining benchamrk comparison"
../../utils/touchstone-compare/run_compare.sh uperf ${baseline_uperf_uuid} ${compare_uperf_uuid} ${pairs}
pairs_array=( "${pairs_array[@]}" "compare_output_${pairs}.yaml" )
log "Finished benchmark comparison"
}

generate_csv() {
log "Generating CSV"
python3 csv_gen.py --files $(echo "${pairs_array[@]}") --latency_tolerance=$latency_tolerance --throughput_tolerance=$throughput_tolerance
log "Finished generating CSV"
}


get_gold_ocp_version(){
current_version=`oc get clusterversion | grep -o [0-9.]* | head -1 | cut -c 1-3`
export GOLD_OCP_VERSION=$( bc <<< "$current_version - 0.1" )
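# Worked example (illustration, not part of this commit): if `oc get clusterversion`
# reports VERSION 4.9.5, the grep -o / head / cut pipeline yields current_version=4.9
# and bc sets GOLD_OCP_VERSION=4.8.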
@@ -188,3 +211,4 @@ python3 -m pip install -r requirements.txt | grep -v 'already satisfied'
export_defaults
check_cluster_health
deploy_operator
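
The bare-metal branch above picks two distinct worker nodes at random for the uperf server and client. A minimal standalone sketch of that selection logic (assumes at least two nodes carry the node-role.kubernetes.io/worker label; not part of this commit):

workers=($(oc get nodes --no-headers -l node-role.kubernetes.io/worker -o custom-columns=NAME:.metadata.name))
server=${workers[$((RANDOM % ${#workers[@]}))]}
client=${workers[$((RANDOM % ${#workers[@]}))]}
# re-roll the client until it lands on a different node than the server
while [[ "${client}" == "${server}" ]]; do
  client=${workers[$((RANDOM % ${#workers[@]}))]}
done
echo "server=${server} client=${client}"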

3 changes: 2 additions & 1 deletion workloads/network-perf/requirements.txt
@@ -1,4 +1,5 @@
gspread
gspread-formatting
oauth2client
pyyaml
PyYAML>=5.4.1
make
2 changes: 1 addition & 1 deletion workloads/network-perf/ripsaw-uperf-crd.yaml
@@ -3,7 +3,7 @@
apiVersion: ripsaw.cloudbulldozer.io/v1alpha1
kind: Benchmark
metadata:
name: uperf-benchmark-${WORKLOAD}-network-${pairs}
name: uperf-${CR_NAME}-${WORKLOAD}-network-${pairs}
namespace: benchmark-operator
spec:
uuid: ${UUID}
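The rename lets CR_NAME (default "benchmark", exported in common.sh above) distinguish concurrent Benchmark CRs. For illustration only, a sketch of how the templated name expands when the manifest is rendered with envsubst (the scripts' actual apply step may differ):

export UUID=$(uuidgen) CR_NAME=benchmark WORKLOAD=hostnet pairs=1
envsubst < ripsaw-uperf-crd.yaml | oc apply -f -
# creates a Benchmark object named uperf-benchmark-hostnet-network-1
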
Another changed file (file name not captured in this view):
@@ -16,3 +16,4 @@ generate_csv
if [[ ${ENABLE_SNAPPY_BACKUP} == "true" ]] ; then
snappy_backup network_perf_hostnetwork_test
fi
log "Finished workload ${0}"
2 changes: 1 addition & 1 deletion workloads/network-perf/run_multus_network_tests_fromgit.sh
@@ -114,4 +114,4 @@ fi
# Cleanup
rm -rf /tmp/benchmark-operator
rm -f compare_output_*.yaml
exit 0
log "Finished workload ${0}"
Another changed file (file name not captured in this view):
@@ -14,3 +14,4 @@ for pairs in 1 2 4; do
run_benchmark_comparison
done
generate_csv
log "Finished workload ${0}"
1 change: 1 addition & 0 deletions workloads/network-perf/run_pod_network_test_fromgit.sh
@@ -17,3 +17,4 @@ generate_csv
if [[ ${ENABLE_SNAPPY_BACKUP} == "true" ]] ; then
snappy_backup network_perf_pod_network_test
fi
log "Finished workload ${0}"
Another changed file (file name not captured in this view):
@@ -4,6 +4,9 @@ export NETWORK_POLICY=true

source ./common.sh
export SERVICEIP=true
if [[ "${isBareMetal}" == "true" ]]; then
export METADATA_TARGETED=true
fi

for pairs in 1 2 4; do
export pairs=${pairs}
@@ -15,3 +18,4 @@ for pairs in 1 2 4; do
run_benchmark_comparison
done
generate_csv
log "Finished workload ${0}"
Another changed file (file name not captured in this view):
@@ -18,3 +18,4 @@ generate_csv
if [[ ${ENABLE_SNAPPY_BACKUP} == "true" ]] ; then
snappy_backup network_perf_serviceip_network_test
fi
log "Finished workload ${0}"
2 changes: 1 addition & 1 deletion workloads/network-perf/smoke_test.sh
@@ -22,11 +22,11 @@ if [[ ${ENABLE_SNAPPY_BACKUP} == "true" ]] ; then
tar -zcvf snappy_files.tar.gz ./files_list

export workload=network_perf_smoke_test

export snappy_path="$SNAPPY_USER_FOLDER/$runid$platform-$cluster_version-$network_type/$workload/$folder_date_time/"
generate_metadata > metadata.json
../../utils/snappy-move-results/run_snappy.sh snappy_files.tar.gz $snappy_path
../../utils/snappy-move-results/run_snappy.sh metadata.json $snappy_path
store_on_elastic
rm -rf files_list
fi
log "Finished workload ${0}"