diff --git a/.github/actions/cpUtility-testing/action.yml b/.github/actions/cpUtility-testing/action.yml new file mode 100644 index 0000000000..e01f9d1f7a --- /dev/null +++ b/.github/actions/cpUtility-testing/action.yml @@ -0,0 +1,67 @@ +name: Build and Push Java-Agent Image after CPUtility Test +description: | + cpUtility Testing + This action assumes that Repo was checked out and Java was set correctly + +inputs: + aws-region: + required: true + description: "AWS Region" + image_uri_with_tag: + required: true + description: "Image URI with Tag" + image_registry: + required: true + description: "Image Registry" + adot-java-version: + required: true + description: "ADOT Java Version" + snapshot-ecr-role: + required: true + description: "IAM Role used for pushing to snapshot ecr" + + +runs: + using: "composite" + steps: + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ inputs.snapshot-ecr-role }} + aws-region: ${{ inputs.aws-region }} + + - name: Login to private staging ecr + uses: docker/login-action@v3 + with: + registry: ${{ inputs.image_registry }} + env: + AWS_REGION: ${{ inputs.aws-region }} + + - name: Build image for testing + uses: docker/build-push-action@v5 + with: + push: false + build-args: "ADOT_JAVA_VERSION=${{ inputs.adot-java-version }}" + context: . + platforms: linux/amd64 + tags: ${{ inputs.image_uri_with_tag }} + load: true + + - name: Test docker image + shell: bash + run: .github/scripts/test-adot-javaagent-image.sh "${{ inputs.image_uri_with_tag }}" "${{ inputs.adot-java-version }}" + + - name: Build and push image + uses: docker/build-push-action@v5 + with: + push: true + build-args: "ADOT_JAVA_VERSION=${{ inputs.adot-java-version }}" + context: . 
+ platforms: linux/amd64,linux/arm64 + tags: ${{ inputs.image_uri_with_tag }} \ No newline at end of file diff --git a/.github/actions/patch-dependencies/action.yml b/.github/actions/patch-dependencies/action.yml index c144483dc1..048c480540 100644 --- a/.github/actions/patch-dependencies/action.yml +++ b/.github/actions/patch-dependencies/action.yml @@ -13,33 +13,52 @@ inputs: default: "false" required: false description: "If the workflow should run tests of the dependencies. Anything different than false will evaluate to true" - branch: - required: true - description: "The branch where this patches are being applied e.g.: release/v1.21.x" gpg_private_key: description: "The gpg key used to sign the artifacts" - required: true + required: false gpg_password: description: "The gpg key password" - required: true + required: false runs: using: "composite" steps: + - name: set environment variables + env: + INPUT_KEY: ${{ inputs.gpg_private_key }} + INPUT_PASSWORD: ${{ inputs.gpg_password }} + shell: bash + run: | + EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) + if [[ ! -z "$INPUT_KEY" ]]; then + { + echo "GPG_PRIVATE_KEY<<$EOF" + echo "$INPUT_KEY" + echo "$EOF" + } >> "$GITHUB_ENV" + fi + if [[ ! 
-z "$INPUT_PASSWORD" ]]; then + { + echo "GPG_PASSWORD<<$EOF" + echo "$INPUT_PASSWORD" + echo "$EOF" + } >> "$GITHUB_ENV" + fi + - name: check patches run: | - if [[ -f .github/patches/${{ inputs.branch }}/opentelemetry-java.patch ]]; then + if [[ -f .github/patches/opentelemetry-java.patch ]]; then echo 'patch_otel_java=true' >> $GITHUB_ENV fi - if [[ -f .github/patches/${{ inputs.branch }}/opentelemetry-java-instrumentation.patch ]]; then + if [[ -f .github/patches/opentelemetry-java-instrumentation.patch ]]; then echo 'patch_otel_java_instrumentation=true' >> $GITHUB_ENV fi - if [[ -f .github/patches/${{ inputs.branch }}/opentelemetry-java-contrib.patch ]]; then + if [[ -f .github/patches/opentelemetry-java-contrib.patch ]]; then echo 'patch_otel_java_contrib=true' >> $GITHUB_ENV fi shell: bash - name: Clone and patch repositories - run: .github/scripts/patch.sh "${{ inputs.branch }}" + run: .github/scripts/patch.sh if: ${{ env.patch_otel_java == 'true' || env.patch_otel_java_instrumentation == 'true' || env.patch_otel_java_contrib == 'true' }} @@ -51,9 +70,6 @@ runs: with: arguments: build publishToMavenLocal build-root-directory: opentelemetry-java - env: - GPG_PRIVATE_KEY: ${{ inputs.gpg_private_key }} - GPG_PASSWORD: ${{ inputs.gpg_password }} - name: Build opentelemetry-java uses: gradle/gradle-build-action@v2 @@ -61,9 +77,6 @@ runs: with: arguments: publishToMavenLocal build-root-directory: opentelemetry-java - env: - GPG_PRIVATE_KEY: ${{ inputs.gpg_private_key }} - GPG_PASSWORD: ${{ inputs.gpg_password }} - name: cleanup opentelemetry-java run: rm -rf opentelemetry-java @@ -76,9 +89,6 @@ runs: with: arguments: build publishToMavenLocal build-root-directory: opentelemetry-java-contrib - env: - GPG_PRIVATE_KEY: ${{ inputs.gpg_private_key }} - GPG_PASSWORD: ${{ inputs.gpg_password }} - name: Build opentelemetry-java-contrib uses: gradle/gradle-build-action@v2 @@ -86,9 +96,6 @@ runs: with: arguments: publishToMavenLocal build-root-directory: 
opentelemetry-java-contrib - env: - GPG_PRIVATE_KEY: ${{ inputs.gpg_private_key }} - GPG_PASSWORD: ${{ inputs.gpg_password }} - name: cleanup opentelemetry-java-contrib run: rm -rf opentelemetry-java-contrib @@ -101,9 +108,6 @@ runs: with: arguments: check -x spotlessCheck publishToMavenLocal build-root-directory: opentelemetry-java-instrumentation - env: - GPG_PRIVATE_KEY: ${{ inputs.gpg_private_key }} - GPG_PASSWORD: ${{ inputs.gpg_password }} - name: Build opentelemetry java instrumentation uses: gradle/gradle-build-action@v2 @@ -111,9 +115,6 @@ runs: with: arguments: publishToMavenLocal build-root-directory: opentelemetry-java-instrumentation - env: - GPG_PRIVATE_KEY: ${{ inputs.gpg_private_key }} - GPG_PASSWORD: ${{ inputs.gpg_password }} - name: cleanup opentelmetry-java-instrumentation run: rm -rf opentelemetry-java-instrumentation diff --git a/.github/scripts/patch.sh b/.github/scripts/patch.sh index ecbb47fc72..7bbfc7356a 100755 --- a/.github/scripts/patch.sh +++ b/.github/scripts/patch.sh @@ -2,10 +2,7 @@ # Enable debug mode, fail on any command that fail in this script and fail on unset variables set -x -e -u -# This parameter will help find the patches to be applied -BRANCH=$1 - -# .github/patches/$BRANCH/versions.sh should define all the versions of the dependencies that we are going to patch +# .github/patches/versions.sh should define all the versions of the dependencies that we are going to patch # This is used so that we can properly clone the upstream repositories. # This file should define the following variables: # OTEL_JAVA_VERSION. Tag of the opentelemetry-java repository to use. E.g.: JAVA_OTEL_JAVA_VERSION=v1.21.0 @@ -13,18 +10,18 @@ BRANCH=$1 # OTEL_JAVA_CONTRIB_VERSION. Tag of the opentelemetry-java-contrib repository. E.g.: OTEL_JAVA_CONTRIB_VERSION=v1.21.0 # This script will fail if a variable that is supposed to exist is referenced. -if [[ ! -f .github/patches/${BRANCH}/versions ]]; then +if [[ ! 
-f .github/patches/versions ]]; then echo "No versions file found. Skipping patching" exit 0 fi -source .github/patches/${BRANCH}/versions +source .github/patches/versions git config --global user.email "adot-patch-workflow@github.com" git config --global user.name "ADOT Patch workflow" -OTEL_JAVA_PATCH=".github/patches/${BRANCH}/opentelemetry-java.patch" +OTEL_JAVA_PATCH=".github/patches/opentelemetry-java.patch" if [[ -f "$OTEL_JAVA_PATCH" ]]; then git clone https://github.com/open-telemetry/opentelemetry-java.git cd opentelemetry-java @@ -37,7 +34,7 @@ else fi -OTEL_JAVA_CONTRIB_PATCH=".github/patches/${BRANCH}/opentelemetry-java-contrib.patch" +OTEL_JAVA_CONTRIB_PATCH=".github/patches/opentelemetry-java-contrib.patch" if [[ -f "$OTEL_JAVA_CONTRIB_PATCH" ]]; then git clone https://github.com/open-telemetry/opentelemetry-java-contrib.git cd opentelemetry-java-contrib @@ -50,7 +47,7 @@ else fi -OTEL_JAVA_INSTRUMENTATION_PATCH=".github/patches/${BRANCH}/opentelemetry-java-instrumentation.patch" +OTEL_JAVA_INSTRUMENTATION_PATCH=".github/patches/opentelemetry-java-instrumentation.patch" if [[ -f "$OTEL_JAVA_INSTRUMENTATION_PATCH" ]]; then git clone https://github.com/open-telemetry/opentelemetry-java-instrumentation.git cd opentelemetry-java-instrumentation diff --git a/.github/workflows/appsignals-e2e-ec2-test.yml b/.github/workflows/appsignals-e2e-ec2-test.yml index 6d1334e15c..428c8ad02e 100644 --- a/.github/workflows/appsignals-e2e-ec2-test.yml +++ b/.github/workflows/appsignals-e2e-ec2-test.yml @@ -17,10 +17,12 @@ permissions: contents: read env: - AWS_DEFAULT_REGION: ${{ inputs.aws-region }} # Used by terraform and AWS CLI commands + # The precense of this env var is required for use by terraform and AWS CLI commands + # It is not redundant + AWS_DEFAULT_REGION: ${{ inputs.aws-region }} TEST_ACCOUNT: ${{ secrets.APP_SIGNALS_E2E_TEST_ACC }} - SAMPLE_APP_FRONTEND_SERVICE_JAR: "s3://aws-appsignals-sample-app/main-service.jar" - SAMPLE_APP_REMOTE_SERVICE_JAR: 
"s3://aws-appsignals-sample-app/remote-service.jar" + SAMPLE_APP_FRONTEND_SERVICE_JAR: ${{ secrets.APP_SIGNALS_E2E_FE_SA_JAR }} + SAMPLE_APP_REMOTE_SERVICE_JAR: ${{ secrets.APP_SIGNALS_E2E_RE_SA_JAR }} APP_SIGNALS_ADOT_JAR: "https://github.com/aws-observability/aws-otel-java-instrumentation/releases/latest/download/aws-opentelemetry-agent.jar" METRIC_NAMESPACE: AppSignals LOG_GROUP_NAME: /aws/appsignals/generic @@ -33,14 +35,18 @@ jobs: with: fetch-depth: 0 + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + - name: Set CW Agent RPM environment variable run: | if [ ${{ env.AWS_DEFAULT_REGION }} == "us-east-1" ]; then - echo APP_SIGNALS_CW_AGENT_RPM="https://amazoncloudwatch-agent-us-east-1.s3.amazonaws.com/amazon_linux/amd64/1.300031.0b313/amazon-cloudwatch-agent.rpm" >> $GITHUB_ENV + echo GET_CW_AGENT_RPM_COMMAND="wget -O cw-agent.rpm https://amazoncloudwatch-agent-us-east-1.s3.amazonaws.com/amazon_linux/amd64/1.300031.0b313/amazon-cloudwatch-agent.rpm" >> $GITHUB_ENV else - echo APP_SIGNALS_CW_AGENT_RPM="https://amazoncloudwatch-agent-${{ env.AWS_DEFAULT_REGION }}.s3.${{ env.AWS_DEFAULT_REGION }}.amazonaws.com/amazon_linux/amd64/1.300031.0b313/amazon-cloudwatch-agent.rpm" >> $GITHUB_ENV + echo GET_CW_AGENT_RPM_COMMAND="wget -O cw-agent.rpm https://amazoncloudwatch-agent-${{ env.AWS_DEFAULT_REGION }}.s3.${{ env.AWS_DEFAULT_REGION }}.amazonaws.com/amazon_linux/amd64/1.300031.0b313/amazon-cloudwatch-agent.rpm" >> $GITHUB_ENV fi - - name: Generate testing id run: echo TESTING_ID="${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV @@ -51,6 +57,24 @@ jobs: role-to-assume: ${{ secrets.E2E_TEST_ROLE_ARN }} aws-region: ${{ env.AWS_DEFAULT_REGION }} + - uses: actions/download-artifact@v3 + if: inputs.caller-workflow-name == 'main-build' + with: + name: aws-opentelemetry-agent.jar + + - name: Upload main-build adot.jar to s3 + if: inputs.caller-workflow-name == 'main-build' + run: aws s3 cp ./aws-opentelemetry-agent-*-SNAPSHOT.jar 
s3://main-build-adot-staging-jar/aws-opentelemetry-agent.jar + + - name: Set Get ADOT.jar command environment variable + working-directory: testing/terraform/ec2 + run: | + if [ ${{ inputs.caller-workflow-name }} == "main-build" ]; then + echo GET_ADOT_JAR_COMMAND="aws s3 cp s3://main-build-adot-staging-jar/aws-opentelemetry-agent.jar ./adot.jar" >> $GITHUB_ENV + else + echo GET_ADOT_JAR_COMMAND="wget -O adot.jar https://github.com/aws-observability/aws-otel-java-instrumentation/releases/latest/download/aws-opentelemetry-agent.jar" >> $GITHUB_ENV + fi + - name: Set up terraform uses: hashicorp/setup-terraform@v3 with: @@ -76,8 +100,8 @@ jobs: -var="test_id=${{ env.TESTING_ID }}" \ -var="sample_app_jar=${{ env.SAMPLE_APP_FRONTEND_SERVICE_JAR }}" \ -var="sample_remote_app_jar=${{ env.SAMPLE_APP_REMOTE_SERVICE_JAR }}" \ - -var="cw_agent_rpm=${{ env.APP_SIGNALS_CW_AGENT_RPM }}" \ - -var="adot_jar=${{ env.APP_SIGNALS_ADOT_JAR }}" \ + -var="get_cw_agent_rpm_command=${{ env.GET_CW_AGENT_RPM_COMMAND }}" \ + -var="get_adot_jar_command=${{ env.GET_ADOT_JAR_COMMAND }}" \ || deployment_failed=$? 
if [ $deployment_failed -eq 1 ]; then @@ -123,6 +147,22 @@ jobs: fi done + # cache local patch outputs + - name: Cache local Maven repository + id: cache-local-maven-repo + uses: actions/cache@v3 + with: + path: | + ~/.m2/repository/io/opentelemetry/ + key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/opentelemetry-java*.patch') }} + + - name: Publish patched dependencies to maven local + uses: ./.github/actions/patch-dependencies + if: steps.cache-local-maven-repo.outputs.cache-hit != 'true' + with: + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + gpg_password: ${{ secrets.GPG_PASSPHRASE }} + - name: Get the ec2 instance ami id run: | echo "EC2_INSTANCE_AMI=$(terraform output ec2_instance_ami)" >> $GITHUB_ENV @@ -211,9 +251,7 @@ jobs: --region ${{ env.AWS_DEFAULT_REGION }} fi - # Clean up Procedures - - name: Terraform destroy if: always() continue-on-error: true diff --git a/.github/workflows/appsignals-e2e-eks-test.yml b/.github/workflows/appsignals-e2e-eks-test.yml index c5cb0f672a..315632f512 100644 --- a/.github/workflows/appsignals-e2e-eks-test.yml +++ b/.github/workflows/appsignals-e2e-eks-test.yml @@ -23,7 +23,9 @@ permissions: contents: read env: - AWS_DEFAULT_REGION: ${{ inputs.aws-region }} # Used by terraform and AWS CLI commands + # The precense of this env var is required for use by terraform and AWS CLI commands + # It is not redundant + AWS_DEFAULT_REGION: ${{ inputs.aws-region }} TEST_ACCOUNT: ${{ secrets.APP_SIGNALS_E2E_TEST_ACC }} ENABLEMENT_SCRIPT_S3_BUCKET: ${{ secrets.APP_SIGNALS_E2E_ENABLEMENT_SCRIPT }} SAMPLE_APP_NAMESPACE: sample-app-namespace @@ -40,6 +42,29 @@ jobs: with: fetch-depth: 0 + - name: Download enablement script + uses: actions/checkout@v4 + with: + repository: aws-observability/application-signals-demo + ref: main + path: enablement-script + sparse-checkout: | + scripts/eks/appsignals/enable-app-signals.sh + scripts/eks/appsignals/clean-app-signals.sh + sparse-checkout-cone-mode: false + + - name: Remove log 
group deletion command + if: always() + working-directory: enablement-script/scripts/eks/appsignals + run: | + delete_log_group="aws logs delete-log-group --log-group-name '${{ env.LOG_GROUP_NAME }}' --region \$REGION" + sed -i "s#$delete_log_group##g" clean-app-signals.sh + + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + - name: Generate testing id run: echo TESTING_ID="${{ env.AWS_DEFAULT_REGION }}-${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV @@ -83,18 +108,6 @@ jobs: with: terraform_wrapper: false - # Enable App Signals on the test cluster - - name: Pull and unzip enablement script from S3 - working-directory: testing/terraform/eks - run: aws s3 cp ${{ env.ENABLEMENT_SCRIPT_S3_BUCKET }} . && unzip -j onboarding.zip - - - name: Remove log group deletion command - if: always() - working-directory: testing/terraform/eks - run: | - delete_log_group="aws logs delete-log-group --log-group-name '${{ env.LOG_GROUP_NAME }}' --region \$REGION" - sed -i "s#$delete_log_group##g" clean-app-signals.sh - - name: Deploy sample app via terraform and wait for the endpoint to come online id: deploy-sample-app working-directory: testing/terraform/eks @@ -131,7 +144,7 @@ jobs: # after installing App Signals. 
Attempts to connect will be made for up to 10 minutes if [ $deployment_failed -eq 0 ]; then echo "Installing app signals to the sample app" - ./enable-app-signals.sh \ + ${GITHUB_WORKSPACE}/enablement-script/scripts/eks/appsignals/enable-app-signals.sh \ ${{ inputs.test-cluster-name }} \ ${{ env.AWS_DEFAULT_REGION }} \ ${{ env.SAMPLE_APP_NAMESPACE }} @@ -216,11 +229,19 @@ jobs: # cache local patch outputs - name: Cache local Maven repository + id: cache-local-maven-repo uses: actions/cache@v3 with: path: | ~/.m2/repository/io/opentelemetry/ - key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/**/opentelemetry-java-*.patch') }} + key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/opentelemetry-java*.patch') }} + + - name: Publish patched dependencies to maven local + uses: ./.github/actions/patch-dependencies + if: steps.cache-local-maven-repo.outputs.cache-hit != 'true' + with: + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + gpg_password: ${{ secrets.GPG_PASSPHRASE }} - name: Get the sample app endpoint run: echo "APP_ENDPOINT=$(terraform output sample_app_endpoint)" >> $GITHUB_ENV @@ -310,7 +331,7 @@ jobs: - name: Clean Up App Signals if: always() continue-on-error: true - working-directory: testing/terraform/eks + working-directory: enablement-script/scripts/eks/appsignals run: | ./clean-app-signals.sh \ ${{ inputs.test-cluster-name }} \ diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 98d06dec1b..024cef746b 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,13 +27,35 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: java + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + + - name: Cache local Maven repository + uses: actions/cache@v3 + with: + path: | + ~/.m2/repository/io/opentelemetry/ + key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/opentelemetry-java*.patch') }} + + - name: Publish patched dependencies to maven local + uses: ./.github/actions/patch-dependencies + if: steps.cache-local-maven-repo.outputs.cache-hit != 'true' + with: + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + gpg_password: ${{ secrets.GPG_PASSPHRASE }} + + - uses: gradle/wrapper-validation-action@v1 + - name: Manually build to avoid autobuild failures - run: | - ./gradlew build + uses: gradle/gradle-build-action@v3 + with: + arguments: build - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/docker-build-smoke-tests-fake-backend.yml b/.github/workflows/docker-build-smoke-tests-fake-backend.yml index ddd4be1f0b..5cd5f2956c 100644 --- a/.github/workflows/docker-build-smoke-tests-fake-backend.yml +++ b/.github/workflows/docker-build-smoke-tests-fake-backend.yml @@ -37,6 +37,6 @@ jobs: registry: public.ecr.aws - name: Build and push docker image - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 with: arguments: :smoke-tests:fakebackend:jib diff --git a/.github/workflows/e2e-tests-app-with-java-agent.yml b/.github/workflows/e2e-tests-app-with-java-agent.yml new file mode 100644 index 0000000000..37f303b258 --- /dev/null +++ b/.github/workflows/e2e-tests-app-with-java-agent.yml @@ -0,0 +1,184 @@ +name: End to End Tests with Java Agent in Sample App +on: + workflow_call: + inputs: + aws-region: + required: true + type: string + image_tag: + required: true + type: string + caller-workflow-name: + required: true + type: string 
+ +permissions: + id-token: write + contents: read + +env: + # The precense of this env var is required. It is not redundant + AWS_DEFAULT_REGION: ${{ inputs.aws-region }} + +jobs: + build_Images_For_Testing_Sample_App_With_Java_Agent: + runs-on: ubuntu-latest + steps: + - name: Checkout Java Instrumentation repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + + # cache local patch outputs + - name: Cache local Maven repository + id: cache-local-maven-repo + uses: actions/cache@v3 + with: + path: | + ~/.m2/repository/io/opentelemetry/ + key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/opentelemetry-java*.patch') }} + + - name: Publish patched dependencies to maven local + uses: ./.github/actions/patch-dependencies + if: steps.cache-local-maven-repo.outputs.cache-hit != 'true' + with: + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + gpg_password: ${{ secrets.GPG_PASSPHRASE }} + + - name: Validate the checksums of Gradle Wrapper + uses: gradle/wrapper-validation-action@v1 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }} + aws-region: ${{ env.AWS_DEFAULT_REGION }} + + - name: Log in to AWS ECR + uses: docker/login-action@v3 + with: + registry: public.ecr.aws + + - name: Build and push agent and testing docker images with Gradle + uses: gradle/gradle-build-action@v3 + with: + arguments: jib + env: + COMMIT_HASH: ${{ inputs.image_tag }} + + - uses: codecov/codecov-action@v3 + + test_Spring_App_With_Java_Agent: + name: Test Spring App with AWS OTel Java agent + needs: [ build_Images_For_Testing_Sample_App_With_Java_Agent ] + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: 'temurin' + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + 
with: + role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }} + aws-region: ${{ env.AWS_DEFAULT_REGION }} + - name: Log in to AWS ECR + uses: docker/login-action@v3 + with: + registry: public.ecr.aws + + - name: Run test containers + run: docker-compose up --abort-on-container-exit + working-directory: .github/collector + env: + INSTANCE_ID: ${{ github.run_id }}-${{ github.run_number }} + LISTEN_ADDRESS: 0.0.0.0:8080 + APP_IMAGE: public.ecr.aws/aws-otel-test/aws-otel-java-springboot:${{ inputs.image_tag }} + VALIDATOR_COMMAND: -c springboot-otel-trace-metric-validation.yml --endpoint http://app:8080 --metric-namespace aws-otel-integ-test -t ${{ github.run_id }}-${{ github.run_number }} + + test_Spark_App_With_Java_Agent: + name: Test Spark App with AWS OTel Java agent + needs: [ build_Images_For_Testing_Sample_App_With_Java_Agent ] + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: 'temurin' + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }} + aws-region: ${{ env.AWS_DEFAULT_REGION }} + - name: Log in to AWS ECR + uses: docker/login-action@v3 + with: + registry: public.ecr.aws + + - name: Run test containers + run: docker-compose up --abort-on-container-exit + working-directory: .github/collector + env: + INSTANCE_ID: ${{ github.run_id }}-${{ github.run_number }} + LISTEN_ADDRESS: 0.0.0.0:4567 + APP_IMAGE: public.ecr.aws/aws-otel-test/aws-otel-java-spark:${{ inputs.image_tag }} + VALIDATOR_COMMAND: -c spark-otel-trace-metric-validation.yml --endpoint http://app:4567 --metric-namespace aws-otel-integ-test -t ${{ github.run_id }}-${{ github.run_number }} + + test_Spark_AWS_SDK_V1_App_With_Java_Agent: + name: Test Spark App (AWS SDK v1) with AWS OTel Java agent + needs: [ build_Images_For_Testing_Sample_App_With_Java_Agent ] + runs-on: ubuntu-latest + + steps: + - uses: 
actions/checkout@v4 + + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: 'temurin' + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }} + aws-region: ${{ env.AWS_DEFAULT_REGION }} + - name: Log in to AWS ECR + uses: docker/login-action@v3 + with: + registry: public.ecr.aws + + - name: Run test containers + run: docker-compose up --abort-on-container-exit + working-directory: .github/collector + env: + INSTANCE_ID: ${{ github.run_id }}-${{ github.run_number }} + LISTEN_ADDRESS: 0.0.0.0:4567 + APP_IMAGE: public.ecr.aws/aws-otel-test/aws-otel-java-spark-awssdkv1:${{ inputs.image_tag }} + VALIDATOR_COMMAND: -c spark-otel-trace-metric-validation.yml --endpoint http://app:4567 --metric-namespace aws-otel-integ-test -t ${{ github.run_id }}-${{ github.run_number }} + + # publish status + publish-build-status: + needs: [ test_Spring_App_With_Java_Agent, test_Spark_App_With_Java_Agent, test_Spark_AWS_SDK_V1_App_With_Java_Agent ] + if: ${{ always() }} + uses: ./.github/workflows/publish-status.yml + with: + namespace: 'ADOT/GitHubActions' + repository: ${{ github.repository }} + branch: ${{ github.ref_name }} + workflow: ${{ inputs.caller-workflow-name }} + success: ${{ needs.test_Spring_App_With_Java_Agent.result == 'success' && + needs.test_Spark_App_With_Java_Agent.result == 'success' && + needs.test_Spark_AWS_SDK_V1_App_With_Java_Agent.result == 'success' }} + region: us-west-2 + secrets: + roleArn: ${{ secrets.METRICS_ROLE_ARN }} diff --git a/.github/workflows/e2e-tests-with-operator.yml b/.github/workflows/e2e-tests-with-operator.yml new file mode 100644 index 0000000000..b932076411 --- /dev/null +++ b/.github/workflows/e2e-tests-with-operator.yml @@ -0,0 +1,212 @@ +name: End to End Tests with Operator +on: + workflow_call: + inputs: + aws-region: + required: true + type: string + image_tag: + required: true + type: string + image_uri: + required: true + 
type: string + test_ref: + required: true + type: string + caller-workflow-name: + required: true + type: string + +env: + TESTING_FRAMEWORK_REPO: aws-observability/aws-otel-test-framework + NUM_BATCHES: 2 + DDB_TABLE_NAME: BatchTestCache + # The precense of this env var is required. It is not redundant + AWS_DEFAULT_REGION: ${{ inputs.aws-region }} + +permissions: + id-token: write + contents: read + +jobs: + # job to build testbatches for e2e integration test + build-sample-app: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + + # cache local patch outputs + - name: Cache local Maven repository + id: cache-local-maven-repo + uses: actions/cache@v3 + with: + path: | + ~/.m2/repository/io/opentelemetry/ + key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/opentelemetry-java*.patch') }} + + - name: Publish patched dependencies to maven local + uses: ./.github/actions/patch-dependencies + if: steps.cache-local-maven-repo.outputs.cache-hit != 'true' + with: + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + gpg_password: ${{ secrets.GPG_PASSPHRASE }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }} + aws-region: ${{ env.AWS_DEFAULT_REGION }} + + - name: Log in to AWS ECR + uses: docker/login-action@v3 + with: + registry: public.ecr.aws + + - name: Build and push Sample-Apps without Auto-Instrumentation Agent + uses: gradle/gradle-build-action@v3 + with: + arguments: jibBuildWithoutAgent + env: + COMMIT_HASH: ${{ inputs.image_tag }} + + get-testing-suites: + runs-on: ubuntu-latest + outputs: + test-case-batch-key: ${{ steps.set-batches.outputs.batch-keys }} + test-case-batch-value: ${{ steps.set-batches.outputs.batch-values }} + steps: + - name: Checkout Testing Framework repository + uses: actions/checkout@v4 + with: + repository: 
${{ env.TESTING_FRAMEWORK_REPO }} + path: testing-framework + ref: ${{ inputs.test_ref }} + + - name: Checkout Java Instrumentation repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + path: aws-otel-java-instrumentation + + - name: Set up Go 1.x + uses: actions/setup-go@v5 + with: + go-version: '~1.18.9' + + - name: Create test batch key values + id: set-batches + working-directory: testing-framework/tools/batchTestGenerator + run: | + go build + ./batchTestGenerator github --testCaseFilePath=$GITHUB_WORKSPACE/aws-otel-java-instrumentation/.github/workflows/testcases.json --maxBatch=${{ env.NUM_BATCHES }} \ + --include=EKS_ADOT_OPERATOR,EKS_ADOT_OPERATOR_ARM64 + + - name: List testing suites + run: | + echo ${{ steps.set-batches.outputs.batch-keys }} + echo ${{ steps.set-batches.outputs.batch-values }} + + + # job to run the e2e integration tests + run-batch-job: + runs-on: ubuntu-latest + needs: [ build-sample-app, get-testing-suites ] + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.get-testing-suites.outputs.test-case-batch-key) }} + + steps: + # required for versioning + - name: Checkout Java Instrumentation repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + path: aws-otel-java-instrumentation + + - name: Set up JDK 11 + uses: actions/setup-java@v4 + with: + distribution: 'zulu' + java-version: '11' + + - name: Set up terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "~1.5" + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.JAVA_INSTRUMENTATION_INTEG_TEST_ARN}} + aws-region: us-west-2 + # 4 hours + role-duration-seconds: 14400 + + - name: Checkout Testing Framework repository + uses: actions/checkout@v4 + with: + repository: ${{ env.TESTING_FRAMEWORK_REPO }} + path: testing-framework + ref: ${{ inputs.test_ref }} + + - name: create test-case-batch file + run: | + jsonStr='${{ 
needs.get-testing-suites.outputs.test-case-batch-value }}' + jsonStr="$(jq -r '.${{ matrix.BatchKey }} | join("\n")' <<< "${jsonStr}")" + echo "$jsonStr" >> testing-framework/terraform/test-case-batch + cat testing-framework/terraform/test-case-batch + + - name: Get TTL_DATE for cache + id: date + run: echo "ttldate=$(date -u -d "+7 days" +%s)" >> $GITHUB_OUTPUT + + - name: run tests + working-directory: testing-framework/terraform + run: | + export TTL_DATE=${{ steps.date.outputs.ttldate }} + export TF_VAR_java_auto_instrumentation_tag=${{ inputs.image_tag }} + export TF_VAR_java_auto_instrumentation_repository="${{ inputs.image_uri }}" + export DDB_BATCH_CACHE_SK=${{ inputs.image_tag }} + make execute-batch-test + + - name: output cache misses + if: ${{ failure() }} + working-directory: testing-framework/terraform + run: | + export DDB_BATCH_CACHE_SK=${{ inputs.image_tag }} + make checkCacheHits + + # This is here just in case workflow cancel + # We first kill terraform processes to ensure that no state + # file locks are being held from SIGTERMS dispatched in previous + # steps. 
+ - name: Destroy resources + if: ${{ cancelled() }} + shell: bash {0} + working-directory: testing-framework/terraform + run: | + ps -ef | grep terraform | grep -v grep | awk '{print $2}' | xargs -n 1 kill + make terraformCleanup + + # publish status + publish-build-status: + needs: [ run-batch-job ] + if: ${{ always() }} + uses: ./.github/workflows/publish-status.yml + with: + namespace: 'ADOT/GitHubActions' + repository: ${{ github.repository }} + branch: ${{ github.ref_name }} + workflow: ${{ inputs.caller-workflow-name }} + success: ${{ needs.run-batch-job.result == 'success' }} + region: us-west-2 + secrets: + roleArn: ${{ secrets.METRICS_ROLE_ARN }} \ No newline at end of file diff --git a/.github/workflows/main-build.yml b/.github/workflows/main-build.yml index d2bba62289..36e8993d0a 100644 --- a/.github/workflows/main-build.yml +++ b/.github/workflows/main-build.yml @@ -6,12 +6,8 @@ on: - "release/v*" env: AWS_DEFAULT_REGION: us-east-1 - TEST_TAG: public.ecr.aws/aws-observability/adot-autoinstrumentation-java:test STAGING_ECR_REGISTRY: 611364707713.dkr.ecr.us-west-2.amazonaws.com STAGING_ECR_REPOSITORY: adot-autoinstrumentation-java-operator-staging - TESTING_FRAMEWORK_REPO: aws-observability/aws-otel-test-framework - NUM_BATCHES: 2 - DDB_TABLE_NAME: BatchTestCache concurrency: group: java-agent-main-build @@ -25,7 +21,6 @@ jobs: testpatch: name: Test patches applied to dependencies runs-on: aws-otel-java-instrumentation_ubuntu-latest_32-core - if: ${{ startsWith(github.ref_name, 'release/v') }} steps: - uses: actions/checkout@v4 - uses: actions/setup-java@v4 @@ -48,7 +43,6 @@ jobs: - uses: ./.github/actions/patch-dependencies with: run_tests: "true" - branch: ${{ github.ref_name }} gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} gpg_password: ${{ secrets.GPG_PASSPHRASE }} @@ -56,7 +50,9 @@ jobs: runs-on: ubuntu-latest outputs: java_agent_tag: ${{ steps.java_agent_versioning.outputs.STAGING_TAG}} - staging-image-name: ${{ 
steps.imageNameOutput.outputs.imageName }} + staging-image: ${{ steps.imageOutput.outputs.stagingImage }} + staging_registry: ${{ steps.imageOutput.outputs.stagingRegistry }} + staging_repository: ${{ steps.imageOutput.outputs.stagingRepository }} steps: - uses: actions/checkout@v4 with: @@ -72,13 +68,11 @@ jobs: with: path: | ~/.m2/repository/io/opentelemetry/ - key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/**/opentelemetry-java-*.patch') }} + key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/opentelemetry-java*.patch') }} - name: Publish patched dependencies to maven local uses: ./.github/actions/patch-dependencies - if: ${{ startsWith(github.ref_name, 'release/v') }} with: - branch: ${{ github.ref_name }} gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} gpg_password: ${{ secrets.GPG_PASSPHRASE }} @@ -96,7 +90,7 @@ jobs: registry: public.ecr.aws - name: Build snapshot with Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 with: arguments: build integrationTests snapshot --stacktrace -PenableCoverage=true -PlocalDocker=true env: @@ -105,85 +99,49 @@ jobs: GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} - - name: Pull base image of Contract Tests Sample Apps - run: docker pull public.ecr.aws/docker/library/amazoncorretto:17-alpine - - - name: Run contract tests - uses: gradle/gradle-build-action@v2 - with: - arguments: contractTests -PlocalDocker=true - - name: Get current version + id: getADOTJavaVersion shell: bash - run: | - echo "ADOT_JAVA_VERSION=$(./gradlew printVersion -q )" >> $GITHUB_ENV - - - name: Build and push Sample-Apps without Auto-Instrumentation Agent - uses: gradle/gradle-build-action@v2 - with: - arguments: jibBuildWithoutAgent - env: - COMMIT_HASH: ${{ github.sha }} - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Build image for testing - 
uses: docker/build-push-action@v5 - with: - push: false - build-args: "ADOT_JAVA_VERSION=${{ env.ADOT_JAVA_VERSION }}" - context: . - platforms: linux/amd64 - tags: ${{ env.TEST_TAG }} - load: true - - - name: Test docker image - shell: bash - run: .github/scripts/test-adot-javaagent-image.sh "${{ env.TEST_TAG }}" "${{ env.ADOT_JAVA_VERSION }}" - - - name: Upload to GitHub Actions - uses: actions/upload-artifact@v3 - with: - name: aws-opentelemetry-agent.jar - path: otelagent/build/libs/aws-opentelemetry-agent-*.jar + run: echo "adot_java_version=$(./gradlew printVersion -q )" >> $GITHUB_OUTPUT - name: Get ADOT_JAVA_AGENT Image Tag id: java_agent_versioning run: | shortsha="$(git rev-parse --short HEAD)" - java_agent_tag=${{ env.ADOT_JAVA_VERSION }}-$shortsha + java_agent_tag=${{ steps.getADOTJavaVersion.outputs.adot_java_version }}-$shortsha echo "STAGING_TAG=$java_agent_tag" >> $GITHUB_OUTPUT - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ secrets.JAVA_INSTRUMENTATION_SNAPSHOT_ECR }} - aws-region: us-west-2 + - name: Set image to output + id: imageOutput + run: | + echo "stagingRegistry=${{ env.STAGING_ECR_REGISTRY }}" >> $GITHUB_OUTPUT + echo "stagingRepository=${{ env.STAGING_ECR_REPOSITORY }}" >> $GITHUB_OUTPUT + echo "stagingImage=${{ env.STAGING_ECR_REGISTRY }}/${{ env.STAGING_ECR_REPOSITORY }}:${{ steps.java_agent_versioning.outputs.STAGING_TAG }}" >> $GITHUB_OUTPUT - - name: Login to private staging ecr - uses: docker/login-action@v3 + - name: Build and Push Java Agent Image and Execute cpUtility Tests + uses: ./.github/actions/cpUtility-testing with: - registry: ${{ env.STAGING_ECR_REGISTRY }} - env: - AWS_REGION: us-west-2 + aws-region: us-west-2 + image_uri_with_tag: ${{ steps.imageOutput.outputs.stagingImage }} + image_registry: ${{ steps.imageOutput.outputs.stagingRegistry }} + adot-java-version: ${{ steps.getADOTJavaVersion.outputs.adot_java_version }} + snapshot-ecr-role: ${{ 
secrets.JAVA_INSTRUMENTATION_SNAPSHOT_ECR }} - - name: Build and push staging image for e2e integration test - uses: docker/build-push-action@v5 + - name: Upload to GitHub Actions + uses: actions/upload-artifact@v3 with: - push: true - build-args: "ADOT_JAVA_VERSION=${{ env.ADOT_JAVA_VERSION }}" - context: . - platforms: linux/amd64,linux/arm64 - tags: | - ${{ env.STAGING_ECR_REGISTRY }}/${{ env.STAGING_ECR_REPOSITORY }}:${{ steps.java_agent_versioning.outputs.STAGING_TAG }} + name: aws-opentelemetry-agent.jar + path: otelagent/build/libs/aws-opentelemetry-agent-*.jar - - name: Set image name to output - id: imageNameOutput - run: echo "imageName=${{ env.STAGING_ECR_REGISTRY }}/${{ env.STAGING_ECR_REPOSITORY }}:${{ steps.java_agent_versioning.outputs.STAGING_TAG }}" >> "$GITHUB_OUTPUT" + default-region-output: + runs-on: ubuntu-latest + outputs: + aws_default_region: ${{ steps.default_region_output.outputs.aws_default_region }} + steps: + - name: Set default region output + id: default_region_output + run: | + echo "aws_default_region=${{ env.AWS_DEFAULT_REGION }}" >> $GITHUB_OUTPUT create-test-ref: runs-on: ubuntu-latest @@ -199,148 +157,42 @@ jobs: echo "ref=terraform" >> $GITHUB_OUTPUT fi - # job to build testbatches for e2e integration test - get-testing-suites: - runs-on: ubuntu-latest - needs: [build, create-test-ref] - outputs: - test-case-batch-key: ${{ steps.set-batches.outputs.batch-keys }} - test-case-batch-value: ${{ steps.set-batches.outputs.batch-values }} - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - repository: ${{ env.TESTING_FRAMEWORK_REPO }} - path: testing-framework - ref: ${{ needs.create-test-ref.outputs.testRef }} - - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - path: aws-otel-java-instrumentation - - - name: Set up Go 1.x - uses: actions/setup-go@v4 - with: - go-version: '~1.18.9' - - - name: Create test batch key values - id: set-batches - run: | - cd testing-framework/tools/batchTestGenerator 
- go build - ./batchTestGenerator github --testCaseFilePath=$GITHUB_WORKSPACE/aws-otel-java-instrumentation/.github/workflows/testcases.json --maxBatch=${{ env.NUM_BATCHES }} \ - --include=EKS_ADOT_OPERATOR,EKS_ADOT_OPERATOR_ARM64 - - - name: List testing suites - run: | - echo ${{ steps.set-batches.outputs.batch-keys }} - echo ${{ steps.set-batches.outputs.batch-values }} - - # job to run the e2e integration tests - run-batch-job: - runs-on: ubuntu-latest - needs: [build, create-test-ref, get-testing-suites] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.get-testing-suites.outputs.test-case-batch-key) }} - - steps: - # required for versioning - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - path: aws-otel-java-instrumentation - - - name: Set up JDK 11 - uses: actions/setup-java@v4 - with: - distribution: 'zulu' - java-version: '11' - - - name: Set up terraform - uses: hashicorp/setup-terraform@v3 - with: - terraform_version: "~1.5" - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ secrets.JAVA_INSTRUMENTATION_INTEG_TEST_ARN}} - aws-region: us-west-2 - # 4 hours - role-duration-seconds: 14400 - - - name: Checkout testing framework - uses: actions/checkout@v4 - with: - repository: ${{ env.TESTING_FRAMEWORK_REPO }} - path: testing-framework - ref: ${{ needs.create-test-ref.outputs.testRef }} - - - name: create test-case-batch file - run: | - jsonStr='${{ needs.get-testing-suites.outputs.test-case-batch-value }}' - jsonStr="$(jq -r '.${{ matrix.BatchKey }} | join("\n")' <<< "${jsonStr}")" - echo "$jsonStr" >> testing-framework/terraform/test-case-batch - cat testing-framework/terraform/test-case-batch - - - name: Get TTL_DATE for cache - id: date - run: echo "ttldate=$(date -u -d "+7 days" +%s)" >> $GITHUB_OUTPUT - - - name: run tests - run: | - export TTL_DATE=${{ steps.date.outputs.ttldate }} - export TF_VAR_java_auto_instrumentation_tag=${{ 
needs.build.outputs.java_agent_tag }} - export TF_VAR_java_auto_instrumentation_repository="${{ env.STAGING_ECR_REGISTRY }}/${{ env.STAGING_ECR_REPOSITORY }}" - export DDB_BATCH_CACHE_SK=${{ needs.build.outputs.java_agent_tag }} - cd testing-framework/terraform - make execute-batch-test - - - name: output cache misses - if: ${{ failure() }} - run: | - export DDB_BATCH_CACHE_SK=${{ needs.build.outputs.java_agent_tag }} - cd testing-framework/terraform - make checkCacheHits - - # This is here just in case workflow cancel - # We first kill terraform processes to ensure that no state - # file locks are being held from SIGTERMS dispatched in previous - # steps. - - name: Destroy resources - if: ${{ cancelled() }} - shell: bash {0} - run: | - ps -ef | grep terraform | grep -v grep | awk '{print $2}' | xargs -n 1 kill - cd testing-framework/terraform - make terraformCleanup + e2e-operator-test: + concurrency: + group: e2e-adot-agent-operator-test + cancel-in-progress: false + needs: [ build, create-test-ref, default-region-output ] + uses: ./.github/workflows/e2e-tests-with-operator.yml + secrets: inherit + with: + aws-region: ${{ needs.default-region-output.outputs.aws_default_region }} + image_tag: ${{ needs.build.outputs.java_agent_tag }} + image_uri: ${{ needs.build.outputs.staging_registry }}/${{ needs.build.outputs.staging_repository }} + test_ref: ${{ needs.create-test-ref.outputs.testRef }} + caller-workflow-name: 'main-build' + # E2E tests where SampleApp has Java Agent + e2e-test: + needs: [build, default-region-output] + uses: ./.github/workflows/e2e-tests-app-with-java-agent.yml + secrets: inherit + with: + aws-region: ${{ needs.default-region-output.outputs.aws_default_region }} + image_tag: ${{ github.sha }} + caller-workflow-name: 'main-build' - build_Images_For_Testing_Sample_App_With_Java_Agent: + # AppSignals Contract Tests + contract-tests: runs-on: ubuntu-latest needs: build steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: 
actions/setup-java@v4 with: java-version: 17 - distribution: temurin - - # cache local patch outputs - - name: Cache local Maven repository - uses: actions/cache@v3 - with: - path: | - ~/.m2/repository/io/opentelemetry/ - key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/**/opentelemetry-java-*.patch') }} - + distribution: 'temurin' - uses: gradle/wrapper-validation-action@v1 - name: Configure AWS Credentials @@ -354,123 +206,48 @@ jobs: with: registry: public.ecr.aws - - name: Build and push agent and testing docker images with Gradle - uses: gradle/gradle-build-action@v2 - with: - arguments: jib - env: - COMMIT_HASH: ${{ github.sha }} - - - uses: codecov/codecov-action@v3 - - test_Spring_App_With_Java_Agent: - name: Test Spring App with AWS OTel Java agent - needs: build_Images_For_Testing_Sample_App_With_Java_Agent - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - uses: actions/setup-java@v4 - with: - java-version: 17 - distribution: 'temurin' - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }} - aws-region: ${{ env.AWS_DEFAULT_REGION }} - - name: Log in to AWS ECR - uses: docker/login-action@v3 - with: - registry: public.ecr.aws - - - name: Run test containers - run: docker-compose up --abort-on-container-exit - working-directory: .github/collector - env: - INSTANCE_ID: ${{ github.run_id }}-${{ github.run_number }} - LISTEN_ADDRESS: 0.0.0.0:8080 - APP_IMAGE: public.ecr.aws/aws-otel-test/aws-otel-java-springboot:${{ github.sha }} - VALIDATOR_COMMAND: -c springboot-otel-trace-metric-validation.yml --endpoint http://app:8080 --metric-namespace aws-otel-integ-test -t ${{ github.run_id }}-${{ github.run_number }} - - test_Spark_App_With_Java_Agent: - name: Test Spark App with AWS OTel Java agent - needs: build_Images_For_Testing_Sample_App_With_Java_Agent - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - uses: 
actions/setup-java@v4 - with: - java-version: 17 - distribution: 'temurin' - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }} - aws-region: ${{ env.AWS_DEFAULT_REGION }} - - name: Log in to AWS ECR - uses: docker/login-action@v3 + # cache local patch outputs + - name: Cache local Maven repository + id: cache-local-maven-repo + uses: actions/cache@v3 with: - registry: public.ecr.aws - - - name: Run test containers - run: docker-compose up --abort-on-container-exit - working-directory: .github/collector - env: - INSTANCE_ID: ${{ github.run_id }}-${{ github.run_number }} - LISTEN_ADDRESS: 0.0.0.0:4567 - APP_IMAGE: public.ecr.aws/aws-otel-test/aws-otel-java-spark:${{ github.sha }} - VALIDATOR_COMMAND: -c spark-otel-trace-metric-validation.yml --endpoint http://app:4567 --metric-namespace aws-otel-integ-test -t ${{ github.run_id }}-${{ github.run_number }} - - test_Spark_AWS_SDK_V1_App_With_Java_Agent: - name: Test Spark App (AWS SDK v1) with AWS OTel Java agent - needs: build_Images_For_Testing_Sample_App_With_Java_Agent - runs-on: ubuntu-latest + path: | + ~/.m2/repository/io/opentelemetry/ + key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/opentelemetry-java*.patch') }} - steps: - - uses: actions/checkout@v4 + - name: Pull base image of Contract Tests Sample Apps + run: docker pull public.ecr.aws/docker/library/amazoncorretto:17-alpine - - uses: actions/setup-java@v4 - with: - java-version: 17 - distribution: 'temurin' - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }} - aws-region: ${{ env.AWS_DEFAULT_REGION }} - - name: Log in to AWS ECR - uses: docker/login-action@v3 + - name: Build snapshot with Gradle + uses: gradle/gradle-build-action@v3 with: - registry: public.ecr.aws - - - name: Run test containers - run: docker-compose up --abort-on-container-exit - 
working-directory: .github/collector - env: - INSTANCE_ID: ${{ github.run_id }}-${{ github.run_number }} - LISTEN_ADDRESS: 0.0.0.0:4567 - APP_IMAGE: public.ecr.aws/aws-otel-test/aws-otel-java-spark-awssdkv1:${{ github.sha }} - VALIDATOR_COMMAND: -c spark-otel-trace-metric-validation.yml --endpoint http://app:4567 --metric-namespace aws-otel-integ-test -t ${{ github.run_id }}-${{ github.run_number }} + arguments: contractTests -PlocalDocker=true - e2e-test: + # AppSignals specific e2e tests + appsignals-e2e-eks-test: concurrency: group: e2e-adot-test cancel-in-progress: false - needs: build + needs: [build, default-region-output] uses: ./.github/workflows/appsignals-e2e-eks-test.yml secrets: inherit with: - aws-region: us-east-1 + aws-region: ${{ needs.default-region-output.outputs.aws_default_region }} test-cluster-name: "e2e-adot-test" - appsignals-adot-image-name: ${{ needs.build.outputs.staging-image-name }} + appsignals-adot-image-name: ${{ needs.build.outputs.staging-image }} + caller-workflow-name: 'main-build' + + # AppSignals specific e2e tests for ec2 + appsignals-e2e-ec2-test: + needs: [build, default-region-output] + uses: ./.github/workflows/appsignals-e2e-ec2-test.yml + secrets: inherit + with: + aws-region: ${{ needs.default-region-output.outputs.aws_default_region }} caller-workflow-name: 'main-build' publish-build-status: - needs: [test_Spring_App_With_Java_Agent, test_Spark_App_With_Java_Agent, test_Spark_AWS_SDK_V1_App_With_Java_Agent, run-batch-job] + needs: [ build, contract-tests ] if: ${{ always() }} uses: ./.github/workflows/publish-status.yml with: @@ -478,10 +255,8 @@ jobs: repository: ${{ github.repository }} branch: ${{ github.ref_name }} workflow: main-build - success: ${{ needs.test_Spring_App_With_Java_Agent.result == 'success' && - needs.test_Spark_App_With_Java_Agent.result == 'success' && - needs.test_Spark_AWS_SDK_V1_App_With_Java_Agent.result == 'success' && - needs.run-batch-job.result == 'success' }} + success: ${{ 
needs.build.result == 'success' && + needs.contract-tests.result == 'success' }} region: us-west-2 secrets: roleArn: ${{ secrets.METRICS_ROLE_ARN }} diff --git a/.github/workflows/nightly-upstream-snapshot-build.yml b/.github/workflows/nightly-upstream-snapshot-build.yml index 4f78c4135b..eacd1a7931 100644 --- a/.github/workflows/nightly-upstream-snapshot-build.yml +++ b/.github/workflows/nightly-upstream-snapshot-build.yml @@ -6,7 +6,8 @@ on: env: AWS_DEFAULT_REGION: us-east-1 - IMAGE_TAG: 611364707713.dkr.ecr.us-west-2.amazonaws.com/adot-autoinstrumentation-java-nightly:nightly + IMAGE_REGISTRY: 611364707713.dkr.ecr.us-west-2.amazonaws.com + IMAGE_NAME: adot-autoinstrumentation-java-nightly permissions: id-token: write @@ -16,15 +17,21 @@ jobs: build: runs-on: ubuntu-latest outputs: - release-candidate-image: ${{ steps.imageNameOutput.outputs.imageName }} + time_stamp_tag: ${{ steps.generate_time_stamp.outputs.nowTimeTag}} + release-candidate-image: ${{ steps.imageOutput.outputs.rcImage }} + image_registry: ${{ steps.imageOutput.outputs.imageRegistry }} + image_name: ${{ steps.imageOutput.outputs.imageName }} + steps: - uses: actions/checkout@v4 with: fetch-depth: 0 + - uses: actions/setup-java@v4 with: java-version: 17 distribution: 'temurin' + - uses: gradle/wrapper-validation-action@v1 - name: Configure AWS Credentials @@ -38,90 +45,120 @@ jobs: with: registry: public.ecr.aws - - name: Pull base image of Contract Tests Sample Apps - run: docker pull public.ecr.aws/docker/library/amazoncorretto:17-alpine - - name: Build snapshot with Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 with: - arguments: build contractTests --stacktrace -PenableCoverage=true -PtestUpstreamSnapshots=true -PlocalDocker=true + arguments: build --stacktrace -PenableCoverage=true -PtestUpstreamSnapshots=true env: PUBLISH_USERNAME: ${{ secrets.PUBLISH_USERNAME }} PUBLISH_PASSWORD: ${{ secrets.PUBLISH_PASSWORD }} GPG_PRIVATE_KEY: ${{ 
secrets.GPG_PRIVATE_KEY }} GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 + - name: Get current version + id: getADOTJavaVersion + shell: bash + run: echo "adot_java_version=$(./gradlew printVersion -q -PtestUpstreamSnapshots=true )" >> $GITHUB_OUTPUT + + - name: Generate timestamp for image tag + id: generate_time_stamp + run: echo "nowTimeTag=$(date +'%Y-%m-%dT%H-%M-%S')" >> $GITHUB_OUTPUT + + - name: Set image to output + id: imageOutput + run: | + echo "imageRegistry=${{ env.IMAGE_REGISTRY }}" >> $GITHUB_OUTPUT + echo "imageName=${{ env.IMAGE_NAME }}" >> $GITHUB_OUTPUT + echo "rcImage=${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.generate_time_stamp.outputs.nowTimeTag }}" >> $GITHUB_OUTPUT + + - name: Build and Push Java Agent Image and Execute cpUtility Tests + uses: ./.github/actions/cpUtility-testing with: - role-to-assume: ${{ secrets.JAVA_INSTRUMENTATION_SNAPSHOT_ECR }} aws-region: us-west-2 + image_uri_with_tag: ${{ steps.imageOutput.outputs.rcImage }} + image_registry: ${{ steps.imageOutput.outputs.imageRegistry }} + adot-java-version: ${{ steps.getADOTJavaVersion.outputs.adot_java_version }} + snapshot-ecr-role: ${{ secrets.JAVA_INSTRUMENTATION_SNAPSHOT_ECR }} - - name: Login to private aws ecr - uses: docker/login-action@v3 + - name: Upload to GitHub Actions + uses: actions/upload-artifact@v3 with: - registry: 611364707713.dkr.ecr.us-west-2.amazonaws.com + name: aws-opentelemetry-agent.jar + path: otelagent/build/libs/aws-opentelemetry-agent-*.jar - - name: Get current version - shell: bash + default-region-output: + runs-on: ubuntu-latest + outputs: + aws_default_region: ${{ steps.default_region_output.outputs.aws_default_region }} + steps: + - name: Set default region output + id: default_region_output run: | - echo "ADOT_JAVA_VERSION=$(./gradlew printVersion -q -PtestUpstreamSnapshots=true )" >> $GITHUB_ENV + echo "aws_default_region=${{ 
env.AWS_DEFAULT_REGION }}" >> $GITHUB_OUTPUT - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + e2e-operator-test: + concurrency: + group: e2e-adot-agent-operator-test + cancel-in-progress: false + needs: [build, default-region-output] + uses: ./.github/workflows/e2e-tests-with-operator.yml + secrets: inherit + with: + aws-region: ${{ needs.default-region-output.outputs.aws_default_region }} + image_tag: ${{ needs.build.outputs.time_stamp_tag }} + image_uri: ${{ needs.build.outputs.image_registry }}/${{ needs.build.outputs.image_name }} + test_ref: 'terraform' + caller-workflow-name: 'nightly-upstream-snapshot-build' - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + # AppSignals Contract Tests + contract-tests: + runs-on: ubuntu-latest + needs: build + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: 'temurin' + - uses: gradle/wrapper-validation-action@v1 - - name: Build image for testing - uses: docker/build-push-action@v5 + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 with: - push: false - build-args: "ADOT_JAVA_VERSION=${{ env.ADOT_JAVA_VERSION }}" - context: . - platforms: linux/amd64 - tags: ${{ env.IMAGE_TAG }} - load: true - - - name: Test docker image - shell: bash - run: .github/scripts/test-adot-javaagent-image.sh "${{ env.IMAGE_TAG }}" "${{ env.ADOT_JAVA_VERSION }}" + role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }} + aws-region: ${{ env.AWS_DEFAULT_REGION }} - - name: Build and push image - uses: docker/build-push-action@v5 + - name: Log in to AWS ECR + uses: docker/login-action@v3 with: - push: true - build-args: "ADOT_JAVA_VERSION=${{ env.ADOT_JAVA_VERSION }}" - context: . 
- platforms: linux/amd64,linux/arm64 - tags: | - ${{ env.IMAGE_TAG }} + registry: public.ecr.aws - - name: Set image name to output - id: imageNameOutput - run: echo "imageName=${{ env.IMAGE_NAME }}" >> "$GITHUB_OUTPUT" + - name: Pull base image of Contract Tests Sample Apps + run: docker pull public.ecr.aws/docker/library/amazoncorretto:17-alpine - - name: Upload to GitHub Actions - uses: actions/upload-artifact@v3 + - name: Build snapshot with Gradle + uses: gradle/gradle-build-action@v3 with: - name: aws-opentelemetry-agent.jar - path: otelagent/build/libs/aws-opentelemetry-agent-*.jar + arguments: contractTests -PtestUpstreamSnapshots=true -PlocalDocker=true - e2e-test: + # AppSignals specific e2e tests + appsignals-e2e-test: concurrency: group: e2e-adot-test cancel-in-progress: false - needs: build + needs: [build,default-region-output] uses: ./.github/workflows/appsignals-e2e-eks-test.yml secrets: inherit with: - aws-region: us-east-1 + aws-region: ${{ needs.default-region-output.outputs.aws_default_region }} test-cluster-name: "e2e-adot-test" appsignals-adot-image-name: ${{ needs.build.outputs.release-candidate-image }} caller-workflow-name: 'nightly-upstream-snapshot-build' publish-build-status: - needs: [build] + needs: [ build, contract-tests ] if: ${{ always() }} uses: ./.github/workflows/publish-status.yml with: @@ -129,7 +166,8 @@ jobs: repository: ${{ github.repository }} branch: ${{ github.ref_name }} workflow: nightly-upstream-snapshot-build - success: ${{ needs.build.result == 'success' }} + success: ${{ needs.build.result == 'success' && + needs.contract-tests.result == 'success' }} region: us-west-2 secrets: roleArn: ${{ secrets.METRICS_ROLE_ARN }} diff --git a/.github/workflows/owasp.yml b/.github/workflows/owasp.yml index 01fb3fed14..2e63029d56 100644 --- a/.github/workflows/owasp.yml +++ b/.github/workflows/owasp.yml @@ -21,7 +21,7 @@ jobs: java-version: 17 distribution: 'temurin' - name: Build snapshot with Gradle - uses: 
gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 with: arguments: ":otelagent:dependencyCheckAnalyze" - name: Upload report @@ -43,7 +43,7 @@ jobs: java-version: 17 distribution: 'temurin' - name: Build snapshot with Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 with: arguments: ":javaagent:dependencyCheckAnalyze" - name: Upload report diff --git a/.github/workflows/patch-release-build.yml b/.github/workflows/patch-release-build.yml index fb77ce5a0f..9c1a36357f 100644 --- a/.github/workflows/patch-release-build.yml +++ b/.github/workflows/patch-release-build.yml @@ -90,7 +90,7 @@ jobs: done - name: Build release with Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 with: arguments: build integrationTests -PlocalDocker=true -Prelease.version=${{ github.event.inputs.version }} --stacktrace @@ -136,7 +136,7 @@ jobs: public.ecr.aws/aws-observability/adot-autoinstrumentation-java:v${{ github.event.inputs.version }} - name: Build and Publish release with Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 with: arguments: build final closeAndReleaseSonatypeStagingRepository -Prelease.version=${{ github.event.inputs.version }} --stacktrace env: diff --git a/.github/workflows/pr-build.yml b/.github/workflows/pr-build.yml index c1eff47c51..0f1becaf9b 100644 --- a/.github/workflows/pr-build.yml +++ b/.github/workflows/pr-build.yml @@ -11,7 +11,6 @@ jobs: testpatch: name: Test patches applied to dependencies runs-on: aws-otel-java-instrumentation_ubuntu-latest_32-core - if: ${{ startsWith(github.event.pull_request.base.ref, 'release/v') }} steps: - uses: actions/checkout@v4 @@ -37,9 +36,6 @@ jobs: - uses: ./.github/actions/patch-dependencies with: run_tests: "true" - branch: ${{ github.event.pull_request.base.ref }} - gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} - gpg_password: ${{ secrets.GPG_PASSPHRASE }} build: name: Build on ${{ matrix.os }} @@ 
-67,16 +63,26 @@ jobs: - uses: gradle/wrapper-validation-action@v1 + # cache local patch outputs + - name: Cache local Maven repository + uses: actions/cache@v3 + with: + path: | + ~/.m2/repository/io/opentelemetry/ + key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/opentelemetry-java*.patch') }} + - name: Publish patched dependencies to maven local uses: ./.github/actions/patch-dependencies - if: ${{ startsWith(github.event.pull_request.base.ref, 'release/v') }} + if: ${{ matrix.os != 'windows-latest' }} # Skip patch on windows as it is not possible to build opentelemetry-java on windows + + - name: Dry Run Validator + uses: gradle/gradle-build-action@v3 + if: ${{ matrix.os == 'ubuntu-latest' }} with: - branch: ${{ github.event.pull_request.base.ref }} - gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} - gpg_password: ${{ secrets.GPG_PASSPHRASE }} + arguments: testing:validator:build - name: Build with Gradle with Integration tests - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 if: ${{ matrix.os == 'ubuntu-latest' }} with: arguments: build integrationTests --stacktrace -PenableCoverage=true -PlocalDocker=true @@ -86,7 +92,7 @@ jobs: run: docker pull public.ecr.aws/docker/library/amazoncorretto:17-alpine - name: Run contract tests - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 if: ${{ matrix.os == 'ubuntu-latest' }} with: arguments: contractTests -PlocalDocker=true -i @@ -122,8 +128,8 @@ jobs: run: .github/scripts/test-adot-javaagent-image.sh "${{ env.TEST_TAG }}" "${{ env.ADOT_JAVA_VERSION }}" - name: Build with Gradle - uses: gradle/gradle-build-action@v2 - if: ${{ matrix.os != 'ubuntu-latest' }} + uses: gradle/gradle-build-action@v3 + if: ${{ matrix.os != 'ubuntu-latest' && (hashFiles('.github/patches/opentelemetry-java*.patch') == '' || matrix.os != 'windows-latest' ) }} # build on windows as well unless a patch exists with: arguments: build --stacktrace -PenableCoverage=true - uses: 
codecov/codecov-action@v3 diff --git a/.github/workflows/release-build.yml b/.github/workflows/release-build.yml index 8865805e69..e8e3f4d4a1 100644 --- a/.github/workflows/release-build.yml +++ b/.github/workflows/release-build.yml @@ -7,8 +7,12 @@ on: required: true env: - AWS_DEFAULT_REGION: us-east-1 + AWS_PUBLIC_ECR_REGION: us-east-1 + AWS_PRIVATE_ECR_REGION: us-west-2 TEST_TAG: public.ecr.aws/aws-observability/adot-autoinstrumentation-java:test + PUBLIC_REPOSITORY: public.ecr.aws/aws-observability/adot-autoinstrumentation-java + PRIVATE_REPOSITORY: 020628701572.dkr.ecr.us-west-2.amazonaws.com/adot-autoinstrumentation-java + PRIVATE_REGISTRY: 020628701572.dkr.ecr.us-west-2.amazonaws.com permissions: id-token: write @@ -27,9 +31,7 @@ jobs: - name: Publish patched dependencies to maven local uses: ./.github/actions/patch-dependencies - if: ${{ startsWith(github.ref_name, 'release/v') }} with: - branch: ${{ github.ref_name }} gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} gpg_password: ${{ secrets.GPG_PASSPHRASE }} @@ -37,7 +39,7 @@ jobs: uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }} - aws-region: ${{ env.AWS_DEFAULT_REGION }} + aws-region: ${{ env.AWS_PUBLIC_ECR_REGION }} - name: Log in to AWS ECR uses: docker/login-action@v3 @@ -45,7 +47,7 @@ jobs: registry: public.ecr.aws - name: Build release with Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 with: arguments: build integrationTests -PlocalDocker=true -Prelease.version=${{ github.event.inputs.version }} --stacktrace @@ -53,13 +55,24 @@ jobs: uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN_RELEASE }} - aws-region: ${{ env.AWS_DEFAULT_REGION }} + aws-region: ${{ env.AWS_PUBLIC_ECR_REGION }} - name: Log in to AWS ECR uses: docker/login-action@v3 with: registry: public.ecr.aws + - name: Configure AWS Credentials for Private ECR + uses: 
aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN_RELEASE }} + aws-region: ${{ env.AWS_PRIVATE_ECR_REGION }} + + - name: Log in to AWS private ECR + uses: docker/login-action@v3 + with: + registry: ${{ env.PRIVATE_REGISTRY }} + - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -88,10 +101,11 @@ jobs: context: . platforms: linux/amd64,linux/arm64 tags: | - public.ecr.aws/aws-observability/adot-autoinstrumentation-java:v${{ github.event.inputs.version }} + ${{ env.PUBLIC_REPOSITORY }}:v${{ github.event.inputs.version }} + ${{ env.PRIVATE_REPOSITORY }}:v${{ github.event.inputs.version }} - name: Build and Publish release with Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 with: arguments: build final closeAndReleaseSonatypeStagingRepository -Prelease.version=${{ github.event.inputs.version }} --stacktrace env: diff --git a/.github/workflows/stale-bot.yml b/.github/workflows/stale-bot.yml index c75ddae50f..2104ad0b4f 100644 --- a/.github/workflows/stale-bot.yml +++ b/.github/workflows/stale-bot.yml @@ -22,7 +22,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Mark the issues/pr - uses: actions/stale@v8 + uses: actions/stale@v9 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} #Github workflow will add a temporary token when executing the workflow with: diff --git a/Dockerfile b/Dockerfile index a823338eeb..eb4d7476e3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,7 +13,7 @@ # permissions and limitations under the License. # Stage 1: Build the cp-utility binary -FROM rust:1.74 as builder +FROM rust:1.75 as builder WORKDIR /usr/src/cp-utility COPY ./tools/cp-utility . diff --git a/README.md b/README.md index cbd16b1f80..0f515b791f 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ data from a Java application without any code changes. ## Getting Started -Check out the [getting started documentation](https://aws-otel.github.io/docs/getting-started/java-sdk/trace-auto-instr). 
+Check out the [getting started documentation](https://aws-otel.github.io/docs/getting-started/java-sdk/auto-instr). ## Supported Java libraries and frameworks @@ -45,4 +45,4 @@ In addition to the sample apps in this repository, there are also a set of [stan Please note that as per policy, we're providing support via GitHub on a best effort basis. However, if you have AWS Enterprise Support you can create a ticket and we will provide direct support within the respective SLAs. ## Security issue notifications -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. \ No newline at end of file +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. diff --git a/RELEASING.md b/RELEASING.md index 2a33ed1afe..7d08b2806c 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -61,10 +61,10 @@ Select the branch and provide the version. If you need to patch upstream dependencies, you need: -* Provide patch files for each repository that will need to be patched. These files should be located in `.github/patches/release/v..x` and should be named +* Provide patch files for each repository that will need to be patched. These files should be located in `.github/patches` and should be named using the convention `.patch`. The following repositories are supported: opentelemetry-java, opentelemetry-java-instrumentation and opentelemetry-java-contrib. Provide one patch file per repository. The adot patch version of each upstream dependency should be `-adot` where `version` is the version of the upstream dependency and `number` is the number of this patch that should be incremented from 1 per patch version. 
-* Create a `versions` file in the directory `.github/patches/release/v..x`. This file should contain shell variables with the versions of the tags of the repositories which will receive patches. +* Create a `versions` file in the directory `.github/patches/release`. This file should contain shell variables with the versions of the tags of the repositories which will receive patches. This file should define the following variables: * `OTEL_JAVA_VERSION`. Tag of the opentelemetry-java repository to use. E.g.: `JAVA_OTEL_JAVA_VERSION=v1.21.0` * `OTEL_JAVA_INSTRUMENTATION_VERSION`. Tag of the opentelemetry-java-instrumentation repository to use, e.g.: `OTEL_JAVA_INSTRUMENTATION_VERSION=v1.21.0` diff --git a/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/awssdk/base/AwsSdkBaseTest.java b/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/awssdk/base/AwsSdkBaseTest.java index d304f94eef..9cb019509c 100644 --- a/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/awssdk/base/AwsSdkBaseTest.java +++ b/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/awssdk/base/AwsSdkBaseTest.java @@ -163,6 +163,7 @@ private void assertSemanticConventionsAttributes( assertThat(attributesList) .satisfiesOnlyOnce(assertAttribute(SemanticConventionsConstants.RPC_METHOD, method)) .satisfiesOnlyOnce(assertAttribute(SemanticConventionsConstants.RPC_SERVICE, service)) + .satisfiesOnlyOnce(assertAttribute(SemanticConventionsConstants.RPC_SYSTEM, "aws-api")) .satisfiesOnlyOnce(assertAttribute(SemanticConventionsConstants.NET_PEER_NAME, peerName)) .satisfiesOnlyOnce(assertAttribute(SemanticConventionsConstants.NET_PEER_PORT, peerPort)) .satisfiesOnlyOnce( @@ -182,6 +183,7 @@ private void assertSemanticConventionsSqsConsumerAttributes( assertThat(attributesList) 
.satisfiesOnlyOnce(assertAttribute(SemanticConventionsConstants.RPC_METHOD, method)) .satisfiesOnlyOnce(assertAttribute(SemanticConventionsConstants.RPC_SERVICE, service)) + .satisfiesOnlyOnce(assertAttribute(SemanticConventionsConstants.RPC_SYSTEM, "aws-api")) .satisfiesOnlyOnce(assertAttribute(SemanticConventionsConstants.NET_PEER_NAME, peerName)) .satisfiesOnlyOnce(assertAttribute(SemanticConventionsConstants.NET_PEER_PORT, peerPort)) .satisfiesOnlyOnce(assertAttributeStartsWith(SemanticConventionsConstants.HTTP_URL, url)) @@ -474,7 +476,7 @@ protected void doTestS3CreateBucket() throws Exception { var localService = getApplicationOtelServiceName(); var localOperation = "GET /s3/createbucket/:bucketname"; - var target = "create-bucket"; + var target = "::s3:::create-bucket"; assertSpanClientAttributes( traces, @@ -532,7 +534,7 @@ protected void doTestS3CreateObject() throws Exception { var localService = getApplicationOtelServiceName(); var localOperation = "GET /s3/createobject/:bucketname/:objectname"; - var target = "put-object"; + var target = "::s3:::put-object"; assertSpanClientAttributes( traces, @@ -589,7 +591,7 @@ protected void doTestS3GetObject() throws Exception { var localService = getApplicationOtelServiceName(); var localOperation = "GET /s3/getobject/:bucketName/:objectname"; - var target = "get-object"; + var target = "::s3:::get-object"; assertSpanClientAttributes( traces, @@ -646,7 +648,7 @@ protected void doTestS3Error() { var localService = getApplicationOtelServiceName(); var localOperation = "GET /s3/error"; - var target = "error-bucket"; + var target = "::s3:::error-bucket"; assertSpanClientAttributes( traces, @@ -703,7 +705,7 @@ protected void doTestS3Fault() { var localService = getApplicationOtelServiceName(); var localOperation = "GET /s3/fault"; - var target = "fault-bucket"; + var target = "::s3:::fault-bucket"; assertSpanClientAttributes( traces, @@ -768,7 +770,7 @@ protected void doTestDynamoDbCreateTable() { var localService = 
getApplicationOtelServiceName(); var localOperation = "GET /ddb/createtable/:tablename"; - var target = "some-table"; + var target = "::dynamodb:::table/some-table"; assertSpanClientAttributes( traces, @@ -825,7 +827,7 @@ protected void doTestDynamoDbPutItem() { var localService = getApplicationOtelServiceName(); var localOperation = "GET /ddb/putitem/:tablename/:partitionkey"; - var target = "putitem-table"; + var target = "::dynamodb:::table/putitem-table"; assertSpanClientAttributes( traces, @@ -882,7 +884,7 @@ protected void doTestDynamoDbError() throws Exception { var localService = getApplicationOtelServiceName(); var localOperation = "GET /ddb/error"; - var target = "nonexistanttable"; + var target = "::dynamodb:::table/nonexistanttable"; assertSpanClientAttributes( traces, @@ -945,7 +947,7 @@ protected void doTestDynamoDbFault() throws Exception { var localService = getApplicationOtelServiceName(); var localOperation = "GET /ddb/fault"; - var target = "nonexistanttable"; + var target = "::dynamodb:::table/nonexistanttable"; assertSpanClientAttributes( traces, @@ -1002,7 +1004,7 @@ protected void doTestSQSCreateQueue() throws Exception { var localService = getApplicationOtelServiceName(); var localOperation = "GET /sqs/createqueue/:queuename"; - var target = "some-queue"; + var target = "::sqs:::some-queue"; assertSpanClientAttributes( traces, @@ -1289,7 +1291,7 @@ protected void doTestKinesisPutRecord() throws Exception { var localService = getApplicationOtelServiceName(); var localOperation = "GET /kinesis/putrecord/:streamname"; - var target = "my-stream"; + var target = "::kinesis:::stream/my-stream"; assertSpanClientAttributes( traces, @@ -1346,7 +1348,7 @@ protected void doTestKinesisError() throws Exception { var localService = getApplicationOtelServiceName(); var localOperation = "GET /kinesis/error"; - var target = "nonexistantstream"; + var target = "::kinesis:::stream/nonexistantstream"; assertSpanClientAttributes( traces, @@ -1404,7 +1406,7 @@ 
protected void doTestKinesisFault() throws Exception { var localService = getApplicationOtelServiceName(); var localOperation = "GET /kinesis/fault"; - var target = "faultstream"; + var target = "::kinesis:::stream/faultstream"; assertSpanClientAttributes( traces, diff --git a/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/awssdk/v1/AwsSdkV1Test.java b/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/awssdk/v1/AwsSdkV1Test.java index d33d17ae75..1730b83a5c 100644 --- a/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/awssdk/v1/AwsSdkV1Test.java +++ b/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/awssdk/v1/AwsSdkV1Test.java @@ -59,22 +59,22 @@ protected String getKinesisSpanNamePrefix() { @Override protected String getS3ServiceName() { - return "Amazon S3"; + return "AWS.SDK.Amazon S3"; } @Override protected String getDynamoDbServiceName() { - return "AmazonDynamoDBv2"; + return "AWS.SDK.AmazonDynamoDBv2"; } @Override protected String getSqsServiceName() { - return "AmazonSQS"; + return "AWS.SDK.AmazonSQS"; } @Override protected String getKinesisServiceName() { - return "AmazonKinesis"; + return "AWS.SDK.AmazonKinesis"; } protected String getS3RpcServiceName() { diff --git a/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/base/ContractTestBase.java b/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/base/ContractTestBase.java index 14f7cdaaf0..5e81f7ddce 100644 --- a/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/base/ContractTestBase.java +++ b/appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/base/ContractTestBase.java @@ -78,11 +78,11 @@ public abstract class ContractTestBase { .waitingFor(getApplicationWaitCondition()) 
.withEnv("JAVA_TOOL_OPTIONS", "-javaagent:/opentelemetry-javaagent-all.jar") .withEnv("OTEL_METRIC_EXPORT_INTERVAL", "100") // 100 ms - .withEnv("OTEL_SMP_ENABLED", "true") + .withEnv("OTEL_AWS_APP_SIGNALS_ENABLED", "true") .withEnv("OTEL_METRICS_EXPORTER", "none") .withEnv("OTEL_BSP_SCHEDULE_DELAY", "0") // Don't wait to export spans to the collector .withEnv( - "OTEL_AWS_SMP_EXPORTER_ENDPOINT", + "OTEL_AWS_APP_SIGNALS_EXPORTER_ENDPOINT", "http://" + COLLECTOR_HOSTNAME + ":" + COLLECTOR_PORT) .withEnv( "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", diff --git a/appsignals-tests/images/kafka/kafka-consumers/build.gradle.kts b/appsignals-tests/images/kafka/kafka-consumers/build.gradle.kts index 045271e337..29fc199788 100644 --- a/appsignals-tests/images/kafka/kafka-consumers/build.gradle.kts +++ b/appsignals-tests/images/kafka/kafka-consumers/build.gradle.kts @@ -27,11 +27,11 @@ java { dependencies { implementation("com.sparkjava:spark-core") - implementation("org.apache.kafka:kafka-clients:3.6.0") + implementation("org.apache.kafka:kafka-clients:3.6.1") implementation("org.slf4j:slf4j-api:2.0.9") implementation("org.slf4j:slf4j-simple:2.0.9") testImplementation("org.junit.jupiter:junit-jupiter-api:5.9.2") - testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:5.9.2") + testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:5.10.1") } tasks.getByName("test") { diff --git a/appsignals-tests/images/kafka/kafka-producers/build.gradle.kts b/appsignals-tests/images/kafka/kafka-producers/build.gradle.kts index fe4c99c585..ffc26bb0c2 100644 --- a/appsignals-tests/images/kafka/kafka-producers/build.gradle.kts +++ b/appsignals-tests/images/kafka/kafka-producers/build.gradle.kts @@ -27,11 +27,11 @@ java { dependencies { implementation("com.sparkjava:spark-core") - implementation("org.apache.kafka:kafka-clients:3.6.0") + implementation("org.apache.kafka:kafka-clients:3.6.1") implementation("org.slf4j:slf4j-api:2.0.9") implementation("org.slf4j:slf4j-simple:2.0.9") 
testImplementation("org.junit.jupiter:junit-jupiter-api:5.9.2") - testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:5.9.2") + testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:5.10.1") } tasks.getByName("test") { diff --git a/awsagentprovider/build.gradle.kts b/awsagentprovider/build.gradle.kts index 459343f78e..5b99011ab7 100644 --- a/awsagentprovider/build.gradle.kts +++ b/awsagentprovider/build.gradle.kts @@ -34,6 +34,8 @@ dependencies { implementation("io.opentelemetry.contrib:opentelemetry-aws-xray") // AWS Resource Detectors implementation("io.opentelemetry.contrib:opentelemetry-aws-resources") + // Json file reader + implementation("com.fasterxml.jackson.core:jackson-databind:2.16.1") // Export configuration compileOnly("io.opentelemetry:opentelemetry-exporter-otlp") @@ -45,7 +47,7 @@ dependencies { compileOnly("com.google.code.findbugs:jsr305:3.0.2") testImplementation("org.mockito:mockito-core:5.3.1") - testImplementation("org.mockito:mockito-junit-jupiter:5.3.1") + testImplementation("org.mockito:mockito-junit-jupiter:5.8.0") } tasks { diff --git a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAppSignalsCustomizerProvider.java b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAppSignalsCustomizerProvider.java index 2c2aae6db0..aa6c993506 100644 --- a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAppSignalsCustomizerProvider.java +++ b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAppSignalsCustomizerProvider.java @@ -18,10 +18,13 @@ import io.opentelemetry.api.metrics.MeterProvider; import io.opentelemetry.contrib.awsxray.AlwaysRecordSampler; import io.opentelemetry.contrib.awsxray.ResourceHolder; +import io.opentelemetry.exporter.otlp.http.metrics.OtlpHttpMetricExporter; +import io.opentelemetry.exporter.otlp.internal.OtlpConfigUtil; import 
io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter; import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizer; import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizerProvider; import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; +import io.opentelemetry.sdk.autoconfigure.spi.ConfigurationException; import io.opentelemetry.sdk.metrics.Aggregation; import io.opentelemetry.sdk.metrics.InstrumentType; import io.opentelemetry.sdk.metrics.SdkMeterProvider; @@ -48,8 +51,9 @@ *
  • Add AwsMetricAttributesSpanExporter to add more attributes to all spans. * * - *

    You can control when these customizations are applied using the property otel.smp.enabled or - * the environment variable OTEL_SMP_ENABLED. This flag is enabled by default. + *

    You can control when these customizations are applied using the property + * otel.aws.app.signals.enabled or the environment variable OTEL_AWS_APP_SIGNALS_ENABLED. This flag + * is disabled by default. */ public class AwsAppSignalsCustomizerProvider implements AutoConfigurationCustomizerProvider { private static final Duration DEFAULT_METRIC_EXPORT_INTERVAL = Duration.ofMinutes(1); @@ -62,12 +66,13 @@ public void customize(AutoConfigurationCustomizer autoConfiguration) { autoConfiguration.addSpanExporterCustomizer(this::customizeSpanExporter); } - private boolean isSmpEnabled(ConfigProperties configProps) { - return configProps.getBoolean("otel.smp.enabled", false); + private boolean isAppSignalsEnabled(ConfigProperties configProps) { + return configProps.getBoolean( + "otel.aws.app.signals.enabled", configProps.getBoolean("otel.smp.enabled", false)); } private Sampler customizeSampler(Sampler sampler, ConfigProperties configProps) { - if (isSmpEnabled(configProps)) { + if (isAppSignalsEnabled(configProps)) { return AlwaysRecordSampler.create(sampler); } return sampler; @@ -75,15 +80,12 @@ private Sampler customizeSampler(Sampler sampler, ConfigProperties configProps) private SdkTracerProviderBuilder customizeTracerProviderBuilder( SdkTracerProviderBuilder tracerProviderBuilder, ConfigProperties configProps) { - if (isSmpEnabled(configProps)) { - logger.info("Span Metrics Processor enabled"); - String smpEndpoint = - configProps.getString( - "otel.aws.smp.exporter.endpoint", "http://cloudwatch-agent.amazon-cloudwatch:4317"); + if (isAppSignalsEnabled(configProps)) { + logger.info("AWS AppSignals enabled"); Duration exportInterval = configProps.getDuration("otel.metric.export.interval", DEFAULT_METRIC_EXPORT_INTERVAL); - logger.log(Level.FINE, String.format("Span Metrics endpoint: %s", smpEndpoint)); - logger.log(Level.FINE, String.format("Span Metrics export interval: %s", exportInterval)); + logger.log( + Level.FINE, String.format("AppSignals Metrics export 
interval: %s", exportInterval)); // Cap export interval to 60 seconds. This is currently required for metrics-trace correlation // to work correctly. if (exportInterval.compareTo(DEFAULT_METRIC_EXPORT_INTERVAL) > 0) { @@ -97,17 +99,8 @@ private SdkTracerProviderBuilder customizeTracerProviderBuilder( AttributePropagatingSpanProcessorBuilder.create().build()); // Construct meterProvider MetricExporter metricsExporter = - OtlpGrpcMetricExporter.builder() - .setEndpoint(smpEndpoint) - .setDefaultAggregationSelector( - instrumentType -> { - if (instrumentType == InstrumentType.HISTOGRAM) { - return Aggregation.base2ExponentialBucketHistogram(); - } - return Aggregation.defaultAggregation(); - }) - .setAggregationTemporalitySelector(AggregationTemporalitySelector.deltaPreferred()) - .build(); + AppSignalsExporterProvider.INSTANCE.createExporter(configProps); + MetricReader metricReader = PeriodicMetricReader.builder(metricsExporter).setInterval(exportInterval).build(); @@ -116,7 +109,7 @@ private SdkTracerProviderBuilder customizeTracerProviderBuilder( .setResource(ResourceHolder.getResource()) .registerMetricReader(metricReader) .build(); - // Construct and set span metrics processor + // Construct and set AppSignals metrics processor SpanProcessor spanMetricsProcessor = AwsSpanMetricsProcessorBuilder.create(meterProvider, ResourceHolder.getResource()) .build(); @@ -127,7 +120,7 @@ private SdkTracerProviderBuilder customizeTracerProviderBuilder( private SpanExporter customizeSpanExporter( SpanExporter spanExporter, ConfigProperties configProps) { - if (isSmpEnabled(configProps)) { + if (isAppSignalsEnabled(configProps)) { return AwsMetricAttributesSpanExporterBuilder.create( spanExporter, ResourceHolder.getResource()) .build(); @@ -135,4 +128,42 @@ private SpanExporter customizeSpanExporter( return spanExporter; } + + private enum AppSignalsExporterProvider { + INSTANCE; + + public MetricExporter createExporter(ConfigProperties configProps) { + String protocol = + 
OtlpConfigUtil.getOtlpProtocol(OtlpConfigUtil.DATA_TYPE_METRICS, configProps); + logger.log(Level.FINE, String.format("AppSignals export protocol: %s", protocol)); + + String appSignalsEndpoint = + configProps.getString( + "otel.aws.app.signals.exporter.endpoint", + configProps.getString("otel.aws.smp.exporter.endpoint", "http://localhost:4315")); + logger.log(Level.FINE, String.format("AppSignals export endpoint: %s", appSignalsEndpoint)); + + if (protocol.equals(OtlpConfigUtil.PROTOCOL_HTTP_PROTOBUF)) { + return OtlpHttpMetricExporter.builder() + .setEndpoint(appSignalsEndpoint) + .setDefaultAggregationSelector(this::getAggregation) + .setAggregationTemporalitySelector(AggregationTemporalitySelector.deltaPreferred()) + .build(); + } else if (protocol.equals(OtlpConfigUtil.PROTOCOL_GRPC)) { + return OtlpGrpcMetricExporter.builder() + .setEndpoint(appSignalsEndpoint) + .setDefaultAggregationSelector(this::getAggregation) + .setAggregationTemporalitySelector(AggregationTemporalitySelector.deltaPreferred()) + .build(); + } + throw new ConfigurationException("Unsupported AppSignals export protocol: " + protocol); + } + + private Aggregation getAggregation(InstrumentType instrumentType) { + if (instrumentType == InstrumentType.HISTOGRAM) { + return Aggregation.base2ExponentialBucketHistogram(); + } + return Aggregation.defaultAggregation(); + } + } } diff --git a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAttributeKeys.java b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAttributeKeys.java index 6c6debf01a..1126dabae5 100644 --- a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAttributeKeys.java +++ b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAttributeKeys.java @@ -46,6 +46,7 @@ private AwsAttributeKeys() {} // https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/8710 static final 
AttributeKey AWS_BUCKET_NAME = AttributeKey.stringKey("aws.bucket.name"); + static final AttributeKey AWS_QUEUE_URL = AttributeKey.stringKey("aws.queue.url"); static final AttributeKey AWS_QUEUE_NAME = AttributeKey.stringKey("aws.queue.name"); static final AttributeKey AWS_STREAM_NAME = AttributeKey.stringKey("aws.stream.name"); static final AttributeKey AWS_TABLE_NAME = AttributeKey.stringKey("aws.table.name"); diff --git a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsMetricAttributeGenerator.java b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsMetricAttributeGenerator.java index 4229f33f59..ad0c97f97b 100644 --- a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsMetricAttributeGenerator.java +++ b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsMetricAttributeGenerator.java @@ -17,6 +17,7 @@ import static io.opentelemetry.semconv.ResourceAttributes.SERVICE_NAME; import static io.opentelemetry.semconv.SemanticAttributes.DB_OPERATION; +import static io.opentelemetry.semconv.SemanticAttributes.DB_STATEMENT; import static io.opentelemetry.semconv.SemanticAttributes.DB_SYSTEM; import static io.opentelemetry.semconv.SemanticAttributes.FAAS_INVOKED_NAME; import static io.opentelemetry.semconv.SemanticAttributes.FAAS_TRIGGER; @@ -37,12 +38,15 @@ import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_LOCAL_OPERATION; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_LOCAL_SERVICE; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_QUEUE_NAME; +import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_QUEUE_URL; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_REMOTE_OPERATION; import static 
software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_REMOTE_SERVICE; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_REMOTE_TARGET; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_SPAN_KIND; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_STREAM_NAME; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_TABLE_NAME; +import static software.amazon.opentelemetry.javaagent.providers.AwsSpanProcessingUtil.MAX_KEYWORD_LENGTH; +import static software.amazon.opentelemetry.javaagent.providers.AwsSpanProcessingUtil.SQL_DIALECT_PATTERN; import static software.amazon.opentelemetry.javaagent.providers.AwsSpanProcessingUtil.UNKNOWN_OPERATION; import static software.amazon.opentelemetry.javaagent.providers.AwsSpanProcessingUtil.UNKNOWN_REMOTE_OPERATION; import static software.amazon.opentelemetry.javaagent.providers.AwsSpanProcessingUtil.UNKNOWN_REMOTE_SERVICE; @@ -67,6 +71,7 @@ import java.util.Optional; import java.util.logging.Level; import java.util.logging.Logger; +import java.util.regex.Matcher; import javax.annotation.Nullable; /** @@ -144,13 +149,27 @@ private static void setRemoteTarget(SpanData span, AttributesBuilder builder) { */ private static Optional getRemoteTarget(SpanData span) { if (isKeyPresent(span, AWS_BUCKET_NAME)) { - return Optional.ofNullable(span.getAttributes().get(AWS_BUCKET_NAME)); - } else if (isKeyPresent(span, AWS_QUEUE_NAME)) { - return Optional.ofNullable(span.getAttributes().get(AWS_QUEUE_NAME)); - } else if (isKeyPresent(span, AWS_STREAM_NAME)) { - return Optional.ofNullable(span.getAttributes().get(AWS_STREAM_NAME)); - } else if (isKeyPresent(span, AWS_TABLE_NAME)) { - return Optional.ofNullable(span.getAttributes().get(AWS_TABLE_NAME)); + return Optional.ofNullable("::s3:::" + span.getAttributes().get(AWS_BUCKET_NAME)); + } + + if (isKeyPresent(span, AWS_QUEUE_URL)) { + String arn = 
SqsUrlParser.getSqsRemoteTarget(span.getAttributes().get(AWS_QUEUE_URL)); + + if (arn != null) { + return Optional.ofNullable(arn); + } + } + + if (isKeyPresent(span, AWS_QUEUE_NAME)) { + return Optional.ofNullable("::sqs:::" + span.getAttributes().get(AWS_QUEUE_NAME)); + } + + if (isKeyPresent(span, AWS_STREAM_NAME)) { + return Optional.ofNullable("::kinesis:::stream/" + span.getAttributes().get(AWS_STREAM_NAME)); + } + + if (isKeyPresent(span, AWS_TABLE_NAME)) { + return Optional.ofNullable("::dynamodb:::table/" + span.getAttributes().get(AWS_TABLE_NAME)); } return Optional.empty(); } @@ -197,10 +216,7 @@ private static void setEgressOperation(SpanData span, AttributesBuilder builder) // add `AWS.SDK.` as prefix to indicate the metrics resulted from current span is from AWS SDK private static String normalizeServiceName(SpanData span, String serviceName) { if (AwsSpanProcessingUtil.isAwsSDKSpan(span)) { - String scopeName = span.getInstrumentationScopeInfo().getName(); - if (scopeName.contains("aws-sdk-2.")) { - return "AWS.SDK." + serviceName; - } + return "AWS.SDK." + serviceName; } return serviceName; } @@ -238,9 +254,9 @@ private static String normalizeServiceName(SpanData span, String serviceName) { * * * if the selected attributes are still producing the UnknownRemoteService or - * UnknownRemoteOperation, `net.peer.name`, `net.peer.port`, `net.peer.sock.addr` and - * `net.peer.sock.port` will be used to derive the RemoteService. And `http.method` and `http.url` - * will be used to derive the RemoteOperation. + * UnknownRemoteOperation, `net.peer.name`, `net.peer.port`, `net.peer.sock.addr`, + * `net.peer.sock.port` and `http.url` will be used to derive the RemoteService. And `http.method` + * and `http.url` will be used to derive the RemoteOperation. 
*/ private static void setRemoteServiceAndOperation(SpanData span, AttributesBuilder builder) { String remoteService = UNKNOWN_REMOTE_SERVICE; @@ -251,9 +267,15 @@ private static void setRemoteServiceAndOperation(SpanData span, AttributesBuilde } else if (isKeyPresent(span, RPC_SERVICE) || isKeyPresent(span, RPC_METHOD)) { remoteService = normalizeServiceName(span, getRemoteService(span, RPC_SERVICE)); remoteOperation = getRemoteOperation(span, RPC_METHOD); - } else if (isKeyPresent(span, DB_SYSTEM) || isKeyPresent(span, DB_OPERATION)) { + } else if (isKeyPresent(span, DB_SYSTEM) + || isKeyPresent(span, DB_OPERATION) + || isKeyPresent(span, DB_STATEMENT)) { remoteService = getRemoteService(span, DB_SYSTEM); - remoteOperation = getRemoteOperation(span, DB_OPERATION); + if (isKeyPresent(span, DB_OPERATION)) { + remoteOperation = getRemoteOperation(span, DB_OPERATION); + } else { + remoteOperation = getDBStatementRemoteOperation(span, DB_STATEMENT); + } } else if (isKeyPresent(span, FAAS_INVOKED_NAME) || isKeyPresent(span, FAAS_TRIGGER)) { remoteService = getRemoteService(span, FAAS_INVOKED_NAME); remoteOperation = getRemoteOperation(span, FAAS_TRIGGER); @@ -324,6 +346,19 @@ private static String generateRemoteService(SpanData span) { Long port = span.getAttributes().get(NET_SOCK_PEER_PORT); remoteService += ":" + port; } + } else if (isKeyPresent(span, HTTP_URL)) { + String httpUrl = span.getAttributes().get(HTTP_URL); + try { + URL url = new URL(httpUrl); + if (!url.getHost().isEmpty()) { + remoteService = url.getHost(); + if (url.getPort() != -1) { + remoteService += ":" + url.getPort(); + } + } + } catch (MalformedURLException e) { + logger.log(Level.FINEST, "invalid http.url attribute: ", httpUrl); + } } else { logUnknownAttribute(AWS_REMOTE_SERVICE, span); } @@ -421,6 +456,36 @@ private static String getRemoteOperation(SpanData span, AttributeKey rem return remoteOperation; } + /** + * If no db.operation attribute provided in the span, we use db.statement to 
compute a valid + * remote operation in a best-effort manner. To do this, we take the first substring of the + * statement and compare to a regex list of known SQL keywords. The substring length is determined + * by the longest known SQL keywords. + */ + private static String getDBStatementRemoteOperation( + SpanData span, AttributeKey remoteOperationKey) { + String remoteOperation = span.getAttributes().get(remoteOperationKey); + if (remoteOperation == null) { + remoteOperation = UNKNOWN_REMOTE_OPERATION; + } + + // Remove all whitespace and newline characters from the beginning of remote_operation + // and retrieve the first MAX_KEYWORD_LENGTH characters + remoteOperation = remoteOperation.stripLeading(); + if (remoteOperation.length() > MAX_KEYWORD_LENGTH) { + remoteOperation = remoteOperation.substring(0, MAX_KEYWORD_LENGTH); + } + + Matcher matcher = SQL_DIALECT_PATTERN.matcher(remoteOperation.toUpperCase()); + if (matcher.find() && !matcher.group(0).isEmpty()) { + remoteOperation = matcher.group(0); + } else { + remoteOperation = UNKNOWN_REMOTE_OPERATION; + } + + return remoteOperation; + } + private static void logUnknownAttribute(AttributeKey attributeKey, SpanData span) { String[] params = { attributeKey.getKey(), span.getKind().name(), span.getSpanContext().getSpanId() diff --git a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanProcessingUtil.java b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanProcessingUtil.java index 119e3fc772..a41ca5984e 100644 --- a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanProcessingUtil.java +++ b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanProcessingUtil.java @@ -22,11 +22,20 @@ import static io.opentelemetry.semconv.SemanticAttributes.RPC_SYSTEM; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_LOCAL_OPERATION; +import 
com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectReader; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.SpanContext; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.sdk.common.InstrumentationScopeInfo; import io.opentelemetry.sdk.trace.data.SpanData; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Pattern; /** Utility class designed to support shared logic across AWS Span Processors. */ final class AwsSpanProcessingUtil { @@ -40,6 +49,29 @@ final class AwsSpanProcessingUtil { static final String LOCAL_ROOT = "LOCAL_ROOT"; static final String SQS_RECEIVE_MESSAGE_SPAN_NAME = "Sqs.ReceiveMessage"; static final String AWS_SDK_INSTRUMENTATION_SCOPE_PREFIX = "io.opentelemetry.aws-sdk-"; + // Max keyword length supported by parsing into remote_operation from DB_STATEMENT. + // The current longest command word is DATETIME_INTERVAL_PRECISION at 27 characters. + // If we add a longer keyword to the sql dialect keyword list, need to update the constant below. 
+ static final int MAX_KEYWORD_LENGTH = 27; + static final Pattern SQL_DIALECT_PATTERN = + Pattern.compile("^(?:" + String.join("|", getDialectKeywords()) + ")\\b"); + + private static final String SQL_DIALECT_KEYWORDS_JSON = "configuration/sql_dialect_keywords.json"; + + static List getDialectKeywords() { + try (InputStream jsonFile = + AwsSpanProcessingUtil.class + .getClassLoader() + .getResourceAsStream(SQL_DIALECT_KEYWORDS_JSON)) { + ObjectMapper mapper = new ObjectMapper(); + JsonNode jsonNode = mapper.readValue(jsonFile, JsonNode.class); + JsonNode arrayNode = jsonNode.get("keywords"); + ObjectReader reader = mapper.readerFor(new TypeReference>() {}); + return reader.readValue(arrayNode); + } catch (IOException e) { + return new ArrayList<>(); + } + } /** * Ingress operation (i.e. operation for Server and Consumer spans) will be generated from diff --git a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/SqsUrlParser.java b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/SqsUrlParser.java new file mode 100644 index 0000000000..7620bf88dd --- /dev/null +++ b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/SqsUrlParser.java @@ -0,0 +1,189 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.opentelemetry.javaagent.providers; + +public class SqsUrlParser { + private static final char ARN_DELIMETER = ':'; + private static final String HTTP_SCHEMA = "http://"; + private static final String HTTPS_SCHEMA = "https://"; + + public static String getSqsRemoteTarget(String sqsUrl) { + sqsUrl = stripSchemaFromUrl(sqsUrl); + + if (!isSqsUrl(sqsUrl) && !isLegacySqsUrl(sqsUrl) && !isCustomUrl(sqsUrl)) { + return null; + } + + String region = getRegion(sqsUrl); + String accountId = getAccountId(sqsUrl); + String partition = getPartition(sqsUrl); + String queueName = getQueueName(sqsUrl); + + StringBuilder remoteTarget = new StringBuilder(); + + if (region == null && accountId == null && partition == null && queueName == null) { + return null; + } + + if (region != null && accountId != null && partition != null && queueName != null) { + remoteTarget.append("arn"); + } + + remoteTarget + .append(ARN_DELIMETER) + .append(nullToEmpty(partition)) + .append(ARN_DELIMETER) + .append("sqs") + .append(ARN_DELIMETER) + .append(nullToEmpty(region)) + .append(ARN_DELIMETER) + .append(nullToEmpty(accountId)) + .append(ARN_DELIMETER) + .append(queueName); + + return remoteTarget.toString(); + } + + private static String stripSchemaFromUrl(String url) { + return url.replace(HTTP_SCHEMA, "").replace(HTTPS_SCHEMA, ""); + } + + private static String getRegion(String sqsUrl) { + if (sqsUrl == null) { + return null; + } + + if (sqsUrl.startsWith("queue.amazonaws.com/")) { + return "us-east-1"; + } else if (isSqsUrl(sqsUrl)) { + return getRegionFromSqsUrl(sqsUrl); + } else if (isLegacySqsUrl(sqsUrl)) { + return getRegionFromLegacySqsUrl(sqsUrl); + } else { + return null; + } + } + + private static boolean isSqsUrl(String sqsUrl) { + String[] split = sqsUrl.split("/"); + + return split.length == 3 + && split[0].startsWith("sqs.") + && split[0].endsWith(".amazonaws.com") + && isAccountId(split[1]) + && isValidQueueName(split[2]); + } + + private static 
boolean isLegacySqsUrl(String sqsUrl) { + String[] split = sqsUrl.split("/"); + + return split.length == 3 + && split[0].endsWith(".queue.amazonaws.com") + && isAccountId(split[1]) + && isValidQueueName(split[2]); + } + + private static boolean isCustomUrl(String sqsUrl) { + String[] split = sqsUrl.split("/"); + return split.length == 3 && isAccountId(split[1]) && isValidQueueName(split[2]); + } + + private static boolean isValidQueueName(String input) { + if (input.length() == 0 || input.length() > 80) { + return false; + } + + for (Character c : input.toCharArray()) { + if (c != '_' && c != '-' && !Character.isAlphabetic(c) && !Character.isDigit(c)) { + return false; + } + } + + return true; + } + + private static boolean isAccountId(String input) { + if (input.length() != 12) { + return false; + } + + try { + Long.valueOf(input); + } catch (Exception e) { + return false; + } + + return true; + } + + private static String getRegionFromSqsUrl(String sqsUrl) { + String[] split = sqsUrl.split("\\."); + + if (split.length >= 2) { + return split[1]; + } + + return null; + } + + private static String getRegionFromLegacySqsUrl(String sqsUrl) { + String[] split = sqsUrl.split("\\."); + return split[0]; + } + + private static String getAccountId(String sqsUrl) { + if (sqsUrl == null) { + return null; + } + + String[] split = sqsUrl.split("/"); + if (split.length >= 2) { + return split[1]; + } + + return null; + } + + private static String getPartition(String sqsUrl) { + String region = getRegion(sqsUrl); + + if (region == null) { + return null; + } + + if (region.startsWith("us-gov-")) { + return "aws-us-gov"; + } else if (region.startsWith("cn-")) { + return "aws-cn"; + } else { + return "aws"; + } + } + + private static String getQueueName(String sqsUrl) { + String[] split = sqsUrl.split("/"); + + if (split.length >= 3) { + return split[2]; + } + + return null; + } + + private static String nullToEmpty(String input) { + return input == null ? 
"" : input; + } +} diff --git a/awsagentprovider/src/main/resources/configuration/sql_dialect_keywords.json b/awsagentprovider/src/main/resources/configuration/sql_dialect_keywords.json new file mode 100644 index 0000000000..cabebe3430 --- /dev/null +++ b/awsagentprovider/src/main/resources/configuration/sql_dialect_keywords.json @@ -0,0 +1,818 @@ +{ + "top_comments": " The keywords are sorted based on descending order of the length of the keyword characters.This list was generated by combining keywords from various SQL systems: SQL Server, PostgreSQL, SQLite, Oracle.", + "keywords": [ + "DATETIME_INTERVAL_PRECISION", + "PARAMETER_SPECIFIC_CATALOG", + "PARAMETER_ORDINAL_POSITION", + "USER_DEFINED_TYPE_CATALOG", + "PARAMETER_SPECIFIC_SCHEMA", + "TRANSACTIONS_ROLLED_BACK", + "USER_DEFINED_TYPE_SCHEMA", + "PARAMETER_SPECIFIC_NAME", + "USER_DEFINED_TYPE_NAME", + "TRANSACTIONS_COMMITTED", + "DATETIME_INTERVAL_CODE", + "RETURNED_OCTET_LENGTH", + "COMMAND_FUNCTION_CODE", + "CHARACTER_SET_CATALOG", + "DYNAMIC_FUNCTION_CODE", + "MESSAGE_OCTET_LENGTH", + "CHARACTER_SET_SCHEMA", + "INSERT INTO SELECT", + "CHARACTER_SET_NAME", + "CONSTRAINT_CATALOG", + "CHARACTER VARYING", + "RETURNED_SQLSTATE", + "TRANSATION_ACTIVE", + "CURRENT_TIMESTAMP", + "CONSTRAINT_SCHEMA", + "COLLATION_CATALOG", + "DOUBLE PRECISION", + "DYNAMIC_FUNCTION", + "CONDITION_NUMBER", + "COMMAND_FUNCTION", + "CHARACTER_LENGTH", + "COLLATION_SCHEMA", + "FULL OUTER JOIN", + "DROP CONSTRAINT", + "BACKUP DATABASE", + "CREATE DATABASE", + "SELECT DISTINCT", + "TIMEZONE_MINUTE", + "ROUTINE_CATALOG", + "TRIGGER_CATALOG", + "SUBCLASS_ORIGIN", + "MIN_PART_STRING", + "CONNECTION_NAME", + "PARSE_URL_TUPLE", + "MAX_PART_STRING", + "RETURNED_LENGTH", + "CHARACTERISTICS", + "CONSTRAINT_NAME", + "TRUNCATE TABLE", + "PARAMETER_NAME", + "UNIX_TIMESTAMP", + "ARRAY_CONTAINS", + "MESSAGE_LENGTH", + "LOCALTIMESTAMP", + "TRIGGER_SCHEMA", + "ROUTINE_SCHEMA", + "IMPLEMENTATION", + "AUTO_INCREMENT", + "SIMPLE_INTEGER", + 
"COLLATION_NAME", + "PARAMETER_MODE", + "REGEXP_REPLACE", + "DROP DATABASE", + "STRAIGHT_JOIN", + "SYS_REFCURSOR", + "UTC_TIMESTAMP", + "MIN_PART_DATE", + "DETERMINISTIC", + "TBLPROPERTIES", + "PART_COUNT_BY", + "TIMEZONE_HOUR", + "SIMPLE_DOUBLE", + "MAX_PART_DATE", + "FROM_UNIXTIME", + "SPECIFIC_NAME", + "MAXLOGMEMBERS", + "ON_ERROR_STOP", + "TXID_SNAPSHOT", + "MAXLOGHISTORY", + "CORRESPONDING", + "AUTHORIZATION", + "TIMESTAMP_ISO", + "CREATE INDEX", + "DROP DEFAULT", + "CREATE TABLE", + "ALTER COLUMN", + "CURRENT_ROLE", + "MAXINSTANCES", + "CONCURRENTLY", + "ROUTINE_NAME", + "MIN_PART_INT", + "SESSION_USER", + "MESSAGE_TEXT", + "INSTANTIABLE", + "MAX_PART_INT", + "STATEMENT_ID", + "OCTET_LENGTH", + "CURRENT_DATE", + "NOARCHIVELOG", + "TO_TIMESTAMP", + "COLLECT_LIST", + "CATALOG_NAME", + "TRIGGER_NAME", + "SIMPLE_FLOAT", + "NOCREATEUSER", + "CURRENT_PATH", + "CLASS_ORIGIN", + "MAXDATAFILES", + "CURRENT_TIME", + "SQLEXCEPTION", + "SERIALIZABLE", + "SPECIFICTYPE", + "CURRENT_USER", + "IS NOT NULL", + "SELECT INTO", + "DROP COLUMN", + "PRIMARY KEY", + "FOREIGN KEY", + "ALTER TABLE", + "BIT VARYING", + "INSERT INTO", + "CREATE VIEW", + "CONTROLFILE", + "SERVER_NAME", + "REFERENCING", + "TRANSLATION", + "DBMS_OUTPUT", + "CARDINALITY", + "UNCOMMITTED", + "CHAR_LENGTH", + "PLS_INTEGER", + "SYSTEM_USER", + "SMALLSERIAL", + "UNENCRYPTED", + "MAXLOGFILES", + "DISTINCTROW", + "CONSTRAINTS", + "PCTINCREASE", + "CURSOR_NAME", + "LANCOMPILER", + "NORESETLOGS", + "COLUMN_NAME", + "INSENSITIVE", + "DIAGNOSTICS", + "SCHEMA_NAME", + "CONSTRUCTOR", + "TRANSACTION", + "OUTER JOIN", + "INNER JOIN", + "DROP TABLE", + "RIGHT JOIN", + "SELECT TOP", + "DROP INDEX", + "POSEXPLODE", + "CONNECTION", + "NOCOMPRESS", + "MINEXTENTS", + "ARCHIVELOG", + "SAVE_POINT", + "MAXEXTENTS", + "SQLWARNING", + "COMPLETION", + "ORDINALITY", + "CHECKPOINT", + "PRIVILEGES", + "ASSIGNMENT", + "ASYMMETRIC", + "TRANSFORMS", + "DELIMITERS", + "PROCEDURAL", + "EXTERNALLY", + "CREATEUSER", + "REFERENCES", + 
"DESCRIPTOR", + "CONSTRAINT", + "SUCCESSFUL", + "MAXELEMENT", + "CONVERSION", + "TABLE_NAME", + "UNIQUEJOIN", + "DEFERRABLE", + "PARAMETERS", + "NOCREATEDB", + "IDENTIFIED", + "EXCEPTIONS", + "OVERRIDING", + "DEALLOCATE", + "NOMAXVALUE", + "PART_COUNT", + "KEY_MEMBER", + "NOMINVALUE", + "STATISTICS", + "BIT_LENGTH", + "RESTRICTED", + "DICTIONARY", + "INITIALIZE", + "REPEATABLE", + "DISTRIBUTE", + "ASENSITIVE", + "TABLESPACE", + "TRANSATION", + "DESTRUCTOR", + "DISCONNECT", + "MINELEMENT", + "DROP VIEW", + "LEFT JOIN", + "UNION ALL", + "UNBOUNDED", + "PRECISION", + "FILE_TYPE", + "TIMESTAMP", + "CONDITION", + "OPERATION", + "SUBSTRING", + "CHARACTER", + "INCLUDING", + "TRANSFORM", + "SAVEPOINT", + "FOLLOWING", + "UNLIMITED", + "EXCLUDING", + "STRUCTURE", + "PROCEDURE", + "POSITIVEN", + "SYMMETRIC", + "OVERWRITE", + "ISOLATION", + "DELIMITER", + "EXCLUSIVE", + "RETURNING", + "COLLATION", + "FREELISTS", + "VALIDATOR", + "DIRECTORY", + "AGGREGATE", + "LOCALTIME", + "HIERARCHY", + "TEMPORARY", + "EXCEPTION", + "ENCRYPTED", + "ASSERTION", + "COMMITTED", + "STATEMENT", + "INITIALLY", + "PARTITION", + "TERMINATE", + "BIGSERIAL", + "IMMEDIATE", + "RESETLOGS", + "PARAMETER", + "INDICATOR", + "GENERATED", + "RECURSIVE", + "SENSITIVE", + "TRANSLATE", + "INCREMENT", + "IMMUTABLE", + "INTERSECT", + "ROW_COUNT", + "ORDER BY", + "NOT NULL", + "GROUP BY", + "LOCATION", + "TRUNCATE", + "PREORDER", + "TRAILING", + "BACKWARD", + "NVARCHAR", + "OVERLAPS", + "SQLSTATE", + "CREATEDB", + "EXISTING", + "ABSOLUTE", + "GROUPING", + "MAXTRANS", + "FREELIST", + "POSITION", + "DATE_SUB", + "INTERVAL", + "UNSIGNED", + "MAXINDEX", + "DATAFILE", + "DEFERRED", + "TSVECTOR", + "ROWLABEL", + "MININDEX", + "CONFLICT", + "ROLLBACK", + "DISPATCH", + "OPERATOR", + "PARALLEL", + "PUT_LINE", + "VARIABLE", + "CASCADED", + "ENCODING", + "FUNCTION", + "SECURITY", + "EXCHANGE", + "NOTFOUND", + "TEMPLATE", + "CONTAINS", + "IDENTITY", + "NATURALN", + "TRIGGERS", + "SPECIFIC", + "RESIGNAL", + "INITRANS", + 
"EXTENDED", + "SQLERROR", + "PART_LOC", + "VARCHAR2", + "SIGNTYPE", + "SMALLINT", + "COMPRESS", + "DATABASE", + "ALLOCATE", + "PRESERVE", + "NULLABLE", + "CONTINUE", + "INSTANCE", + "WHENEVER", + "RESTRICT", + "CONTENTS", + "POSITIVE", + "SEQUENCE", + "MAXVALUE", + "ARRAYLEN", + "MODIFIES", + "UNLISTEN", + "RELATIVE", + "LANGUAGE", + "NATIONAL", + "VOLATILE", + "VALIDATE", + "DISMOUNT", + "IMPLICIT", + "DISTINCT", + "COALESCE", + "UTL_FILE", + "ELEMENTS", + "RESOURCE", + "KEY_TYPE", + "EXTERNAL", + "DESCRIBE", + "MINVALUE", + "END-EXEC", + "DATE_ADD", + "INHERITS", + "DEFAULTS", + "SNAPSHOT", + "IS NULL", + "PERFORM", + "LOGFILE", + "DEFAULT", + "FORWARD", + "INTEGER", + "DESTROY", + "FOREACH", + "POSTFIX", + "SYSDATE", + "RECOVER", + "SECTION", + "INDEXES", + "TO_CHAR", + "INITIAL", + "ITERATE", + "OPTIONS", + "PRIVATE", + "ARCHIVE", + "OPTIMAL", + "ROUTINE", + "CHECKED", + "SYNONYM", + "TSQUERY", + "INCLUDE", + "NOORDER", + "INHERIT", + "STORAGE", + "SESSION", + "BREADTH", + "PREPARE", + "PRIMARY", + "GENERAL", + "CLUSTER", + "CONNECT", + "PCTUSED", + "PLACING", + "CURRENT", + "REINDEX", + "EXPLODE", + "TINYINT", + "CATALOG", + "SUBLIST", + "OVERLAY", + "EXECUTE", + "DECIMAL", + "LATERAL", + "UNNAMED", + "PACKAGE", + "DECLARE", + "COMPILE", + "CHARSET", + "PLPGSQL", + "NOTNULL", + "COMMENT", + "POLYGON", + "BETWEEN", + "FOREIGN", + "RESTART", + "TRIGGER", + "MACADDR", + "ANALYSE", + "VERBOSE", + "COLLATE", + "EXPLAIN", + "SIMILAR", + "GRANTED", + "VARCHAR", + "PENDANT", + "TRACING", + "INVOKER", + "COLLECT", + "CONVERT", + "NATURAL", + "TRUSTED", + "INSTEAD", + "NOCACHE", + "NOCYCLE", + "NOTHING", + "FORTRAN", + "SERIAL8", + "SEGMENT", + "PARTIAL", + "UNKNOWN", + "RETURNS", + "PCTFREE", + "LEADING", + "QUARTER", + "CASCADE", + "REPLACE", + "VARYING", + "SUMMARY", + "ANALYZE", + "RECHECK", + "NUMERIC", + "LOCATOR", + "VERSION", + "OFFLINE", + "EXTRACT", + "PROFILE", + "BOOLEAN", + "SQLCODE", + "WITHOUT", + "DEFINER", + "DISABLE", + "TO_DATE", + "NOAUDIT", + 
"DEFINED", + "DYNAMIC", + "SIGNED", + "NOTICE", + "SOURCE", + "VACUUM", + "STABLE", + "ENGINE", + "STRICT", + "REDUCE", + "SECOND", + "ISNULL", + "SIMPLE", + "MANUAL", + "BEFORE", + "METHOD", + "PG_LSN", + "EXISTS", + "CIRCLE", + "EQUALS", + "NULLIF", + "UNIQUE", + "DOUBLE", + "CANCEL", + "STRING", + "OBJECT", + "FREEZE", + "EVENTS", + "SUBSTR", + "THREAD", + "ATOMIC", + "VALUES", + "MODIFY", + "PREFIX", + "ROWNUM", + "BECOME", + "UPDATE", + "UNLOCK", + "UNNEST", + "SEARCH", + "RENAME", + "PASCAL", + "BACKUP", + "BITVAR", + "DECODE", + "SWITCH", + "EXTENT", + "OUTPUT", + "TABLES", + "RESULT", + "PUBLIC", + "SQLBUF", + "ONLINE", + "ENABLE", + "NOTIFY", + "SELECT", + "BINARY", + "GLOBAL", + "CREATE", + "STATIC", + "INLINE", + "NOWAIT", + "STDOUT", + "REVOKE", + "ESCAPE", + "MINUTE", + "SCROLL", + "MODULE", + "NOSORT", + "UROWID", + "SCHEMA", + "DOMAIN", + "WINDOW", + "ROLLUP", + "OPTION", + "CURSOR", + "OFFSET", + "CONCAT", + "COMMIT", + "UPSERT", + "NUMBER", + "INSERT", + "CALLED", + "LENGTH", + "BIGINT", + "SIGNAL", + "LISTEN", + "RETURN", + "CHANGE", + "NORMAL", + "SERIAL", + "COLUMN", + "SHARED", + "IGNORE", + "MANAGE", + "SYSTEM", + "STRUCT", + "ACCESS", + "HAVING", + "EXCEPT", + "LOCATE", + "DELETE", + "FOUND", + "LEAVE", + "OWNER", + "RTRIM", + "INDEX", + "VIEWS", + "CHAIN", + "ELSIF", + "WHILE", + "RIGHT", + "UNION", + "CHECK", + "ALIAS", + "WHERE", + "ROLES", + "OUTER", + "RAISE", + "SCOPE", + "INFIX", + "FIRST", + "COBOL", + "NCHAR", + "DEREF", + "LOCAL", + "FLOOR", + "GRANT", + "TOAST", + "INNER", + "STACK", + "RANGE", + "SPLIT", + "TREAT", + "PRIOR", + "USAGE", + "BYTEA", + "SPACE", + "QUOTA", + "ORDER", + "NCLOB", + "ARRAY", + "LIMIT", + "ALTER", + "TABLE", + "JSONB", + "PRINT", + "UPPER", + "COUNT", + "STYPE", + "ABORT", + "POINT", + "LISTS", + "USING", + "INPUT", + "MOUNT", + "FORCE", + "INOUT", + "MATCH", + "CLASS", + "LOWER", + "FINAL", + "MERGE", + "BLOCK", + "READS", + "EVERY", + "VALID", + "RESET", + "FALSE", + "INSTR", + "NAMES", + "WRITE", + 
"START", + "TRUNC", + "ROWID", + "GROUP", + "MINUS", + "UNDER", + "BEGIN", + "CYCLE", + "MONTH", + "SHARE", + "CLOSE", + "FETCH", + "SETOF", + "REUSE", + "STDIN", + "FLUSH", + "ILIKE", + "SCALE", + "UNTIL", + "AUDIT", + "LARGE", + "ADMIN", + "LEVEL", + "SYSID", + "LAYER", + "MONEY", + "BREAK", + "CROSS", + "MUMPS", + "AFTER", + "CACHE", + "FLOAT", + "DUMP", + "INT8", + "SETS", + "MODE", + "LESS", + "WEEK", + "CUBE", + "NONE", + "NULL", + "DATE", + "THEN", + "NEXT", + "YEAR", + "REAL", + "ONLY", + "LINK", + "WHEN", + "BLOB", + "SQRT", + "ELSE", + "DROP", + "PATH", + "CASE", + "PLAN", + "SOME", + "LIKE", + "DESC", + "SIZE", + "LEFT", + "FROM", + "HOST", + "ZONE", + "CIDR", + "RULE", + "HOLD", + "THAN", + "CHAR", + "EXEC", + "TEXT", + "HOUR", + "LSEG", + "ROLE", + "JOIN", + "STOP", + "READ", + "LAST", + "LOAD", + "FILE", + "DATA", + "EACH", + "BOTH", + "USER", + "OIDS", + "SHOW", + "GOTO", + "LONG", + "JSON", + "ROWS", + "LOCK", + "SORT", + "INTO", + "LOOP", + "BODY", + "TIME", + "CAST", + "TRUE", + "NVL2", + "OPEN", + "TRIM", + "INET", + "UUID", + "EXIT", + "FULL", + "CALL", + "FREE", + "MOVE", + "WITH", + "TEMP", + "OVER", + "SIGN", + "SELF", + "VIEW", + "CLOB", + "MORE", + "COPY", + "TYPE", + "WORK", + "LINE", + "UID", + "END", + "TOP", + "FOR", + "GET", + "MAP", + "REF", + "NOT", + "MOD", + "SQL", + "ROW", + "PLI", + "SET", + "LEN", + "OFF", + "DBA", + "MAX", + "ALL", + "DAY", + "RAW", + "STR", + "ADD", + "USE", + "MIN", + "OUT", + "DIV", + "FTP", + "ADA", + "SIN", + "XML", + "CMP", + "ASC", + "NVL", + "ANY", + "PAD", + "ABS", + "OLD", + "SUM", + "BOX", + "AND", + "INT", + "OWN", + "ARE", + "KEY", + "BIT", + "AVG", + "NEW", + "SCN", + "DEC", + "NOW", + "ON", + "OF", + "OR", + "AS", + "NO", + "IN", + "IF", + "TO", + "BY", + "AT", + "GO", + "IS", + "DO" + ], + "end_comments": " The keywords are sorted based on descending order of the length of the keyword characters.This list was generated by combining keywords from various SQL systems: SQL Server, PostgreSQL, 
SQLite, Oracle." +} \ No newline at end of file diff --git a/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsMetricAttributeGeneratorTest.java b/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsMetricAttributeGeneratorTest.java index d4f7375994..f3d813ecfd 100644 --- a/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsMetricAttributeGeneratorTest.java +++ b/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsMetricAttributeGeneratorTest.java @@ -25,6 +25,7 @@ import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_LOCAL_OPERATION; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_LOCAL_SERVICE; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_QUEUE_NAME; +import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_QUEUE_URL; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_REMOTE_OPERATION; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_REMOTE_SERVICE; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_REMOTE_TARGET; @@ -436,6 +437,7 @@ public void testRemoteAttributesCombinations() { mockAttribute(RPC_METHOD, "TestString"); mockAttribute(DB_SYSTEM, "TestString"); mockAttribute(DB_OPERATION, "TestString"); + mockAttribute(DB_STATEMENT, "TestString"); mockAttribute(FAAS_INVOKED_PROVIDER, "TestString"); mockAttribute(FAAS_INVOKED_NAME, "TestString"); mockAttribute(MESSAGING_SYSTEM, "TestString"); @@ -460,6 +462,21 @@ public void testRemoteAttributesCombinations() { // Validate behaviour of various combinations of DB attributes, then remove them. 
validateAndRemoveRemoteAttributes(DB_SYSTEM, "DB system", DB_OPERATION, "DB operation"); + // Validate db.operation not exist, but db.statement exist, where SpanAttributes.DB_STATEMENT is + // invalid + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_STATEMENT, "invalid DB statement"); + mockAttribute(DB_OPERATION, null); + validateAndRemoveRemoteAttributes( + DB_SYSTEM, "DB system", DB_OPERATION, UNKNOWN_REMOTE_OPERATION); + + // Validate both db.operation and db.statement not exist. + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_OPERATION, null); + mockAttribute(DB_STATEMENT, null); + validateAndRemoveRemoteAttributes( + DB_SYSTEM, "DB system", DB_OPERATION, UNKNOWN_REMOTE_OPERATION); + // Validate behaviour of various combinations of FAAS attributes, then remove them. validateAndRemoveRemoteAttributes( FAAS_INVOKED_NAME, "FAAS invoked name", FAAS_TRIGGER, "FAAS trigger name"); @@ -498,14 +515,36 @@ public void testRemoteAttributesCombinations() { mockAttribute(NET_SOCK_PEER_ADDR, null); mockAttribute(NET_SOCK_PEER_PORT, null); - // Validate behavior of Remote Operation from HttpTarget - with 1st api part, then remove it + // Validate behavior of Remote Operation from HttpTarget - with 1st api part. Also validates + // that RemoteService is extracted from HttpUrl. mockAttribute(HTTP_URL, "http://www.example.com/payment/123"); - validateExpectedRemoteAttributes(UNKNOWN_REMOTE_SERVICE, "/payment"); + validateExpectedRemoteAttributes("www.example.com", "/payment"); mockAttribute(HTTP_URL, null); - // Validate behavior of Remote Operation from HttpTarget - without 1st api part, then remove it + // Validate behavior of Remote Operation from HttpTarget - with 1st api part. Also validates + // that RemoteService is extracted from HttpUrl. 
mockAttribute(HTTP_URL, "http://www.example.com"); - validateExpectedRemoteAttributes(UNKNOWN_REMOTE_SERVICE, "/"); + validateExpectedRemoteAttributes("www.example.com", "/"); + mockAttribute(HTTP_URL, null); + + // Validate behavior of Remote Service from HttpUrl + mockAttribute(HTTP_URL, "http://192.168.1.1:8000"); + validateExpectedRemoteAttributes("192.168.1.1:8000", "/"); + mockAttribute(HTTP_URL, null); + + // Validate behavior of Remote Service from HttpUrl + mockAttribute(HTTP_URL, "http://192.168.1.1"); + validateExpectedRemoteAttributes("192.168.1.1", "/"); + mockAttribute(HTTP_URL, null); + + // Validate behavior of Remote Service from HttpUrl + mockAttribute(HTTP_URL, ""); + validateExpectedRemoteAttributes(UNKNOWN_REMOTE_SERVICE, UNKNOWN_REMOTE_OPERATION); + mockAttribute(HTTP_URL, null); + + // Validate behavior of Remote Service from HttpUrl + mockAttribute(HTTP_URL, null); + validateExpectedRemoteAttributes(UNKNOWN_REMOTE_SERVICE, UNKNOWN_REMOTE_OPERATION); mockAttribute(HTTP_URL, null); // Validate behavior of Remote Operation from HttpTarget - invalid url, then remove it @@ -522,6 +561,72 @@ public void testRemoteAttributesCombinations() { validateExpectedRemoteAttributes(UNKNOWN_REMOTE_SERVICE, UNKNOWN_REMOTE_OPERATION); } + // Validate behaviour of various combinations of DB attributes. 
+ @Test + public void testGetDBStatementRemoteOperation() { + // Set all expected fields to a test string, we will overwrite them in descending order to test + mockAttribute(DB_SYSTEM, "TestString"); + mockAttribute(DB_OPERATION, "TestString"); + mockAttribute(DB_STATEMENT, "TestString"); + + // Validate SpanAttributes.DB_OPERATION not exist, but SpanAttributes.DB_STATEMENT exist, + // where SpanAttributes.DB_STATEMENT is valid + // Case 1: Only 1 valid keywords match + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_STATEMENT, "SELECT DB statement"); + mockAttribute(DB_OPERATION, null); + validateExpectedRemoteAttributes("DB system", "SELECT"); + + // Case 2: More than 1 valid keywords match, we want to pick the longest match + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_STATEMENT, "DROP VIEW DB statement"); + mockAttribute(DB_OPERATION, null); + validateExpectedRemoteAttributes("DB system", "DROP VIEW"); + + // Case 3: More than 1 valid keywords match, but the other keywords is not + // at the start of the SpanAttributes.DB_STATEMENT. 
We want to only pick start match + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_STATEMENT, "SELECT data FROM domains"); + mockAttribute(DB_OPERATION, null); + validateExpectedRemoteAttributes("DB system", "SELECT"); + + // Case 4: Have valid keywords,but it is not at the start of SpanAttributes.DB_STATEMENT + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_STATEMENT, "invalid SELECT DB statement"); + mockAttribute(DB_OPERATION, null); + validateExpectedRemoteAttributes("DB system", UNKNOWN_REMOTE_OPERATION); + + // Case 5: Have valid keywords, match the longest word + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_STATEMENT, "UUID"); + mockAttribute(DB_OPERATION, null); + validateExpectedRemoteAttributes("DB system", "UUID"); + + // Case 6: Have valid keywords, match with first word + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_STATEMENT, "FROM SELECT *"); + mockAttribute(DB_OPERATION, null); + validateExpectedRemoteAttributes("DB system", "FROM"); + + // Case 7: Have valid keyword, match with first word + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_STATEMENT, "SELECT FROM *"); + mockAttribute(DB_OPERATION, null); + validateExpectedRemoteAttributes("DB system", "SELECT"); + + // Case 8: Have valid keywords, match with upper case + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_STATEMENT, "seLeCt *"); + mockAttribute(DB_OPERATION, null); + validateExpectedRemoteAttributes("DB system", "SELECT"); + + // Case 9: Both DB_OPERATION and DB_STATEMENT are set but the former takes precedence + mockAttribute(DB_SYSTEM, "DB system"); + mockAttribute(DB_STATEMENT, "SELECT FROM *"); + mockAttribute(DB_OPERATION, "DB operation"); + validateExpectedRemoteAttributes("DB system", "DB operation"); + } + @Test public void testPeerServiceDoesOverrideOtherRemoteServices() { validatePeerServiceDoesOverride(RPC_SERVICE); @@ -550,25 +655,128 @@ public void testPeerServiceDoesNotOverrideAwsRemoteService() { public 
void testClientSpanWithRemoteTargetAttributes() { // Validate behaviour of aws bucket name attribute, then remove it. mockAttribute(AWS_BUCKET_NAME, "aws_s3_bucket_name"); - validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "aws_s3_bucket_name"); + validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "::s3:::aws_s3_bucket_name"); mockAttribute(AWS_BUCKET_NAME, null); // Validate behaviour of AWS_QUEUE_NAME attribute, then remove it. mockAttribute(AWS_QUEUE_NAME, "aws_queue_name"); - validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "aws_queue_name"); + validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "::sqs:::aws_queue_name"); + mockAttribute(AWS_QUEUE_NAME, null); + + // Validate behaviour of having both AWS_QUEUE_NAME and AWS_QUEUE_URL attribute, then remove + // them. + mockAttribute(AWS_QUEUE_URL, "https://sqs.us-east-2.amazonaws.com/123456789012/Queue"); + mockAttribute(AWS_QUEUE_NAME, "aws_queue_name"); + validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "arn:aws:sqs:us-east-2:123456789012:Queue"); + mockAttribute(AWS_QUEUE_URL, null); + mockAttribute(AWS_QUEUE_NAME, null); + + // Valid queue name with invalid queue URL, we should default to using the queue name. + mockAttribute(AWS_QUEUE_URL, "invalidUrl"); + mockAttribute(AWS_QUEUE_NAME, "aws_queue_name"); + validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "::sqs:::aws_queue_name"); + mockAttribute(AWS_QUEUE_URL, null); mockAttribute(AWS_QUEUE_NAME, null); // Validate behaviour of AWS_STREAM_NAME attribute, then remove it. mockAttribute(AWS_STREAM_NAME, "aws_stream_name"); - validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "aws_stream_name"); + validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "::kinesis:::stream/aws_stream_name"); mockAttribute(AWS_STREAM_NAME, null); // Validate behaviour of AWS_TABLE_NAME attribute, then remove it. 
mockAttribute(AWS_TABLE_NAME, "aws_table_name"); - validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "aws_table_name"); + validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "::dynamodb:::table/aws_table_name"); mockAttribute(AWS_TABLE_NAME, null); } + @Test + public void testSqsClientSpanBasicUrls() { + testSqsUrl( + "https://sqs.us-east-1.amazonaws.com/123412341234/Q_Name-5", + "arn:aws:sqs:us-east-1:123412341234:Q_Name-5"); + testSqsUrl( + "https://sqs.af-south-1.amazonaws.com/999999999999/-_ThisIsValid", + "arn:aws:sqs:af-south-1:999999999999:-_ThisIsValid"); + testSqsUrl( + "http://sqs.eu-west-3.amazonaws.com/000000000000/FirstQueue", + "arn:aws:sqs:eu-west-3:000000000000:FirstQueue"); + testSqsUrl( + "sqs.sa-east-1.amazonaws.com/123456781234/SecondQueue", + "arn:aws:sqs:sa-east-1:123456781234:SecondQueue"); + } + + @Test + public void testSqsClientSpanUsGovUrls() { + testSqsUrl( + "https://sqs.us-gov-east-1.amazonaws.com/123456789012/MyQueue", + "arn:aws-us-gov:sqs:us-gov-east-1:123456789012:MyQueue"); + testSqsUrl( + "sqs.us-gov-west-1.amazonaws.com/112233445566/Queue", + "arn:aws-us-gov:sqs:us-gov-west-1:112233445566:Queue"); + } + + @Test + public void testSqsClientSpanLegacyFormatUrls() { + testSqsUrl( + "https://ap-northeast-2.queue.amazonaws.com/123456789012/MyQueue", + "arn:aws:sqs:ap-northeast-2:123456789012:MyQueue"); + testSqsUrl( + "http://cn-northwest-1.queue.amazonaws.com/123456789012/MyQueue", + "arn:aws-cn:sqs:cn-northwest-1:123456789012:MyQueue"); + testSqsUrl( + "http://cn-north-1.queue.amazonaws.com/123456789012/MyQueue", + "arn:aws-cn:sqs:cn-north-1:123456789012:MyQueue"); + testSqsUrl( + "ap-south-1.queue.amazonaws.com/123412341234/MyLongerQueueNameHere", + "arn:aws:sqs:ap-south-1:123412341234:MyLongerQueueNameHere"); + testSqsUrl( + "https://us-gov-east-1.queue.amazonaws.com/123456789012/MyQueue", + "arn:aws-us-gov:sqs:us-gov-east-1:123456789012:MyQueue"); + } + + @Test + public void testSqsClientSpanNorthVirginiaLegacyUrl() { + testSqsUrl( 
+ "https://queue.amazonaws.com/123456789012/MyQueue", + "arn:aws:sqs:us-east-1:123456789012:MyQueue"); + } + + @Test + public void testSqsClientSpanCustomUrls() { + testSqsUrl("http://127.0.0.1:1212/123456789012/MyQueue", "::sqs::123456789012:MyQueue"); + testSqsUrl("https://127.0.0.1:1212/123412341234/RRR", "::sqs::123412341234:RRR"); + testSqsUrl("127.0.0.1:1212/123412341234/QQ", "::sqs::123412341234:QQ"); + testSqsUrl("https://amazon.com/123412341234/BB", "::sqs::123412341234:BB"); + } + + @Test + public void testSqsClientSpanLongUrls() { + String queueName = "a".repeat(80); + testSqsUrl( + "http://127.0.0.1:1212/123456789012/" + queueName, "::sqs::123456789012:" + queueName); + + String queueNameTooLong = "a".repeat(81); + testSqsUrl("http://127.0.0.1:1212/123456789012/" + queueNameTooLong, null); + } + + @Test + public void testClientSpanSqsInvalidOrEmptyUrls() { + testSqsUrl(null, null); + testSqsUrl("", null); + testSqsUrl("invalidUrl", null); + testSqsUrl("https://www.amazon.com", null); + testSqsUrl("https://sqs.us-east-1.amazonaws.com/123412341234/.", null); + testSqsUrl("https://sqs.us-east-1.amazonaws.com/12/Queue", null); + testSqsUrl("https://sqs.us-east-1.amazonaws.com/A/A", null); + testSqsUrl("https://sqs.us-east-1.amazonaws.com/123412341234/A/ThisShouldNotBeHere", null); + } + + private void testSqsUrl(String sqsUrl, String expectedRemoteTarget) { + mockAttribute(AWS_QUEUE_URL, sqsUrl); + validateRemoteTargetAttributes(AWS_REMOTE_TARGET, expectedRemoteTarget); + mockAttribute(AWS_QUEUE_URL, null); + } + @Test public void testHttpStatusAttributeNotAwsSdk() { validateHttpStatusWithThrowable(new ThrowableWithMethodGetStatusCode(500), null); @@ -761,7 +969,7 @@ public void testNormalizeServiceNameAwsSdkV1Span() { Attributes actualAttributes = GENERATOR.generateMetricAttributeMapFromSpan(spanDataMock, resource).get(DEPENDENCY_METRIC); - assertThat(actualAttributes.get(AWS_REMOTE_SERVICE)).isEqualTo("Amazon S3"); + 
assertThat(actualAttributes.get(AWS_REMOTE_SERVICE)).isEqualTo("AWS.SDK.Amazon S3"); } @Test diff --git a/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanProcessingUtilTest.java b/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanProcessingUtilTest.java index 4f319aa6d0..90f53ed513 100644 --- a/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanProcessingUtilTest.java +++ b/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanProcessingUtilTest.java @@ -22,19 +22,22 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_LOCAL_OPERATION; +import static software.amazon.opentelemetry.javaagent.providers.AwsSpanProcessingUtil.MAX_KEYWORD_LENGTH; +import static software.amazon.opentelemetry.javaagent.providers.AwsSpanProcessingUtil.getDialectKeywords; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.SpanContext; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.sdk.common.InstrumentationScopeInfo; import io.opentelemetry.sdk.trace.data.SpanData; +import java.util.List; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; public class AwsSpanProcessingUtilTest { private static final String DEFAULT_PATH_VALUE = "/"; private static final String UNKNOWN_OPERATION = "UnknownOperation"; - private String INTERNAL_OPERATIONN = "InternalOperation"; + private static final String INTERNAL_OPERATION = "InternalOperation"; private Attributes attributesMock; private SpanData spanDataMock; @@ -62,7 +65,7 @@ public void testGetIngressOperationWithNotServer() { when(spanDataMock.getName()).thenReturn(validName); when(spanDataMock.getKind()).thenReturn(SpanKind.CLIENT); String actualOperation = AwsSpanProcessingUtil.getIngressOperation(spanDataMock); - 
assertThat(actualOperation).isEqualTo(INTERNAL_OPERATIONN); + assertThat(actualOperation).isEqualTo(INTERNAL_OPERATION); } @@ -123,7 +126,7 @@ public void testGetEgressOperationUseInternalOperation() { when(spanDataMock.getName()).thenReturn(invalidName); when(spanDataMock.getKind()).thenReturn(SpanKind.CONSUMER); String actualOperation = AwsSpanProcessingUtil.getEgressOperation(spanDataMock); - assertThat(actualOperation).isEqualTo(AwsSpanProcessingUtil.INTERNAL_OPERATION); + assertThat(actualOperation).isEqualTo(INTERNAL_OPERATION); } @@ -158,14 +161,14 @@ public void testExtractAPIPathValueNoSlash() { } @Test - public void testExtractAPIPathValueNOnlySlash() { + public void testExtractAPIPathValueOnlySlash() { String invalidTarget = "/"; String pathValue = AwsSpanProcessingUtil.extractAPIPathValue(invalidTarget); assertThat(pathValue).isEqualTo(DEFAULT_PATH_VALUE); } @Test - public void testExtractAPIPathValueNOnlySlashAtEnd() { + public void testExtractAPIPathValueOnlySlashAtEnd() { String invalidTarget = "users/"; String pathValue = AwsSpanProcessingUtil.extractAPIPathValue(invalidTarget); assertThat(pathValue).isEqualTo(DEFAULT_PATH_VALUE); } @@ -381,4 +384,23 @@ public void testNoMetricAttributesForAwsSdkSqsConsumerProcessSpan() { assertThat(AwsSpanProcessingUtil.shouldGenerateDependencyMetricAttributes(spanDataMock)) .isTrue(); } + + @Test + public void testSqlDialectKeywordsOrder() { + List<String> keywords = getDialectKeywords(); + int prevKeywordLength = Integer.MAX_VALUE; + for (String keyword : keywords) { + int currKeywordLength = keyword.length(); + assertThat(prevKeywordLength >= currKeywordLength).isTrue(); + prevKeywordLength = currKeywordLength; + } + } + + @Test + public void testSqlDialectKeywordsMaxLength() { + List<String> keywords = getDialectKeywords(); + for (String keyword : keywords) { + assertThat(MAX_KEYWORD_LENGTH >= keyword.length()).isTrue(); + } + } } diff --git a/dependencyManagement/build.gradle.kts b/dependencyManagement/build.gradle.kts index 
567418961a..6e8310ad49 100644 --- a/dependencyManagement/build.gradle.kts +++ b/dependencyManagement/build.gradle.kts @@ -36,9 +36,9 @@ val otelJavaAgentVersion = if (!TEST_SNAPSHOTS) otelVersion else "$otelSnapshotV val DEPENDENCY_BOMS = listOf( "com.amazonaws:aws-java-sdk-bom:1.12.599", "com.fasterxml.jackson:jackson-bom:2.16.0", - "com.google.guava:guava-bom:32.1.3-jre", + "com.google.guava:guava-bom:33.0.0-jre", "com.google.protobuf:protobuf-bom:3.25.1", - "com.linecorp.armeria:armeria-bom:1.26.3", + "com.linecorp.armeria:armeria-bom:1.26.4", "io.grpc:grpc-bom:1.59.1", "io.opentelemetry.instrumentation:opentelemetry-instrumentation-bom-alpha:$otelAlphaVersion", "org.apache.logging.log4j:log4j-bom:2.21.1", diff --git a/instrumentation/log4j-2.13.2/build.gradle.kts b/instrumentation/log4j-2.13.2/build.gradle.kts index 171d083217..a6c273ac66 100644 --- a/instrumentation/log4j-2.13.2/build.gradle.kts +++ b/instrumentation/log4j-2.13.2/build.gradle.kts @@ -25,5 +25,5 @@ dependencies { compileOnly("io.opentelemetry.javaagent:opentelemetry-javaagent-extension-api") compileOnly("net.bytebuddy:byte-buddy") - compileOnly("org.apache.logging.log4j:log4j-core:2.20.0") + compileOnly("org.apache.logging.log4j:log4j-core:2.22.1") } diff --git a/sample-apps/spark/build.gradle.kts b/sample-apps/spark/build.gradle.kts index c494d6ab42..47ee944a4f 100644 --- a/sample-apps/spark/build.gradle.kts +++ b/sample-apps/spark/build.gradle.kts @@ -51,6 +51,7 @@ tasks { dependsOn(":otelagent:jibDockerBuild") } register("jibBuildWithoutAgent") { + dependsOn(":sample-apps:spark:build") val j = JibExtension(project) j.configureImages( "eclipse-temurin:17", diff --git a/testing/README.md b/testing/README.md new file mode 100644 index 0000000000..d5e94a6ab3 --- /dev/null +++ b/testing/README.md @@ -0,0 +1,111 @@ +# How to Test E2E Resource Changes +This guide will give a step by step instruction on how to test changes made to E2E testing resources before pushing a PR. 
+The guide will include the following: +- Setting up IAM roles and an EKS cluster +- Setting up VPC settings and IAM role for EC2 instances +- Buliding sample app images/files and putting them into ECRs/S3 buckets +- Forking a repository and setting up necessary secrets + + +### 1. Create an IAM Role with OIDC Identity Provider +This step is needed to allow Github Action to have access to resources in the AWS account +#### Create an OIDC Provider +- First step is to create an OIDC Identity Provider to allow Github action access to the AWS account resource. Login to AWS, go to the IAM console and click on the Identity Providers tab. +- Click on Add Provider, choose OpenID Connect and type `https://token.actions.githubusercontent.com` in the Provider URL. Click "Get thumbprint". For Audience, use `sts.amazonaws.com`. Finally, click "Add provider" +#### Create an IAM role +- Next, an IAM role needs to be created using the OIDC Identity Provider. Go to the Roles tab and click Create role. +- Choose Web Identity, and choose `token.actions.githubusercontent.com` as the Identity provider, Audience as `sts.amazonaws.com`, and for Github organizations put your github username down. Click next. +- Add the AdministratorAccess policy. Click next. +- Enter your Role name. Click "Create role". +#### Add Additional Permission +- After the role is created, search the role name in the roles tab, click on the role, and go to the Trust relationships tab. Click on "Edit trust policy". +- In the Statement list, add the following item: +`{ + "Sid": "accessToRole", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::root" + }, + "Action": "sts:AssumeRole" + }`. This additional permission is need to allow Github Action to assume roles and have access to the EKS cluster. + +Additional Resource: https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services + +### 2. 
Create EKS Clusters +The E2E EKS test uses an EKS cluster to deploy the sample apps. +#### Setup Environment with the Appropriate Roles and Permissions. +Note: Make sure to replace with the correct value. +- First, login to the testing account +- Assume the e2e test role by running + - `output=$(aws sts assume-role --role-arn arn:aws:iam:::role/ --role-session-name AWSCLI-Session)` + - `export AWS_ACCESS_KEY_ID=$(echo $output | jq -r .Credentials.AccessKeyId)` + - `export AWS_SECRET_ACCESS_KEY=$(echo $output | jq -r .Credentials.SecretAccessKey)` + - `export AWS_SESSION_TOKEN=$(echo $output | jq -r .Credentials.SessionToken)` +- Run `aws sts get-caller-identity` to check if you are in the correct role +#### Create a new Cluster +Note: Make sure to replace with the correct value (e.g. us-east-1) +- Next, create the cluster by running `eksctl create cluster --name e2e-canary-test --region --zones a,b`. This will take around ~10 minutes. +#### Install AWS Load Balancer Controller Add-on +- Finally, install the AWS Load Balancer Controller add-on by running the following commands. Make sure to replace the `` and `` with the correct value. 
+ ``` + eksctl utils associate-iam-oidc-provider --cluster e2e-canary-test --region --approve + curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.7/docs/install/iam_policy.json + aws iam create-policy --policy-name AWSLoadBalancerControllerIAMPolicy --policy-document file://iam_policy.json --region + eksctl create iamserviceaccount --cluster=e2e-canary-test --namespace=kube-system --name=aws-load-balancer-controller --attach-policy-arn=arn:aws:iam:::policy/AWSLoadBalancerControllerIAMPolicy --region --approve + kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml + curl -Lo v2_4_7_full.yaml https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/download/v2.4.7/v2_4_7_full.yaml + sed -i.bak -e '561,569d' ./v2_4_7_full.yaml + sed -i.bak -e 's|your-cluster-name|e2e-canary-test|' ./v2_4_7_full.yaml + kubectl apply -f v2_4_7_full.yaml + curl -Lo v2_4_7_ingclass.yaml https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/download/v2.4.7/v2_4_7_ingclass.yaml + kubectl apply -f v2_4_7_ingclass.yaml``` +#### Create a New Cluster and Install the AWS Load Balancer Controller Add-on for All Regions +Repeat the previous two steps for all 5 regions. (us-east-1, us-east-2, eu-west-1, ap-northeast-1, ap-southeast-2) + +Note: If you do not want to test all 5 regions, you can create one for us-east-1, then on the [appsignals-e2e-eks-canary-test.yml](.github/workflows/appsignals-e2e-eks-canary-test.yml) comment out the other regions on line 24 + +### 3. Setting up Environment for EC2 Tests +#### Create IAM Role for EC2 Instance +- Login to AWS, go to the IAM console and click on the Roles tab. Click Create role. +- Choose AWS service, and choose EC2 as the use case. Click Next. +- Choose AmazonS3ReadOnlyAccess, AWSXrayWriteOnlyAccess, and CloudWatchAgentServerPolicy as the permission. 
+- Type the role name as `APP_SIGNALS_EC2_TEST_ROLE` and click "Create role". + +#### Setting Up Default VPC +- Go to the VPC console and on the routing table for the default VPC, click Edit routes. (The default VPC should have the `-` name if it hasn't been assigned to another VPC before) +- Click add routes, for destination add `0.0.0.0/0`, for target add Internet Gateway and save changes. +- Go to the Security groups tab, find the security group attached to the default VPC, click Edit inbound rules, choose type: All Traffic, Source: custom, and CIDR block: 0.0.0.0/0. Save rules. + +### 4. Building Sample App to ECR +Create two ECR repositories: one for the sample app main service and another for the sample app remote service. +Follow the instructions [here](./sample-apps/README.md) to build the sample app image and upload it to the ECR + +### 5. Building Sample App to S3 Bucket +Create an S3 Bucket to store the .jar files for the sample app main service and sample app remote service. +Follow the instructions under [here](./sample-apps/README.md) to build the sample app .jar and upload it to the bucket + +### 6. Setting up repository +- Go to https://github.com/aws-observability/aws-otel-java-instrumentation and create a fork +- Go to the forked repo and enable action on the Action tab +- Add the following secrets to the repository + - APP_SIGNALS_E2E_TEST_ACC: `` + - E2E_TEST_ROLE_ARN: `arn:aws:iam:::role/` + - APP_SIGNALS_E2E_FE_SA_IMG: `.dkr.ecr.us-east-1.amazonaws.com/` + - APP_SIGNALS_E2E_RE_SA_IMG: `.dkr.ecr.us-east-1.amazonaws.com/` + - APP_SIGNALS_E2E_FE_SA_JAR: s3:/// + - APP_SIGNALS_E2E_RE_SA_JAR: s3:/// + + +### 7. Running the tests +Go to the Actions tab on the forked repository. + +- To run the EKS test, go to `App Signals Enablement - E2E EKS Canary Testing`, and click `Enable Workflow`, then `Run Workflow`. +- To run the EC2 test, go to `App Signals Enablement - E2E EC2 Canary Testing`, and click `Enable Workflow`, then `Run Workflow`. 
+ +If you don't want the canaries to run every 15 minutes on the forked repository, click the `...` button on the top right and choose `Disable Workflow` + +### E2E Testing Resources +- `./.github/workflows/appsignals-e2e-*`: workflow files for running e2e tests +- `./testing/sample-apps/*`: files for building the sample app +- `./testing/validator/*`: files for validating logs/metrics/traces generated by sample app +- `./testing/terraform/*`: files for launching the sample app to EKS cluster or EC2 instances \ No newline at end of file diff --git a/testing/sample-apps/README.md b/testing/sample-apps/README.md index a5c93c74e5..468bcf0912 100644 --- a/testing/sample-apps/README.md +++ b/testing/sample-apps/README.md @@ -10,14 +10,29 @@ Ensure that none of the repositories are currently using the image about to be u To update the image, first push the update to a backup image (or generate a new one), then switch the address on the three repositories to the backup image one by one. Once all three repositories are pointing to the backup image, push the update to the main image and revert the addresses on the repositories back to the original. Be careful to ensure the image names are appropriately stored in secrets. +### Setting up the environment: +1. Run `./.github/scripts/patch.sh` in the repository root. You should have a new folder called `opentelemetry-java-instrumentation` +2. Cd to the new folder, then run `gradle publishToMavenLocal` +3. Run `rm -rf opentelemetry-java-instrumentation` to delete the folder. + ### Steps to update image: -1. Use `ada` commands to autheticate into the testing account -2. Login to ECR Repository: `aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin {REPOSITORY}` +1. Login to the testing account +2. Create a new ECR repository if there's no existing one. +2. Login to ECR Repository: `aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin {REPOSITORY}`. 3. 
Change repository name in the `build.gradle.kts` file under `testing/sample-apps/springboot` or `testing/sample-apps/sprintboot-remote-service` +4. Change the `tasks.named("jib").enabled` value on the `build.gradle.kts` file from false to true 4. Run `gradle jib` under the respective directory. -## [WIP] EC2 Use Case: Building the JAR Files +## EC2 Use Case: Building the JAR Files To build the JAR files of the sample application, simply `cd` into each application, e.g. `cd testing/sample-apps/springboot`, and run `gradle build`. -This will create a JAR file in the `build/libs/` folder. To update the JAR file in the testing account: -- Use `ada` commands to authenticate into the testing account -- Only after you're sure of your changes and if they do not break the tests running in other repos, use `aws s3api put-object --bucket --body build/libs/.jar --key .jar` to push the JAR to S3 +This will create JAR files in the `build/libs/` folder with the format: +- springboot-*-SNAPSHOT-javadoc.jar +- springboot-*-SNAPSHOT-plain.jar +- springboot-*-SNAPSHOT-sources.jar +- springboot-*-SNAPSHOT.jar. + +To update the JAR file in the testing account: +- Login to the testing account +- Only after you're sure of your changes and if they do not break the tests running in other repos, use `aws s3api put-object --bucket --body build/libs/springboot-*-SNAPSHOT.jar --key .jar` + +Note: Replace * with the version number and `.jar` is the desired name of the .jar file once in the s3 bucket. e.g. 
`sample-app-main-service.jar` diff --git a/testing/terraform/ec2/main.tf b/testing/terraform/ec2/main.tf index a5efa41dea..55576e8ff2 100644 --- a/testing/terraform/ec2/main.tf +++ b/testing/terraform/ec2/main.tf @@ -27,8 +27,16 @@ locals { } data "aws_ami" "ami" { - executable_users = ["self"] + owners = ["amazon"] most_recent = true + filter { + name = "name" + values = ["al20*-ami-minimal-*-x86_64"] + } + filter { + name = "state" + values = ["available"] + } filter { name = "architecture" values = ["x86_64"] @@ -81,20 +89,20 @@ resource "null_resource" "main_service_setup" { provisioner "remote-exec" { inline = [ - # Install Java 11 and tmux - "yes | sudo amazon-linux-extras install java-openjdk11", + # Install Java 11 and wget + "sudo yum install wget java-11-amazon-corretto -y", # Copy in CW Agent configuration "agent_config='${replace(replace(file("./amazon-cloudwatch-agent.json"), "/\\s+/", ""), "$REGION", var.aws_region)}'", "echo $agent_config > amazon-cloudwatch-agent.json", # Get and run CW agent rpm - "wget -O cw-agent.rpm ${var.cw_agent_rpm}", + "${var.get_cw_agent_rpm_command}", "sudo rpm -U ./cw-agent.rpm", "sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c file:./amazon-cloudwatch-agent.json", # Get ADOT - "wget -O adot.jar ${var.adot_jar}", + "${var.get_adot_jar_command}", # Get and run the sample application with configuration "aws s3 cp ${var.sample_app_jar} ./main-service.jar", @@ -142,20 +150,20 @@ resource "null_resource" "remote_service_setup" { provisioner "remote-exec" { inline = [ - # Install Java 11 and tmux - "yes | sudo amazon-linux-extras install java-openjdk11", + # Install Java 11 and wget + "sudo yum install wget java-11-amazon-corretto -y", # Copy in CW Agent configuration "agent_config='${replace(replace(file("./amazon-cloudwatch-agent.json"), "/\\s+/", ""), "$REGION", var.aws_region)}'", "echo $agent_config > amazon-cloudwatch-agent.json", # Get and run CW agent rpm - "wget -O 
cw-agent.rpm ${var.cw_agent_rpm}", + "${var.get_cw_agent_rpm_command}", "sudo rpm -U ./cw-agent.rpm", "sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c file:./amazon-cloudwatch-agent.json", # Get ADOT - "wget -O adot.jar ${var.adot_jar}", + "${var.get_adot_jar_command}", # Get and run the sample application with configuration "aws s3 cp ${var.sample_remote_app_jar} ./remote-service.jar", diff --git a/testing/terraform/ec2/variables.tf b/testing/terraform/ec2/variables.tf index 9466cc17bc..8d128cf17d 100644 --- a/testing/terraform/ec2/variables.tf +++ b/testing/terraform/ec2/variables.tf @@ -33,10 +33,10 @@ variable "sample_remote_app_jar" { default = "s3:///" } -variable "cw_agent_rpm" { - default = "s3:///" +variable "get_cw_agent_rpm_command" { + default = " s3:///" } -variable "adot_jar" { - default = "s3:///" +variable "get_adot_jar_command" { + default = " s3:///" } \ No newline at end of file diff --git a/testing/validator/build.gradle.kts b/testing/validator/build.gradle.kts index c3c175f94d..050b40c5c3 100644 --- a/testing/validator/build.gradle.kts +++ b/testing/validator/build.gradle.kts @@ -40,7 +40,7 @@ dependencies { // log implementation(group = "org.apache.logging.log4j", name = "log4j-api", version = "2.20.0") - implementation(group = "org.apache.logging.log4j", name = "log4j-core", version = "2.20.0") + implementation(group = "org.apache.logging.log4j", name = "log4j-core", version = "2.22.1") // mustache template implementation(group = "com.github.spullara.mustache.java", name = "compiler", version = "0.9.10") diff --git a/testing/validator/src/main/resources/expected-data-template/ec2/aws-sdk-call-metric.mustache b/testing/validator/src/main/resources/expected-data-template/ec2/aws-sdk-call-metric.mustache index cf1f9e2554..cd23971e96 100644 --- a/testing/validator/src/main/resources/expected-data-template/ec2/aws-sdk-call-metric.mustache +++ 
b/testing/validator/src/main/resources/expected-data-template/ec2/aws-sdk-call-metric.mustache @@ -95,7 +95,7 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name - metricName: Latency @@ -115,7 +115,7 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name - metricName: Error @@ -214,7 +214,7 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name - metricName: Error @@ -234,7 +234,7 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name - metricName: Fault @@ -333,7 +333,7 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name - metricName: Fault @@ -353,6 +353,6 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name diff --git a/testing/validator/src/main/resources/expected-data-template/ec2/aws-sdk-call-trace.mustache b/testing/validator/src/main/resources/expected-data-template/ec2/aws-sdk-call-trace.mustache index bbf708777a..e882f3fbb6 100644 --- a/testing/validator/src/main/resources/expected-data-template/ec2/aws-sdk-call-trace.mustache +++ b/testing/validator/src/main/resources/expected-data-template/ec2/aws-sdk-call-trace.mustache @@ -42,7 +42,7 @@ "aws_local_operation": "^GET /aws-sdk-call$", "aws_remote_service": "^AWS\\.SDK\\.S3$", "aws_remote_operation": "^GetBucketLocation$", - "aws_remote_target": "^e2e-test-bucket-name$" + "aws_remote_target": "^::s3:::e2e-test-bucket-name$" }, "metadata": { "default": { diff --git a/testing/validator/src/main/resources/expected-data-template/eks/aws-sdk-call-metric.mustache b/testing/validator/src/main/resources/expected-data-template/eks/aws-sdk-call-metric.mustache index b14fe76b2a..cca8709915 100644 --- 
a/testing/validator/src/main/resources/expected-data-template/eks/aws-sdk-call-metric.mustache +++ b/testing/validator/src/main/resources/expected-data-template/eks/aws-sdk-call-metric.mustache @@ -113,7 +113,7 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name - metricName: Latency @@ -136,7 +136,7 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name - metricName: Error @@ -253,7 +253,7 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name - metricName: Error @@ -276,7 +276,7 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name - metricName: Fault @@ -393,7 +393,7 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name + value: ::s3:::e2e-test-bucket-name - metricName: Fault @@ -416,4 +416,4 @@ value: AWS.SDK.S3 - name: RemoteTarget - value: e2e-test-bucket-name \ No newline at end of file + value: ::s3:::e2e-test-bucket-name \ No newline at end of file diff --git a/testing/validator/src/main/resources/expected-data-template/eks/aws-sdk-call-trace.mustache b/testing/validator/src/main/resources/expected-data-template/eks/aws-sdk-call-trace.mustache index a7d92f8785..44d00db7a0 100644 --- a/testing/validator/src/main/resources/expected-data-template/eks/aws-sdk-call-trace.mustache +++ b/testing/validator/src/main/resources/expected-data-template/eks/aws-sdk-call-trace.mustache @@ -44,7 +44,7 @@ "aws_local_operation": "^GET /aws-sdk-call$", "aws_remote_service": "^AWS\\.SDK\\.S3$", "aws_remote_operation": "GetBucketLocation", - "aws_remote_target": "e2e-test-bucket-name" + "aws_remote_target": "::s3:::e2e-test-bucket-name" }, "metadata": { "default": { diff --git a/tools/cp-utility/Cargo.lock b/tools/cp-utility/Cargo.lock index 4c3522d80f..a1aa5dd68a 100644 --- a/tools/cp-utility/Cargo.lock +++ b/tools/cp-utility/Cargo.lock @@ 
-14,12 +14,6 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" -[[package]] -name = "cc" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" - [[package]] name = "cfg-if" version = "1.0.0" @@ -36,25 +30,14 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.1" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ - "errno-dragonfly", "libc", "windows-sys", ] -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "fastrand" version = "2.0.0" @@ -74,15 +57,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.149" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" [[package]] name = "ppv-lite86" @@ -131,9 +114,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ "bitflags 2.4.1", "errno", @@ -144,9 +127,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.1" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if", "fastrand", @@ -173,18 +156,18 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "windows-sys" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -197,42 +180,42 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = 
"bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" diff --git a/tools/cp-utility/Cargo.toml b/tools/cp-utility/Cargo.toml index 5abe0d55fe..7169660e4d 100644 --- a/tools/cp-utility/Cargo.toml +++ b/tools/cp-utility/Cargo.toml @@ -10,7 +10,7 @@ edition = "2021" [dev-dependencies] # dependencies only used during tests -tempfile = "3.8.1" +tempfile = "3.9.0" uuid = { version = "1.5.0", features = ["v4", "fast-rng"] } [profile.release]