diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index f8f95b19e..2bb983012 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -9,31 +9,53 @@ on: jobs: CodeQL-Build: - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 + + permissions: + security-events: write + + strategy: + matrix: + maven-version: ['3.9.2'] + java-version: ['8'] steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: # We must fetch at least the immediate parents so that if this is # a pull request then we can checkout the head. fetch-depth: 2 - - name: Set up JDK - uses: actions/setup-java@v1 + # WD requires Java 8 to build + - name: Set up JDK ${{ matrix.java-version }} + uses: actions/setup-java@v2 with: - java-version: 8 - + distribution: 'zulu' + java-version: ${{ matrix.java-version }} + java-package: jdk + server-id: sonatype-nexus-snapshots # Value of the distributionManagement/repository/id field of the pom.xml + server-username: SONATYPE_USERNAME # env variable for username in deploy + server-password: SONATYPE_PASSWORD # env variable for token in deploy + # only signed artifacts will be released to maven central. this sets up things for the maven-gpg-plugin + gpg-private-key: ${{ secrets.HCOM_GPG_PRIVATE_KEY }} # Value of the GPG private key to import + gpg-passphrase: GPG_PASSPHRASE # env variable for GPG private key passphrase + # this creates a settings.xml on build server + settings-path: ${{ github.workspace }} + cache: maven + - name: Maven Verify + run: mvn -B clean verify --file pom.xml + # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v1 + uses: github/codeql-action/init@v2 with: languages: java # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v1 + uses: github/codeql-action/autobuild@v2 # ℹī¸ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -47,4 +69,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ffeda16a5..fe79e2fd0 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,15 +1,15 @@ name: Java CI -on: +on: pull_request: push: - branches: + branches: - main jobs: test: name: Package and run all tests - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v2 with: @@ -26,10 +26,26 @@ jobs: echo "Coveralls token available" COVERALLS_SKIP=false fi - echo "COVERALLS_SKIP=${COVERALLS_SKIP}" >> $GITHUB_ENV + echo "COVERALLS_SKIP=${COVERALLS_SKIP}" >> $GITHUB_ENV + - name: Set up JDK - uses: actions/setup-java@v1 + uses: actions/setup-java@v2 with: - java-version: 8 + distribution: 'adopt' + java-version: '8' + java-package: jdk + server-id: sonatype-nexus-snapshots # Value of the distributionManagement/repository/id field of the pom.xml + server-username: SONATYPE_USERNAME # env variable for username in deploy + server-password: SONATYPE_PASSWORD # env variable for token in deploy + # only signed artifacts will be released to maven central. 
this sets up things for the maven-gpg-plugin + gpg-private-key: ${{ secrets.HCOM_GPG_PRIVATE_KEY }} # Value of the GPG private key to import + gpg-passphrase: GPG_PASSPHRASE # env variable for GPG private key passphrase + # this creates a settings.xml on build server + settings-path: ${{ github.workspace }} + - name: Run Maven Targets - run: mvn package jacoco:report coveralls:report --batch-mode --show-version --activate-profiles coveralls -Dcoveralls.skip=$COVERALLS_SKIP -DrepoToken=${{ secrets.COVERALLS_REPO_TOKEN }} + run: mvn deploy jacoco:report coveralls:report --settings $GITHUB_WORKSPACE/settings.xml --batch-mode --show-version --activate-profiles coveralls,sonatype-oss-release-github-actions -Dcoveralls.skip=$COVERALLS_SKIP -DrepoToken=${{ secrets.COVERALLS_REPO_TOKEN }} + env: + SONATYPE_PASSWORD: ${{ secrets.HCOM_SONATYPE_PASSWORD }} + SONATYPE_USERNAME: ${{ secrets.HCOM_SONATYPE_USERNAME }} + GPG_PASSPHRASE: ${{ secrets.HCOM_GPG_PRIVATE_KEY_PASSPHRASE }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 63b41c150..2cd6d578d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,23 @@ +## [4.0.0] - TBD +### Updated Dependencies +* `hive` updated to `3.1.3` (was `3.1.2`). +* `hadoop` updated to `3.3.6` (was `3.1.0`). +* `spring-boot` updated to `2.7.13` (was `2.0.4.RELEASE`). +* `guava` updated to `31.1-jre` (was `23.0`). +* `guice` updated to `5.1.0` (was `4.0`). +* `junit` updated to `4.13.2` (was `4.13.1`). +* `aspectj` updated to `1.9.7` (was `1.8.9`). +* `hcommon-hive-metastore` updated to `1.4.2` (was `1.2.3`). + +### Newly Added Dependencies +* `lombok` - `1.18.24`. +* `jakarta.servlet-api` - `6.0.0`. +* `commons-lang3` - `3.12.0`. +* `joda-time` - `2.12.5`. + +### Fixed +* Added Lombok annotations in place of hand-written constructors, getters and setters. +* Fixed test cases broken by the dependency upgrades. + ## [3.9.5] - TBD ### Changed * `commons-io` updated to `2.7.` (was `2.6`). diff --git a/lombok.config b/lombok.config new file mode 100644 index 000000000..260dcff72 --- /dev/null +++ b/lombok.config @@ -0,0 +1,2 @@ +config.stopBubbling = true +lombok.addLombokGeneratedAnnotation = true \ No newline at end of file diff --git a/pom.xml b/pom.xml index d83eebd1c..45c6a96c3 100644 --- a/pom.xml +++ b/pom.xml @@ -7,18 +7,19 @@ 6.1.0 + com.hotels waggle-dance-parent 4.0.0-SNAPSHOT pom Waggle Dance Parent Hive Metastore federation service.
- https://github.com/HotelsDotCom/waggle-dance + https://github.com/ExpediaGroup/waggle-dance 2016 - scm:git:https://${GIT_USERNAME}:${GIT_PASSWORD}@github.com/HotelsDotCom/waggle-dance.git - scm:git:https://${GIT_USERNAME}:${GIT_PASSWORD}@github.com/HotelsDotCom/waggle-dance.git - https://github.com/HotelsDotCom/waggle-dance + scm:git:https://${GIT_USERNAME}:${GIT_PASSWORD}@github.com/ExpediaGroup/waggle-dance.git + scm:git:https://${GIT_USERNAME}:${GIT_PASSWORD}@github.com/ExpediaGroup/waggle-dance.git + https://github.com/ExpediaGroup/waggle-dance HEAD @@ -33,20 +34,23 @@ - 2.5.3 - 2.0.4.RELEASE - 3.1.0 + 3.0.1 + 2.7.13 + 3.3.6 2.2 - 3.1.2 - 4.13.1 + 3.1.3 + 4.13.2 3.5.15 - 3.1.5 - 1.9 - 1.8.9 + 1.14.0 + 1.9.7 5.0.0 - 23.0 - 4.0 - 1.2.3 + 31.1-jre + 5.1.0 + 1.4.2 + 2.12.5 + 6.0.0 + 1.18.24 + 3.12.0 @@ -64,54 +68,17 @@ commons-io commons-io - 2.7 + 2.13.0 - commons-lang - commons-lang - 2.6 + org.apache.commons + commons-lang3 + ${apache.commons.version} - javax.el - javax.el-api - 3.0.0 - - - org.glassfish.web - javax.el - 2.2.6 - - - junit - junit - ${junit.version} - test - - - org.hamcrest - hamcrest-core - - - - - org.mockito - mockito-core - ${mockito.version} - test - - - - net.bytebuddy - byte-buddy - 1.10.15 - test - - - net.bytebuddy - byte-buddy-agent - 1.10.15 - test + jakarta.el + jakarta.el-api + 5.0.1 org.springframework.boot org.apache.hadoop hadoop-common ${hadoop.version} compile + + + org.eclipse.jetty + * + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + + javax.servlet + * + + org.apache.hadoop hadoop-mapreduce-client-core ${hadoop.version} compile + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + + org.eclipse.jetty + * + + + org.eclipse.jetty.websocket + * + + + javax.servlet + * + + org.apache.hive hive-common ${hive.version} compile + + + org.apache.hive + hive-shims + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + + org.eclipse.jetty + * + + + log4j-slf4j-impl + org.apache.logging.log4j + + + org.mortbay.jetty + * + + + org.eclipse.jetty.aggregate + * + + + org.eclipse.jetty.orbit + * + + + org.codehaus.jettison + * + + + javax.servlet + * + + org.apache.hive - hive-metastore + hive-standalone-metastore ${hive.version} compile + + + log4j-web + org.apache.logging.log4j + + + javax.servlet + javax.servlet-api + + + log4j + log4j + + + org.apache.hbase + hbase-client + + + org.apache.hive + hive-shims + + org.apache.hive hive-exec ${hive.version} compile + + + log4j + log4j + + + org.pentaho + pentaho-aggdesigner-algorithm + + + org.apache.hive + hive-shims + + - org.hamcrest - hamcrest - ${hamcrest.version} - test + org.apache.hive + hive-service + ${hive.version} + + + org.eclipse.jetty + * + + + org.pentaho + pentaho-aggdesigner-algorithm + + + org.apache.hbase + * + + + javax.servlet + * + + org.yaml snakeyaml - 1.23 + 1.33 + + + com.hotels + hcommon-hive-metastore + ${hcommon-hive-metastore.version} + + + org.hibernate + hibernate-validator + + + com.hotels + hcommon-ssh + + + javax.validation + * + + + + + joda-time + joda-time + ${joda-time.version} + + + jakarta.servlet + jakarta.servlet-api + ${jakarta.version} + + + org.projectlombok + lombok + ${lombok.version} + provided + + + + org.apache.commons + commons-vfs2 + 2.9.0 + + + + + + junit + junit + ${junit.version} + test + + + org.hamcrest + hamcrest-core + + + + org.mockito + mockito-core + ${mockito.version} + test + + + + net.bytebuddy + byte-buddy + 1.14.5 + test + + + net.bytebuddy + byte-buddy-agent + 1.14.5 + test + + + org.hamcrest + hamcrest + ${hamcrest.version} + test
+ + org.apache.derby derby - 10.10.2.0 + 10.14.1.0 test @@ -174,11 +359,84 @@ beeju ${beeju.version} test + + + javax.servlet + * + + + org.eclipse.jetty + * + + + org.apache.hadoop + * + + + org.apache.hive + * + + + org.codehaus.jackson + * + + + org.slf4j + * + + + org.eclipse.jetty.aggregate + jetty-all + + + org.apache.hive.shims + * + + + com.sun.jersey + * + + - com.hotels - hcommon-hive-metastore - ${hcommon-hive-metastore.version} + fm.last.commons + lastcommons-test + 7.0.2 + test + + + org.mockito + mockito-all + + + log4j + log4j + + + + + org.springframework.boot + spring-boot-starter-test + ${spring-boot.version} + test + + + spring-boot-starter-logging + org.springframework.boot + + + org.mockito + * + + + org.hamcrest + * + + + org.junit.jupiter + * + + @@ -186,6 +444,18 @@ + + org.sonatype.plugins + nexus-staging-maven-plugin + ${nexus.staging.maven.plugin.version} + true + + sonatype-nexus-staging + https://oss.sonatype.org/ + true + 30 + + org.apache.maven.plugins maven-release-plugin @@ -203,6 +473,15 @@ true ${jdk.version} true + ignore + + + **/*.java + + true + + + org.springframework @@ -223,11 +502,30 @@ + default-compile process-classes compile + + + + + ${project.build.directory}/classes + + + + + default-testCompile + process-test-classes + + test-compile + + + ${project.build.directory}/test-classes + + @@ -272,5 +570,4 @@ - diff --git a/waggle-dance-api/pom.xml b/waggle-dance-api/pom.xml index ba51c9721..5b18aa203 100644 --- a/waggle-dance-api/pom.xml +++ b/waggle-dance-api/pom.xml @@ -42,6 +42,14 @@ spring-boot-configuration-processor true + + org.springframework.boot + spring-boot-starter-validation + + + org.springframework.boot + spring-boot-starter-jersey + @@ -54,7 +62,6 @@ org.apache.commons commons-vfs2 - 2.1 @@ -76,23 +83,18 @@ org.apache.commons commons-lang3 - - org.springframework.boot - spring-boot-starter-jersey - com.google.inject guice - - commons-lang - commons-lang - - com.hotels hcommon-hive-metastore + + org.projectlombok + lombok + diff --git a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/WaggleDanceException.java b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/WaggleDanceException.java index 68622200b..23080d5b6 100644 --- a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/WaggleDanceException.java +++ b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/WaggleDanceException.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/AbstractMetaStore.java b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/AbstractMetaStore.java index dc53571c5..fdc91fa88 100644 --- a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/AbstractMetaStore.java +++ b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/AbstractMetaStore.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -27,6 +27,8 @@ import javax.validation.constraints.NotBlank; import javax.validation.constraints.NotNull; +import lombok.NoArgsConstructor; + import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonSubTypes; @@ -42,6 +44,7 @@ @JsonSubTypes({ @Type(value = PrimaryMetaStore.class, name = "PRIMARY"), @Type(value = FederatedMetaStore.class, name = "FEDERATED") }) +@NoArgsConstructor public abstract class AbstractMetaStore { private String databasePrefix; private String hiveMetastoreFilterHook; @@ -57,8 +60,6 @@ public abstract class AbstractMetaStore { private long latency = 0; private transient @JsonIgnore HashBiMap databaseNameBiMapping = HashBiMap.create(); - public AbstractMetaStore() {} - public AbstractMetaStore(String name, String remoteMetaStoreUris, AccessControlType accessControlType) { this.name = name; this.remoteMetaStoreUris = remoteMetaStoreUris; diff --git a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/FederatedMetaStore.java b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/FederatedMetaStore.java index b57936529..196096959 100644 --- a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/FederatedMetaStore.java +++ b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/FederatedMetaStore.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2020 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,10 +18,11 @@ import java.util.Collections; import java.util.List; +import lombok.NoArgsConstructor; -public class FederatedMetaStore extends AbstractMetaStore { - public FederatedMetaStore() {} +@NoArgsConstructor +public class FederatedMetaStore extends AbstractMetaStore { public FederatedMetaStore(String name, String remoteMetaStoreUris) { this(name, remoteMetaStoreUris, AccessControlType.READ_ONLY); diff --git a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/Federations.java b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/Federations.java index 83d237492..6b568f487 100644 --- a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/Federations.java +++ b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/Federations.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
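A note on the Lombok hunks above: assuming standard Lombok semantics, @NoArgsConstructor generates exactly the public no-arg constructor that each hunk deletes, so the implicit super() chain used by the PRIMARY/FEDERATED subtypes during Jackson deserialization is preserved. A delombok-style sketch with an illustrative class (not from this PR):

```java
import lombok.NoArgsConstructor;

// Illustrative stand-in for AbstractMetaStore, showing what the annotation contributes.
@NoArgsConstructor
class MetaStoreLike {
  private String databasePrefix;
  // Lombok generates the equivalent of the removed hand-written constructor:
  // public MetaStoreLike() {}
}
```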
@@ -19,32 +19,14 @@ import javax.validation.Valid; -public class Federations { +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; +@AllArgsConstructor +@NoArgsConstructor +@Data +public class Federations { private @Valid PrimaryMetaStore primaryMetaStore; private @Valid List federatedMetaStores; - - public Federations() { - } - - public Federations(PrimaryMetaStore primaryMetaStore, List federatedMetaStores) { - this.primaryMetaStore = primaryMetaStore; - this.federatedMetaStores = federatedMetaStores; - } - - public PrimaryMetaStore getPrimaryMetaStore() { - return primaryMetaStore; - } - - public void setPrimaryMetaStore(PrimaryMetaStore primaryMetaStore) { - this.primaryMetaStore = primaryMetaStore; - } - - public List getFederatedMetaStores() { - return federatedMetaStores; - } - - public void setFederatedMetaStores(List federatedMetaStores) { - this.federatedMetaStores = federatedMetaStores; - } } diff --git a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/MappedTables.java b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/MappedTables.java index dbe5e34fb..a4e5fd575 100644 --- a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/MappedTables.java +++ b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/MappedTables.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,31 +20,14 @@ import javax.validation.constraints.NotBlank; import javax.validation.constraints.NotEmpty; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@AllArgsConstructor +@NoArgsConstructor +@Data public class MappedTables { @NotBlank private String database; @NotEmpty private List mappedTables; - - public MappedTables() { - } - - public MappedTables(String database, List mappedTables) { - this.database = database; - this.mappedTables = mappedTables; - } - - public String getDatabase() { - return database; - } - - public void setDatabase(String database) { - this.database = database; - } - - public List getMappedTables() { - return mappedTables; - } - - public void setMappedTables(List mappedTables) { - this.mappedTables = mappedTables; - } } diff --git a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/MetaStoreStatus.java b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/MetaStoreStatus.java index 199e338a0..6eec4099c 100644 --- a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/MetaStoreStatus.java +++ b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/MetaStoreStatus.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
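With @Data on Federations and MappedTables, the generated accessors keep the same names as the removed boilerplate, so existing callers are unaffected; note, however, that @Data also generates equals/hashCode/toString, which neither class previously defined. A minimal usage sketch, assuming standard Lombok behaviour (class name and package taken from the hunk above):

```java
import java.util.Arrays;
import java.util.List;

import com.hotels.bdp.waggledance.api.model.MappedTables;

public class MappedTablesDemo {
  public static void main(String[] args) {
    // Constructor from @AllArgsConstructor, accessors from @Data
    MappedTables mapped = new MappedTables("db1", Arrays.asList("tbl1", "tbl2"));
    mapped.setDatabase("db2");
    List<String> tables = mapped.getMappedTables();
    System.out.println(mapped + " -> " + tables); // toString() is now generated too
  }
}
```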
@@ -15,10 +15,20 @@ */ package com.hotels.bdp.waggledance.api.model; + +import lombok.AllArgsConstructor; + +@AllArgsConstructor public enum MetaStoreStatus { - AVAILABLE, - UNAVAILABLE, - UNKNOWN + AVAILABLE(0), + UNAVAILABLE(1), + UNKNOWN(2); + + private final int intValue; + + public int getIntValue() { + return intValue; + } } diff --git a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/PrimaryMetaStore.java b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/PrimaryMetaStore.java index cec7cd149..d72bb41c0 100644 --- a/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/PrimaryMetaStore.java +++ b/waggle-dance-api/src/main/java/com/hotels/bdp/waggledance/api/model/PrimaryMetaStore.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,12 +20,13 @@ import javax.validation.constraints.NotNull; +import lombok.NoArgsConstructor; + +@NoArgsConstructor public class PrimaryMetaStore extends AbstractMetaStore { private static final String EMPTY_PREFIX = ""; - public PrimaryMetaStore() {} - public PrimaryMetaStore( String name, String remoteMetaStoreUris, diff --git a/waggle-dance-core/pom.xml b/waggle-dance-core/pom.xml index ea7d40f99..2deeb3552 100644 --- a/waggle-dance-core/pom.xml +++ b/waggle-dance-core/pom.xml @@ -10,23 +10,30 @@ waggle-dance-core - 2.0.7 - 0.22.6 + 2.0.9 + 0.25.1 + + org.apache.hive.shims + hive-shims-common + 3.1.0 + com.hotels hcommon-ssh 1.0.2 + + + javax.validation + * + + - javax.el - javax.el-api - - - org.glassfish.web - javax.el + jakarta.el + jakarta.el-api @@ -67,6 +74,14 @@ org.springframework.boot spring-boot-starter-aop + + org.springframework.boot + spring-boot-starter-validation + + + org.springframework.boot + spring-boot-starter-jersey + org.springframework @@ -87,8 +102,8 @@ - javax.servlet - javax.servlet-api + jakarta.servlet + jakarta.servlet-api @@ -117,74 +132,29 @@ org.apache.hive hive-common - - - log4j - log4j - - org.apache.hive - hive-metastore - - - log4j-web - org.apache.logging.log4j - - - javax.servlet - javax.servlet-api - - - log4j - log4j - - - org.apache.hbase - hbase-client - - + hive-standalone-metastore org.apache.hive hive-exec - - - log4j - log4j - - + + + org.apache.hive + hive-service + org.apache.hadoop hadoop-common - - - log4j - log4j - - - org.slf4j - slf4j-log4j12 - - org.apache.hadoop hadoop-mapreduce-client-core - - - log4j - log4j - - - org.slf4j - slf4j-log4j12 - - @@ -196,7 +166,6 @@ org.apache.commons commons-vfs2 - 2.1 @@ -214,9 +183,10 @@ org.apache.commons commons-lang3 + - org.springframework.boot - spring-boot-starter-jersey + org.projectlombok + lombok @@ -224,23 +194,20 @@ com.jcabi jcabi-aspects ${jcabi-aspects.version} + + + javax.validation + * + + + org.springframework.boot spring-boot-starter-test test - - - org.mockito - * - - - org.hamcrest - * - - org.hamcrest @@ -278,18 +245,7 @@ fm.last.commons lastcommons-test - 5.2.1 test - - - org.mockito - mockito-all - - - log4j - log4j - - org.awaitility diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/WaggleDance.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/WaggleDance.java index b6577bfb1..f68145a4f 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/WaggleDance.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/WaggleDance.java @@ 
-1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,8 +22,6 @@ import javax.validation.ConstraintViolation; import javax.validation.ConstraintViolationException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.beans.factory.BeanCreationException; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; @@ -38,6 +36,8 @@ import org.springframework.validation.BindException; import org.springframework.validation.ObjectError; +import lombok.extern.log4j.Log4j2; + import com.google.common.annotations.VisibleForTesting; import com.hotels.bdp.waggledance.manifest.ManifestAttributes; @@ -46,8 +46,8 @@ @EnableConfigurationProperties @EnableSpringConfigured @EnableAspectJAutoProxy(proxyTargetClass = true) +@Log4j2 public class WaggleDance { - private static final Logger LOG = LoggerFactory.getLogger(WaggleDance.class); public interface ContextListener { void onStart(ApplicationContext context); @@ -118,9 +118,9 @@ private static void printHelp(List allErrors) { } private static void logConstraintErrors(ConstraintViolationException constraintViolationException) { - LOG.error("Validation errors:"); + log.error("Validation errors:"); for (ConstraintViolation violation : constraintViolationException.getConstraintViolations()) { - LOG.error(violation.toString()); + log.error(violation.toString()); } } @@ -131,6 +131,6 @@ private static void logConstraintErrors(ConstraintViolationException constraintV private static void logVersionInfo() { ManifestAttributes manifestAttributes = new ManifestAttributes(WaggleDance.class); - LOG.info("{}", manifestAttributes); + log.info("{}", manifestAttributes); } } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/CloseableThriftHiveMetastoreIfaceClientFactory.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/CloseableThriftHiveMetastoreIfaceClientFactory.java index 3af768528..ca62e4063 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/CloseableThriftHiveMetastoreIfaceClientFactory.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/CloseableThriftHiveMetastoreIfaceClientFactory.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
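The next hunk replaces the hand-written constructor of CloseableThriftHiveMetastoreIfaceClientFactory with @AllArgsConstructor and reorders the fields. That reorder is load-bearing: assuming standard Lombok behaviour, @AllArgsConstructor takes one parameter per field in declaration order and skips final fields that are initialized inline, so moving defaultConnectionTimeout below waggleDanceConfiguration keeps the generated signature identical to the deleted constructor. A small sketch of that rule:

```java
import lombok.AllArgsConstructor;

@AllArgsConstructor
class OrderDemo {
  private final String first;    // 1st generated constructor parameter
  private final String second;   // 2nd generated constructor parameter
  private final int preset = 42; // initialized final field: skipped by Lombok
}

class OrderDemoUsage {
  // The generated constructor is OrderDemo(String, String)
  OrderDemo demo = new OrderDemo("a", "b");
}
```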
@@ -25,30 +25,34 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import lombok.AllArgsConstructor; + import com.hotels.bdp.waggledance.api.model.AbstractMetaStore; import com.hotels.bdp.waggledance.client.tunnelling.TunnelingMetaStoreClientFactory; import com.hotels.bdp.waggledance.conf.WaggleDanceConfiguration; import com.hotels.hcommon.hive.metastore.conf.HiveConfFactory; import com.hotels.hcommon.hive.metastore.util.MetaStoreUriNormaliser; +@AllArgsConstructor public class CloseableThriftHiveMetastoreIfaceClientFactory { private static final int DEFAULT_CLIENT_FACTORY_RECONNECTION_RETRY = 3; private final TunnelingMetaStoreClientFactory tunnelingMetaStoreClientFactory; private final DefaultMetaStoreClientFactory defaultMetaStoreClientFactory; - private final int defaultConnectionTimeout = (int) TimeUnit.SECONDS.toMillis(2L); private final WaggleDanceConfiguration waggleDanceConfiguration; + private final int defaultConnectionTimeout = (int) TimeUnit.SECONDS.toMillis(2L); - public CloseableThriftHiveMetastoreIfaceClientFactory( - TunnelingMetaStoreClientFactory tunnelingMetaStoreClientFactory, - DefaultMetaStoreClientFactory defaultMetaStoreClientFactory, - WaggleDanceConfiguration waggleDanceConfiguration) { - this.tunnelingMetaStoreClientFactory = tunnelingMetaStoreClientFactory; - this.defaultMetaStoreClientFactory = defaultMetaStoreClientFactory; - this.waggleDanceConfiguration = waggleDanceConfiguration; + public CloseableThriftHiveMetastoreIface newInstance(AbstractMetaStore metaStore) { + Map properties = new HashMap<>(); + if (waggleDanceConfiguration.getConfigurationProperties() != null) { + properties.putAll(waggleDanceConfiguration.getConfigurationProperties()); + } + return newHiveInstance(metaStore, properties); } - public CloseableThriftHiveMetastoreIface newInstance(AbstractMetaStore metaStore) { + private CloseableThriftHiveMetastoreIface newHiveInstance( + AbstractMetaStore metaStore, + Map properties) { String uris = MetaStoreUriNormaliser.normaliseMetaStoreUris(metaStore.getRemoteMetaStoreUris()); String name = metaStore.getName().toLowerCase(Locale.ROOT); @@ -61,11 +65,7 @@ public CloseableThriftHiveMetastoreIface newInstance(AbstractMetaStore metaStore .newInstance(uris, metaStore.getMetastoreTunnel(), name, DEFAULT_CLIENT_FACTORY_RECONNECTION_RETRY, connectionTimeout, waggleDanceConfiguration.getConfigurationProperties()); } - Map properties = new HashMap<>(); properties.put(ConfVars.METASTOREURIS.varname, uris); - if (waggleDanceConfiguration.getConfigurationProperties() != null) { - properties.putAll(waggleDanceConfiguration.getConfigurationProperties()); - } HiveConfFactory confFactory = new HiveConfFactory(Collections.emptyList(), properties); return defaultMetaStoreClientFactory .newInstance(confFactory.newInstance(), "waggledance-" + name, DEFAULT_CLIENT_FACTORY_RECONNECTION_RETRY, diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/DefaultMetaStoreClientFactory.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/DefaultMetaStoreClientFactory.java index 11fa1a2f5..cd9d09725 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/DefaultMetaStoreClientFactory.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/DefaultMetaStoreClientFactory.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,96 +15,123 @@ */ package com.hotels.bdp.waggledance.client; +import java.io.IOException; import java.lang.reflect.InvocationHandler; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.Proxy; +import java.lang.reflect.UndeclaredThrowableException; +import java.util.List; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.utils.SecurityUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.thrift.transport.TTransportException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import lombok.extern.log4j.Log4j2; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; import com.hotels.bdp.waggledance.client.compatibility.HiveCompatibleThriftHiveMetastoreIfaceFactory; +import com.hotels.bdp.waggledance.server.TokenWrappingHMSHandler; import com.hotels.hcommon.hive.metastore.exception.MetastoreUnavailableException; + public class DefaultMetaStoreClientFactory implements MetaStoreClientFactory { static final Class[] INTERFACES = new Class[] { CloseableThriftHiveMetastoreIface.class }; + @Log4j2 private static class ReconnectingMetastoreClientInvocationHandler implements InvocationHandler { - private static final Logger LOG = LoggerFactory.getLogger(ReconnectingMetastoreClientInvocationHandler.class); private final ThriftMetastoreClientManager base; private final String name; private final int maxRetries; + private HiveUgiArgs cachedUgi = null; + private ReconnectingMetastoreClientInvocationHandler( - String name, - int maxRetries, - ThriftMetastoreClientManager base) { + String name, + int maxRetries, + ThriftMetastoreClientManager base) { this.name = name; this.maxRetries = maxRetries; this.base = base; } + @SuppressWarnings("unchecked") @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { int attempt = 0; // close() and isOpen() methods delegate to base HiveMetastoreClient switch (method.getName()) { - case "isOpen": - try { - reconnectIfDisconnected(); - return base.isOpen(); - } catch (Exception e) { - LOG.debug("Error re-opening client at isOpen(): {}", e.getMessage()); - return false; - } - case "close": - if (base != null) { - base.close(); - } - return null; - default: - base.open(); - do { + case "isOpen": try { - return method.invoke(base.getClient(), args); - } catch (InvocationTargetException e) { - Throwable realException = e.getTargetException(); - if (TTransportException.class.isAssignableFrom(realException.getClass())) { - if (attempt < maxRetries && shouldRetry(method)) { - LOG.debug("TTransportException captured in client {}. Reconnecting... 
", name); - base.reconnect(); - continue; - } - throw new MetastoreUnavailableException("Client " + name + " is not available", realException); - } - throw realException; + reconnectIfDisconnected(); + return base.isOpen(); + } catch (Exception e) { + log.debug("Error re-opening client at isOpen(): {}", e.getMessage()); + return false; + } + case "close": + if (base != null) { + base.close(); } - } while (++attempt <= maxRetries); - break; + return null; + case "set_ugi": + String user = (String) args[0]; + List<String> groups = (List<String>) args[1]; + cachedUgi = new HiveUgiArgs(user, groups); + if (base.isOpen()) { + log + .info("calling #set_ugi (on already open client) for user '{}', on metastore {}", cachedUgi.getUser(), + name); + return doRealCall(method, args, attempt); + } else { + // delay call until we get the next non set_ugi call, this helps avoid unnecessary calls to Federated + // Metastores. + return Lists.newArrayList(user); + } + default: + base.open(cachedUgi); + return doRealCall(method, args, attempt); } - throw new RuntimeException("Unreachable code"); + } + private Object doRealCall(Method method, Object[] args, int attempt) throws IllegalAccessException, Throwable { + do { + try { + return method.invoke(base.getClient(), args); + } catch (InvocationTargetException e) { + Throwable realException = e.getTargetException(); + if (TTransportException.class.isAssignableFrom(realException.getClass())) { + if (attempt < maxRetries && shouldRetry(method)) { + log.debug("TTransportException captured in client {}. Reconnecting... ", name); + base.reconnect(cachedUgi); + continue; + } + throw new MetastoreUnavailableException("Client " + name + " is not available", realException); + } + throw realException; + } + } while (++attempt <= maxRetries); + throw new RuntimeException("Unreachable code"); } private boolean shouldRetry(Method method) { switch (method.getName()) { - case "shutdown": - return false; - default: - return true; + case "shutdown": + return false; + default: + return true; } } private void reconnectIfDisconnected() { try { if (!base.isOpen()) { - base.reconnect(); + base.reconnect(cachedUgi); } } catch (Exception e) { throw new MetastoreUnavailableException("Client " + name + " is not available", e); @@ -113,6 +140,76 @@ private void reconnectIfDisconnected() { } + @Log4j2 + private static class SaslMetastoreClientHander implements InvocationHandler { + + private final CloseableThriftHiveMetastoreIface baseHandler; + private final ThriftMetastoreClientManager clientManager; + private final String tokenSignature = "WAGGLEDANCETOKEN"; + + private String delegationToken; + + public static CloseableThriftHiveMetastoreIface newProxyInstance( + CloseableThriftHiveMetastoreIface baseHandler, + ThriftMetastoreClientManager clientManager) { + return (CloseableThriftHiveMetastoreIface) Proxy.newProxyInstance(SaslMetastoreClientHander.class.getClassLoader(), + INTERFACES, new SaslMetastoreClientHander(baseHandler, clientManager)); + } + + private SaslMetastoreClientHander( + CloseableThriftHiveMetastoreIface handler, + ThriftMetastoreClientManager clientManager) { + this.baseHandler = handler; + this.clientManager = clientManager; + } + + @SuppressWarnings("unchecked") + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + try { + switch (method.getName()) { + case "get_delegation_token": + try { + clientManager.open(); + Object token = method.invoke(baseHandler, args); + this.delegationToken = (String) token; + clientManager.close(); +
setTokenStr2Ugi(UserGroupInformation.getCurrentUser(), (String) token); + clientManager.open(); + return token; + } catch (IOException e) { + throw new MetastoreUnavailableException("Couldn't setup delegation token in the ugi: ", e); + } + default: + genToken(); + return method.invoke(baseHandler, args); + } + } catch (InvocationTargetException e) { + throw e.getTargetException(); + } catch (UndeclaredThrowableException e) { + throw e.getCause(); + } + } + + private void genToken() throws Throwable { + UserGroupInformation currUser = null; + if (delegationToken == null && (currUser = UserGroupInformation.getCurrentUser()) + != UserGroupInformation.getLoginUser()) { + + log.info(String.format("set %s delegation token", currUser.getShortUserName())); + String token = TokenWrappingHMSHandler.getToken(); + setTokenStr2Ugi(currUser, token); + delegationToken = token; + clientManager.close(); + } + } + + private void setTokenStr2Ugi(UserGroupInformation currUser, String token) throws IOException { + String newTokenSignature = clientManager.generateNewTokenSignature(tokenSignature); + SecurityUtils.setTokenStr(currUser, token, newTokenSignature); + } + } + /* * (non-Javadoc) * @see com.hotels.bdp.waggledance.client.MetaStoreClientFactoryI#newInstance(org.apache.hadoop.hive.conf.HiveConf, @@ -120,23 +217,31 @@ private void reconnectIfDisconnected() { */ @Override public CloseableThriftHiveMetastoreIface newInstance( - HiveConf hiveConf, - String name, - int reconnectionRetries, - int connectionTimeout) { + HiveConf hiveConf, + String name, + int reconnectionRetries, + int connectionTimeout) { return newInstance(name, reconnectionRetries, new ThriftMetastoreClientManager(hiveConf, - new HiveCompatibleThriftHiveMetastoreIfaceFactory(), connectionTimeout)); + new HiveCompatibleThriftHiveMetastoreIfaceFactory(), connectionTimeout)); } @VisibleForTesting CloseableThriftHiveMetastoreIface newInstance( - String name, - int reconnectionRetries, - ThriftMetastoreClientManager base) { + String name, + int reconnectionRetries, + ThriftMetastoreClientManager base) { ReconnectingMetastoreClientInvocationHandler reconnectingHandler = new ReconnectingMetastoreClientInvocationHandler( - name, reconnectionRetries, base); - return (CloseableThriftHiveMetastoreIface) Proxy - .newProxyInstance(getClass().getClassLoader(), INTERFACES, reconnectingHandler); + name, reconnectionRetries, base); + if (base.isSaslEnabled()) { + CloseableThriftHiveMetastoreIface ifaceReconnectingHandler = (CloseableThriftHiveMetastoreIface) Proxy + .newProxyInstance(getClass().getClassLoader(), INTERFACES, reconnectingHandler); + // wrapping the SaslMetastoreClientHander to handle delegation token if using sasl + return SaslMetastoreClientHander.newProxyInstance(ifaceReconnectingHandler, base); + } else { + return (CloseableThriftHiveMetastoreIface) Proxy + .newProxyInstance(getClass().getClassLoader(), INTERFACES, reconnectingHandler); + } + } -} +} \ No newline at end of file diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/HiveUgiArgs.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/HiveUgiArgs.java new file mode 100644 index 000000000..9ab8aff9b --- /dev/null +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/HiveUgiArgs.java @@ -0,0 +1,79 @@ +/** + * Copyright (C) 2016-2023 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.hotels.bdp.waggledance.client; + +import java.util.ArrayList; +import java.util.List; + +import lombok.Getter; + +public class HiveUgiArgs { + + public static final HiveUgiArgs WAGGLE_DANCE_DEFAULT = new HiveUgiArgs("waggledance", null); + + @Getter + private final String user; + @Getter + private final List groups; + + public HiveUgiArgs(String user, List groups) { + this.user = user; + if (groups == null) { + this.groups = new ArrayList<>(); + } else { + this.groups = new ArrayList<>(groups); + } + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((groups == null) ? 0 : groups.hashCode()); + result = prime * result + ((user == null) ? 0 : user.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + HiveUgiArgs other = (HiveUgiArgs) obj; + if (groups == null) { + if (other.groups != null) { + return false; + } + } else if (!groups.equals(other.groups)) { + return false; + } + if (user == null) { + if (other.user != null) { + return false; + } + } else if (!user.equals(other.user)) { + return false; + } + return true; + } + +} diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManager.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManager.java index ff23c4739..6ff94e8d0 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManager.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManager.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
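HiveUgiArgs (added above) is a small value object for the cached set_ugi arguments: the constructor defensively copies the group list (or substitutes an empty one), and equals/hashCode let the reconnect logic compare cached identities. A usage sketch with made-up user and group names:

```java
import java.util.Arrays;

import com.hotels.bdp.waggledance.client.HiveUgiArgs;

public class HiveUgiArgsDemo {
  public static void main(String[] args) {
    // Hypothetical user/groups, for illustration only
    HiveUgiArgs ugi = new HiveUgiArgs("etl-user", Arrays.asList("analysts"));
    System.out.println(ugi.getUser() + " " + ugi.getGroups()); // etl-user [analysts]

    // A null group list becomes an empty, mutable copy rather than staying null
    HiveUgiArgs noGroups = new HiveUgiArgs("etl-user", null);
    System.out.println(noGroups.getGroups().isEmpty()); // true
  }
}
```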
@@ -25,13 +25,13 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveConfUtil; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.SecurityUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; +import org.apache.hive.service.auth.KerberosSaslHelper; import org.apache.thrift.TException; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TCompactProtocol; @@ -39,14 +39,14 @@ import org.apache.thrift.transport.TFramedTransport; import org.apache.thrift.transport.TSocket; import org.apache.thrift.transport.TTransport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import lombok.extern.log4j.Log4j2; import com.hotels.bdp.waggledance.client.compatibility.HiveCompatibleThriftHiveMetastoreIfaceFactory; +@Log4j2 class ThriftMetastoreClientManager implements Closeable { - private static final Logger LOG = LoggerFactory.getLogger(ThriftMetastoreClientManager.class); private static final AtomicInteger CONN_COUNT = new AtomicInteger(0); private final HiveConf conf; @@ -60,6 +60,7 @@ class ThriftMetastoreClientManager implements Closeable { private long retryDelaySeconds = 0; private final int connectionTimeout; + private final String msUri; ThriftMetastoreClientManager( HiveConf conf, @@ -68,14 +69,14 @@ class ThriftMetastoreClientManager implements Closeable { this.conf = conf; this.hiveCompatibleThriftHiveMetastoreIfaceFactory = hiveCompatibleThriftHiveMetastoreIfaceFactory; this.connectionTimeout = connectionTimeout; - String msUri = conf.getVar(ConfVars.METASTOREURIS); + msUri = conf.getVar(ConfVars.METASTOREURIS); if (HiveConfUtil.isEmbeddedMetaStore(msUri)) { throw new RuntimeException("You can't waggle an embedded metastore"); } // get the number retries - retries = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES); + retries = HiveConf.getIntVar(conf, ConfVars.METASTORETHRIFTCONNECTIONRETRIES); retryDelaySeconds = conf.getTimeVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS); // user wants file store based configuration @@ -95,34 +96,39 @@ class ThriftMetastoreClientManager implements Closeable { throw (e); } catch (Exception e) { String exInfo = "Got exception: " + e.getClass().getName() + " " + e.getMessage(); - LOG.error(exInfo, e); + log.error(exInfo, e); throw new RuntimeException(exInfo, e); } } else { - LOG.error("NOT getting uris from conf"); + log.error("NOT getting uris from conf"); throw new RuntimeException("MetaStoreURIs not found in conf file"); } } void open() { + open(null); + } + + void open(HiveUgiArgs ugiArgs) { if (isConnected) { return; } TException te = null; boolean useSasl = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL); + boolean useSsl = conf.getBoolVar(ConfVars.HIVE_METASTORE_USE_SSL); boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT); boolean useCompactProtocol = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_COMPACT_PROTOCOL); int clientSocketTimeout = (int) 
conf.getTimeVar(ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS); for (int attempt = 0; !isConnected && (attempt < retries); ++attempt) { for (URI store : metastoreUris) { - LOG.info("Trying to connect to metastore with URI " + store); + log.info("Trying to connect to metastore with URI {}", store); try { transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout, connectionTimeout); if (useSasl) { // Wrap thrift connection with SASL for secure connection. try { - HadoopThriftAuthBridge.Client authBridge = ShimLoader.getHadoopThriftAuthBridge().createClient(); + UserGroupInformation.setConfiguration(conf); // check if we should use delegation tokens to authenticate // the call below gets hold of the tokens if they are set up by hadoop // this should happen on the map/reduce tasks if the client added the // tokens into hadoop's credential store in the front end during job // submission. String tokenSig = conf.getVar(ConfVars.METASTORE_TOKEN_SIGNATURE); // tokenSig could be null - String tokenStrForm = Utils.getTokenStrForm(tokenSig); + String tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig); if (tokenStrForm != null) { // authenticate using delegation tokens via the "DIGEST" mechanism - transport = authBridge - .createClientTransport(null, store.getHost(), "DIGEST", tokenStrForm, transport, - MetaStoreUtils.getMetaStoreSaslProperties(conf)); + transport = KerberosSaslHelper + .getTokenTransport(tokenStrForm, store.getHost(), transport, + MetaStoreUtils.getMetaStoreSaslProperties(conf, useSsl)); } else { - String principalConfig = conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL); - transport = authBridge - .createClientTransport(principalConfig, store.getHost(), "KERBEROS", null, transport, - MetaStoreUtils.getMetaStoreSaslProperties(conf)); + String principalConfig = conf.getVar(ConfVars.METASTORE_KERBEROS_PRINCIPAL); + transport = KerberosSaslHelper + .getKerberosTransport(principalConfig, store.getHost(), transport, + MetaStoreUtils.getMetaStoreSaslProperties(conf, useSsl), false); } } catch (IOException ioe) { - LOG.error("Couldn't create client transport", ioe); + log.error("Couldn't create client transport, URI " + store, ioe); throw new MetaException(ioe.toString()); } } else if (useFramedTransport) { @@ -159,24 +165,30 @@ void open() { client = hiveCompatibleThriftHiveMetastoreIfaceFactory.newInstance(new ThriftHiveMetastore.Client(protocol)); try { transport.open(); - LOG + log .info("Opened a connection to metastore '" + store + "', total current connections to all metastores: " + CONN_COUNT.incrementAndGet()); isConnected = true; + if (ugiArgs != null) { + log.info("calling #set_ugi for user '{}', on URI {}", ugiArgs.getUser(), store); + client.set_ugi(ugiArgs.getUser(), ugiArgs.getGroups()); + } else { + log.debug("Connection opened without #set_ugi call, on URI {}", store); + } } catch (TException e) { te = e; - if (LOG.isDebugEnabled()) { - LOG.warn("Failed to connect to the MetaStore Server...", e); + if (log.isDebugEnabled()) { + log.warn("Failed to connect to the MetaStore Server, URI " + store, e); } else { // Don't print full exception trace if DEBUG is not on.
- LOG.warn("Failed to connect to the MetaStore Server..."); + log.warn("Failed to connect to the MetaStore Server, URI {}", store); } } } catch (MetaException e) { - LOG.error("Unable to connect to metastore with URI " + store + " in attempt " + attempt, e); + log.error("Unable to connect to metastore with URI " + store + " in attempt " + attempt, e); } if (isConnected) { break; @@ -185,26 +197,48 @@ void open() { // Wait before launching the next round of connection retries. if (!isConnected && (retryDelaySeconds > 0) && ((attempt + 1) < retries)) { try { - LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt."); + log.info("Waiting {} seconds before next connection attempt.", retryDelaySeconds); Thread.sleep(retryDelaySeconds * 1000); } catch (InterruptedException ignore) {} } } if (!isConnected) { - throw new RuntimeException("Could not connect to meta store using any of the URIs provided. Most recent failure: " + throw new RuntimeException("Could not connect to meta store using any of the URIs [" + + msUri + + "] provided. Most recent failure: " + StringUtils.stringifyException(te)); } - LOG.info("Connected to metastore."); + log.debug("Connected to metastore."); } - void reconnect() { + void reconnect(HiveUgiArgs ugiArgs) { close(); // Swap the first element of the metastoreUris[] with a random element from the rest // of the array. Rationale being that this method will generally be called when the default // connection has died and the default connection is likely to be the first array element. promoteRandomMetaStoreURI(); - open(); + open(ugiArgs); + } + + public String getHiveConfValue(String key, String defaultValue) { + return conf.get(key, defaultValue); + } + + public void setHiveConfValue(String key, String value) { + conf.set(key, value); + } + + public String generateNewTokenSignature(String defaultTokenSignature) { + String tokenSignature = conf.get(ConfVars.METASTORE_TOKEN_SIGNATURE.varname, + defaultTokenSignature); + conf.set(ConfVars.METASTORE_TOKEN_SIGNATURE.varname, + tokenSignature); + return tokenSignature; + } + + public Boolean isSaslEnabled() { + return conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL); } @Override @@ -218,7 +252,7 @@ public void close() { client.shutdown(); } } catch (TException e) { - LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e); + log.debug("Unable to shutdown metastore client. Will try closing transport directly.", e); } // Transport would have got closed via client.shutdown(), so we don't need this, but // just in case, we make this call. 
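The SASL branch of open() above now selects between two transports from KerberosSaslHelper instead of going through HadoopThriftAuthBridge. A condensed, compile-level sketch of that selection, using the same calls and signatures as the hunk (error handling omitted):

```java
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
import org.apache.hive.service.auth.KerberosSaslHelper;
import org.apache.thrift.transport.TTransport;

final class SaslTransportSelection {
  static TTransport select(HiveConf conf, TTransport transport, String host,
      String tokenSig, String principalConfig, boolean useSsl) throws Exception {
    Map<String, String> saslProps = MetaStoreUtils.getMetaStoreSaslProperties(conf, useSsl);
    String tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig);
    if (tokenStrForm != null) {
      // A delegation token is available: authenticate via the DIGEST mechanism
      return KerberosSaslHelper.getTokenTransport(tokenStrForm, host, transport, saslProps);
    }
    // No token: full Kerberos handshake against the metastore principal
    return KerberosSaslHelper.getKerberosTransport(principalConfig, host, transport, saslProps, false);
  }
}
```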
@@ -226,7 +260,7 @@ public void close() { transport.close(); transport = null; } - LOG.info("Closed a connection to metastore, current connections: " + CONN_COUNT.decrementAndGet()); + log.info("Closed a connection to metastore, current connections: {}", CONN_COUNT.decrementAndGet()); } boolean isOpen() { diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/compatibility/HiveCompatibleThriftHiveMetastoreIfaceFactory.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/compatibility/HiveCompatibleThriftHiveMetastoreIfaceFactory.java index e0e84f46d..5dc77075c 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/compatibility/HiveCompatibleThriftHiveMetastoreIfaceFactory.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/compatibility/HiveCompatibleThriftHiveMetastoreIfaceFactory.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,27 +22,21 @@ import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; import org.apache.thrift.TApplicationException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import lombok.AllArgsConstructor; +import lombok.extern.log4j.Log4j2; import com.hotels.bdp.waggledance.client.CloseableThriftHiveMetastoreIface; +@Log4j2 public class HiveCompatibleThriftHiveMetastoreIfaceFactory { - private static final Logger log = LoggerFactory.getLogger(HiveCompatibleThriftHiveMetastoreIfaceFactory.class); - + @AllArgsConstructor private static class ThriftMetaStoreClientInvocationHandler implements InvocationHandler { private final ThriftHiveMetastore.Client delegate; private final HiveThriftMetaStoreIfaceCompatibility compatibility; - ThriftMetaStoreClientInvocationHandler( - ThriftHiveMetastore.Client delegate, - HiveThriftMetaStoreIfaceCompatibility compatibility) { - this.delegate = delegate; - this.compatibility = compatibility; - } - @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { try { diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/compatibility/HiveThriftMetaStoreIfaceCompatibility1xx.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/compatibility/HiveThriftMetaStoreIfaceCompatibility1xx.java index 6555384d1..4bc0379d5 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/compatibility/HiveThriftMetaStoreIfaceCompatibility1xx.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/compatibility/HiveThriftMetaStoreIfaceCompatibility1xx.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
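Stepping back, DefaultMetaStoreClientFactory now builds the client out of stacked java.lang.reflect.Proxy layers: the compatibility shim sits innermost, wrapped by the reconnecting handler, with the SASL handler outermost when base.isSaslEnabled(). A generic, self-contained illustration of that layering pattern (not Waggle Dance code):

```java
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

interface Greeter { String greet(String name); }

public class LayeredProxyDemo {
  static Greeter wrap(Greeter inner, String label) {
    InvocationHandler handler = (proxy, method, args) -> {
      System.out.println("entering layer: " + label); // cross-cutting concern
      return method.invoke(inner, args);              // delegate inward
    };
    return (Greeter) Proxy.newProxyInstance(
        Greeter.class.getClassLoader(), new Class<?>[] { Greeter.class }, handler);
  }

  public static void main(String[] args) {
    Greeter base = name -> "hello " + name;
    Greeter layered = wrap(wrap(base, "reconnect"), "sasl"); // sasl outermost
    System.out.println(layered.greet("waggledance"));
  }
}
```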
@@ -34,14 +34,13 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.thrift.TException; +import lombok.AllArgsConstructor; + +@AllArgsConstructor public class HiveThriftMetaStoreIfaceCompatibility1xx implements HiveThriftMetaStoreIfaceCompatibility { private final ThriftHiveMetastore.Client client; - public HiveThriftMetaStoreIfaceCompatibility1xx(ThriftHiveMetastore.Client client) { - this.client = client; - } - /* * (non-Javadoc) * @see diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/tunnelling/HiveMetaStoreClientSupplier.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/tunnelling/HiveMetaStoreClientSupplier.java index f795988c5..ec4c760ef 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/tunnelling/HiveMetaStoreClientSupplier.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/client/tunnelling/HiveMetaStoreClientSupplier.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,10 +17,13 @@ import org.apache.hadoop.hive.conf.HiveConf; +import lombok.AllArgsConstructor; + import com.hotels.bdp.waggledance.client.CloseableThriftHiveMetastoreIface; import com.hotels.bdp.waggledance.client.MetaStoreClientFactory; import com.hotels.hcommon.ssh.TunnelableSupplier; +@AllArgsConstructor class HiveMetaStoreClientSupplier implements TunnelableSupplier { private final MetaStoreClientFactory factory; private final HiveConf hiveConf; @@ -28,14 +31,6 @@ class HiveMetaStoreClientSupplier implements TunnelableSupplier 1) { uri = urisSplit[0]; - LOG.debug("Can't support multiple uris '{}' for tunneling endpoint, using first '{}'", uris, uri); + log.debug("Can't support multiple uris '{}' for tunneling endpoint, using first '{}'", uris, uri); } String localHost = metastoreTunnel.getLocalhost(); int localPort = getLocalPort(); @@ -87,7 +87,7 @@ public CloseableThriftHiveMetastoreIface newInstance( TunnelableFactory tunnelableFactory = tunnelableFactorySupplier .get(metastoreTunnel); - LOG + log .info("Metastore URI {} is being proxied through {}", uri, localHiveConf.getVar(HiveConf.ConfVars.METASTOREURIS)); diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/conf/PrometheusConfiguration.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/conf/PrometheusConfiguration.java index 17c0a1178..25cb01f3b 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/conf/PrometheusConfiguration.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/conf/PrometheusConfiguration.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2020 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
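The next hunk collapses PrometheusConfiguration to a @Data bean, leaving @ConfigurationProperties(prefix = "prometheus") to call the Lombok-generated setter during binding. A sketch of that binding, assuming Spring Boot 2.x's programmatic Binder API:

```java
import java.util.Collections;

import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.boot.context.properties.source.MapConfigurationPropertySource;

import com.hotels.bdp.waggledance.conf.PrometheusConfiguration;

public class PrometheusBindingSketch {
  public static void main(String[] args) {
    MapConfigurationPropertySource source = new MapConfigurationPropertySource(
        Collections.singletonMap("prometheus.prefix", "my-prefix"));
    PrometheusConfiguration config = new Binder(source)
        .bind("prometheus", PrometheusConfiguration.class)
        .orElseGet(PrometheusConfiguration::new); // falls back to the "waggle-dance" default
    System.out.println(config.getPrefix()); // my-prefix
  }
}
```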
@@ -18,18 +18,12 @@ import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.context.annotation.Configuration; +import lombok.Data; + @Configuration @ConfigurationProperties(prefix = "prometheus") +@Data public class PrometheusConfiguration { - private String prefix = "waggle-dance"; - public String getPrefix() { - return prefix; - } - - public void setPrefix(String prefix) { - this.prefix = prefix; - } - } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/conf/WaggleDanceConfiguration.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/conf/WaggleDanceConfiguration.java index cc392f1a5..6f06dda4d 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/conf/WaggleDanceConfiguration.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/conf/WaggleDanceConfiguration.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -47,6 +47,8 @@ public class WaggleDanceConfiguration { private TimeUnit thriftServerRequestTimeoutUnit = TimeUnit.SECONDS; private int statusPollingDelay = 5; private TimeUnit statusPollingDelayTimeUnit = TimeUnit.MINUTES; + // defaults to true to stay backward compatible, but it is recommended to override this to false. + private boolean queryFunctionsAcrossAllMetastores = true; public Integer getPort() { return port; @@ -136,4 +138,11 @@ public void setStatusPollingDelayTimeUnit(TimeUnit statusPollingDelayTimeUnit) { this.statusPollingDelayTimeUnit = statusPollingDelayTimeUnit; } + public boolean isQueryFunctionsAcrossAllMetastores() { + return queryFunctionsAcrossAllMetastores; + } + + public void setQueryFunctionsAcrossAllMetastores(boolean queryFunctionsAcrossAllMetastores) { + this.queryFunctionsAcrossAllMetastores = queryFunctionsAcrossAllMetastores; + } } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/context/CommonBeans.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/context/CommonBeans.java index 5d50a4e02..98be2ca65 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/context/CommonBeans.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/context/CommonBeans.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
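The new queryFunctionsAcrossAllMetastores flag defaults to true so existing deployments keep the old fan-out behaviour. A hypothetical sketch (the real call sites live elsewhere in waggle-dance-core) of how consuming code might branch on it:

```java
import com.hotels.bdp.waggledance.conf.WaggleDanceConfiguration;

public class FunctionQueryRoutingSketch {
  static String describeRouting(WaggleDanceConfiguration conf) {
    if (conf.isQueryFunctionsAcrossAllMetastores()) {
      // legacy default: fan get_all_functions out to every federated metastore
      return "query all metastores";
    }
    // recommended override: only the primary metastore serves function lookups
    return "query primary metastore only";
  }
}
```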
@@ -25,12 +25,10 @@ import com.hotels.bdp.waggledance.client.DefaultMetaStoreClientFactory; import com.hotels.bdp.waggledance.client.tunnelling.TunnelingMetaStoreClientFactory; import com.hotels.bdp.waggledance.conf.WaggleDanceConfiguration; -import com.hotels.bdp.waggledance.core.federation.service.PopulateStatusFederationService; import com.hotels.bdp.waggledance.mapping.model.ASTQueryMapping; import com.hotels.bdp.waggledance.mapping.model.QueryMapping; import com.hotels.bdp.waggledance.mapping.service.PrefixNamingStrategy; import com.hotels.bdp.waggledance.mapping.service.impl.LowerCasePrefixNamingStrategy; -import com.hotels.bdp.waggledance.mapping.service.impl.PollingFederationService; @org.springframework.context.annotation.Configuration public class CommonBeans { @@ -65,10 +63,10 @@ public QueryMapping queryMapping() { return ASTQueryMapping.INSTANCE; } - @Bean + /* @Bean public PollingFederationService pollingFederationService( PopulateStatusFederationService populateStatusFederationService) { return new PollingFederationService(populateStatusFederationService); - } + }*/ } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/context/ScheduledBeans.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/context/ScheduledBeans.java index 4655192fc..6dacb451e 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/context/ScheduledBeans.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/context/ScheduledBeans.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,13 +29,12 @@ public class ScheduledBeans implements SchedulingConfigurer { private final WaggleDanceConfiguration waggleDanceConfiguration; - private final PollingFederationService pollingFederationService; @Autowired public ScheduledBeans( - WaggleDanceConfiguration waggleDanceConfiguration, - PollingFederationService pollingFederationService) { + WaggleDanceConfiguration waggleDanceConfiguration, + PollingFederationService pollingFederationService) { this.waggleDanceConfiguration = waggleDanceConfiguration; this.pollingFederationService = pollingFederationService; } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/core/federation/service/PopulateStatusFederationService.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/core/federation/service/PopulateStatusFederationService.java index f8da5d788..63260652e 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/core/federation/service/PopulateStatusFederationService.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/core/federation/service/PopulateStatusFederationService.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
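The @Bean factory method above is commented out rather than rewired because PollingFederationService gains an @Service annotation later in this diff; keeping both would register the bean twice. A sketch of the component-scanning declaration that supersedes it:

```java
import org.springframework.stereotype.Service;

// With @Service, Spring's component scan registers the bean automatically,
// so the explicit factory method in CommonBeans becomes redundant.
@Service
class PollingFederationServiceSketch {
  // dependencies are injected through the @Autowired constructor in the real class
}
```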
@@ -17,16 +17,21 @@ import java.util.ArrayList; import java.util.List; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.TimeUnit; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.stereotype.Service; +import lombok.extern.log4j.Log4j2; + import com.hotels.bdp.waggledance.api.federation.service.FederationService; import com.hotels.bdp.waggledance.api.federation.service.FederationStatusService; import com.hotels.bdp.waggledance.api.model.AbstractMetaStore; import com.hotels.bdp.waggledance.api.model.MetaStoreStatus; @Service +@Log4j2 public class PopulateStatusFederationService implements FederationService { private final FederationService federationService; @@ -62,11 +67,20 @@ public AbstractMetaStore get(String name) { @Override public List getAll() { List metaStores = federationService.getAll(); - List populatedMetaStores = new ArrayList<>(metaStores.size()); - for (AbstractMetaStore metaStore : metaStores) { - populatedMetaStores.add(populate(metaStore)); + // We don't care about order here; we just want all the statuses. + // Custom thread pool so we get the parallelism we want for firing our requests. + ForkJoinPool customThreadPool = new ForkJoinPool(metaStores.size()); + try { + customThreadPool.submit(() -> metaStores.parallelStream().forEach(metaStore -> { + populate(metaStore); + })); + customThreadPool.shutdown(); + // Wait at most 1 minute, otherwise just return what we got thus far. + customThreadPool.awaitTermination(1L, TimeUnit.MINUTES); + } catch (InterruptedException e) { + log.error("Can't get status for metastores", e); } - return populatedMetaStores; + return new ArrayList<>(metaStores); } private AbstractMetaStore populate(AbstractMetaStore metaStore) { diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/manifest/ManifestAttributes.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/manifest/ManifestAttributes.java index bc8d98c9a..7dde52892 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/manifest/ManifestAttributes.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/manifest/ManifestAttributes.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,8 +26,7 @@ import java.util.jar.Attributes; import java.util.jar.Manifest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import lombok.extern.log4j.Log4j2; /** * Read and make available all the attributes held in the specified class' META_INF/MANIFEST.MF file. The attributes are @@ -37,8 +36,9 @@ * This class does the best effort to find the correct manifest in the classpath. *
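A self-contained sketch of the parallel population above, under the assumption that populate(...) mutates each metastore's status in place. One caveat worth noting: ForkJoinPool requires a parallelism of at least 1, so an empty metastore list needs a guard:

```java
import java.util.List;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;

class StatusPollSketch {
  static void populateAll(List<String> metaStores) throws InterruptedException {
    if (metaStores.isEmpty()) {
      return; // new ForkJoinPool(0) would throw IllegalArgumentException
    }
    // Submitting the parallel stream from inside a dedicated pool makes the
    // stream use that pool instead of the shared common pool.
    ForkJoinPool pool = new ForkJoinPool(metaStores.size());
    try {
      pool.submit(() -> metaStores.parallelStream().forEach(StatusPollSketch::populate));
      pool.shutdown();
      // Wait at most one minute; callers get whatever statuses were populated.
      pool.awaitTermination(1L, TimeUnit.MINUTES);
    } finally {
      pool.shutdownNow(); // don't leak threads if we were interrupted
    }
  }

  private static void populate(String name) {
    // stand-in for the status lookup against one metastore
  }
}
```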

*/ + +@Log4j2 public class ManifestAttributes { - private static final Logger LOG = LoggerFactory.getLogger(ManifestAttributes.class); static final String META_INF_MANIFEST_MF = "META-INF/MANIFEST.MF"; private static final String JAR_PROTOCOL = "jar:"; @@ -63,10 +63,10 @@ public ManifestAttributes(Class mainClass) { attributesStringBuilder.append("Could not find Manifest via " + protectionDomain); } } catch (NullPointerException e) { - LOG.warn("No Manifest found", e); + log.warn("No Manifest found", e); attributesStringBuilder.append("No Manifest found"); } catch (Exception e) { - LOG.warn("Error getting manifest", e); + log.warn("Error getting manifest", e); attributesStringBuilder.append("Error getting manifest " + e.getMessage()); } @@ -83,23 +83,23 @@ protected InputStream openManifestStream(ProtectionDomain protectionDomain) // try to pick the Manifest in the source JAR manifestUrl = selectManifestFromJars(protectionDomain); - LOG.debug("Manifest location in JARs is {}", manifestUrl); + log.debug("Manifest location in JARs is {}", manifestUrl); if (manifestUrl == null) { // if we can't locate the correct JAR then try get to manifest file via a file path (e.g. in Hadoop case where // jar is unpacked to disk) manifestUrl = selectFromFileLocation(protectionDomain); - LOG.debug("Manifest location on disk is {}", manifestUrl); + log.debug("Manifest location on disk is {}", manifestUrl); } if (manifestUrl == null) { // file not found, get via class loader resource (e.g. from inside jar) manifestUrl = protectionDomain.getClassLoader().getResource(META_INF_MANIFEST_MF); - LOG.debug("Manifest location via getResource() is {}", manifestUrl); + log.debug("Manifest location via getResource() is {}", manifestUrl); } if (manifestUrl == null) { - LOG.warn("Manifest not found!"); + log.warn("Manifest not found!"); return null; } @@ -115,7 +115,7 @@ private URL selectManifestFromJars(ProtectionDomain protectionDomain) throws IOE while (resources.hasMoreElements()) { URL url = resources.nextElement(); if (url.toString().startsWith(containingJar)) { - LOG.debug("Found a manifest in location {}", url); + log.debug("Found a manifest in location {}", url); return url; } } @@ -132,7 +132,7 @@ private URL selectFromFileLocation(ProtectionDomain protectionDomain) throws IOE if (manifestFile.exists()) { return url; } - LOG.debug("Could not find manifest in location {}", location); + log.debug("Could not find manifest in location {}", location); return null; } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMapping.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMapping.java index a26699f1a..7496412ef 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMapping.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMapping.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2020 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,16 +17,29 @@ import java.util.List; +import org.apache.hadoop.hive.metastore.api.AddCheckConstraintRequest; +import org.apache.hadoop.hive.metastore.api.AddDefaultConstraintRequest; import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; +import org.apache.hadoop.hive.metastore.api.AddForeignKeyRequest; +import org.apache.hadoop.hive.metastore.api.AddNotNullConstraintRequest; import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest; import org.apache.hadoop.hive.metastore.api.AddPartitionsResult; +import org.apache.hadoop.hive.metastore.api.AddUniqueConstraintRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest; +import org.apache.hadoop.hive.metastore.api.AlterISchemaRequest; import org.apache.hadoop.hive.metastore.api.CacheFileMetadataRequest; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsResponse; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.CompactionRequest; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsResponse; import org.apache.hadoop.hive.metastore.api.DropConstraintRequest; import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest; import org.apache.hadoop.hive.metastore.api.DropPartitionsResult; +import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp; import org.apache.hadoop.hive.metastore.api.FireEventRequest; import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; import org.apache.hadoop.hive.metastore.api.ForeignKeysResponse; @@ -38,8 +51,13 @@ import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.ISchema; +import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.MapSchemaVersionToSerdeRequest; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsResponse; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionSpec; import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest; @@ -49,14 +67,26 @@ import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; import org.apache.hadoop.hive.metastore.api.PrimaryKeysResponse; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.ReplTblWriteIdStateRequest; +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.SchemaVersion; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; import 
org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.SetSchemaVersionStateRequest; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.api.TableStatsRequest; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsResponse; public interface DatabaseMapping extends MetaStoreMapping { - Index transformInboundIndex(Index index); + ISchema transformInboundISchema(ISchema iSchema); Partition transformInboundPartition(Partition partition); @@ -64,7 +94,7 @@ public interface DatabaseMapping extends MetaStoreMapping { HiveObjectRef transformInboundHiveObjectRef(HiveObjectRef function); - Index transformOutboundIndex(Index index); + ISchema transformOutboundISchema(ISchema iSchema); Partition transformOutboundPartition(Partition partition); @@ -116,7 +146,7 @@ public interface DatabaseMapping extends MetaStoreMapping { List transformInboundPartitions(List partitions); - List transformOutboundIndexes(List indexes); + List transformOutboundISchemas(List iSchemas); ColumnStatistics transformInboundColumnStatistics(ColumnStatistics columnStatistics); @@ -150,6 +180,72 @@ public interface DatabaseMapping extends MetaStoreMapping { PartitionValuesRequest transformInboundPartitionValuesRequest(PartitionValuesRequest req); + List transformInboundSQLPrimaryKeys(List sqlPrimaryKeys); + + List transformInboundSQLForeignKeys(List sqlForeignKeys); + + List transformInboundSQLUniqueConstraints(List sqlUniqueConstraints); + + List transformInboundSQLNotNullConstraints(List sqlNotNullConstraints); + + List transformInboundSQLDefaultConstraints(List sqlDefaultConstraints); + + List transformInboundSQLCheckConstraints(List sqlCheckConstraints); + + ReplTblWriteIdStateRequest transformInboundReplTblWriteIdStateRequest(ReplTblWriteIdStateRequest request); + + AllocateTableWriteIdsRequest transformInboundAllocateTableWriteIdsRequest(AllocateTableWriteIdsRequest request); + + AlterISchemaRequest transformInboundAlterISchemaRequest(AlterISchemaRequest request); + + SchemaVersion transformInboundSchemaVersion(SchemaVersion schemaVersion); + + SchemaVersion transformOutboundSchemaVersion(SchemaVersion schemaVersion); + + List transformOutboundSchemaVersions(List schemaVersions); + + ISchemaName transformInboundISchemaName(ISchemaName iSchemaName); + + ISchemaName transformOutboundISchemaName(ISchemaName iSchemaName); + + AddForeignKeyRequest transformInboundAddForeignKeyRequest(AddForeignKeyRequest request); + + AddUniqueConstraintRequest transformInboundAddUniqueConstraintRequest(AddUniqueConstraintRequest request); + + AddNotNullConstraintRequest transformInboundAddNotNullConstraintRequest(AddNotNullConstraintRequest request); + + AddDefaultConstraintRequest transformInboundAddDefaultConstraintRequest(AddDefaultConstraintRequest request); + + AddCheckConstraintRequest transformInboundAddCheckConstraintRequest(AddCheckConstraintRequest request); + + FindSchemasByColsResp transformOutboundFindSchemasByColsResp(FindSchemasByColsResp response); + + SchemaVersionDescriptor transformInboundSchemaVersionDescriptor(SchemaVersionDescriptor request); + + MapSchemaVersionToSerdeRequest transformInboundMapSchemaVersionToSerdeRequest(MapSchemaVersionToSerdeRequest request); + + SetSchemaVersionStateRequest transformInboundSetSchemaVersionStateRequest(SetSchemaVersionStateRequest request); + + 
NotificationEventsCountRequest transformInboundNotificationEventsCountRequest(NotificationEventsCountRequest request); + + UniqueConstraintsRequest transformInboundUniqueConstraintsRequest(UniqueConstraintsRequest request); + + UniqueConstraintsResponse transformOutboundUniqueConstraintsResponse(UniqueConstraintsResponse response); + + NotNullConstraintsRequest transformInboundNotNullConstraintsRequest(NotNullConstraintsRequest request); + + NotNullConstraintsResponse transformOutboundNotNullConstraintsResponse(NotNullConstraintsResponse response); + + DefaultConstraintsRequest transformInboundDefaultConstraintsRequest(DefaultConstraintsRequest request); + + DefaultConstraintsResponse transformOutboundDefaultConstraintsResponse(DefaultConstraintsResponse response); + + CheckConstraintsRequest transformInboundCheckConstraintsRequest(CheckConstraintsRequest request); + + CheckConstraintsResponse transformOutboundCheckConstraintsResponse(CheckConstraintsResponse response); + + CreationMetadata transformInboundCreationMetadata(CreationMetadata request); + @Override long getLatency(); } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMappingImpl.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMappingImpl.java index eb6df5c3a..7ee6bc926 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMappingImpl.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMappingImpl.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
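All of the transform methods in this interface follow one convention: inbound transforms strip the federation prefix from database names before a request is forwarded to the underlying metastore, and outbound transforms re-apply it on the way back to the client. A reduced sketch of that round trip (the prefix logic is illustrative; the real naming strategy lives behind MetaStoreMapping):

```java
// Illustrative only: shows the inbound/outbound symmetry, not the real code.
class NameTransformSketch {
  private final String prefix;

  NameTransformSketch(String prefix) {
    this.prefix = prefix;
  }

  String transformInboundDatabaseName(String dbName) {
    // client-visible "waggle_sales" -> remote metastore's "sales"
    return dbName.startsWith(prefix) ? dbName.substring(prefix.length()) : dbName;
  }

  String transformOutboundDatabaseName(String dbName) {
    // remote "sales" -> client-visible "waggle_sales"
    return prefix + dbName;
  }
}
```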
@@ -19,17 +19,30 @@ import java.util.List; import org.apache.hadoop.hive.metastore.MetaStoreFilterHook; +import org.apache.hadoop.hive.metastore.api.AddCheckConstraintRequest; +import org.apache.hadoop.hive.metastore.api.AddDefaultConstraintRequest; import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; +import org.apache.hadoop.hive.metastore.api.AddForeignKeyRequest; +import org.apache.hadoop.hive.metastore.api.AddNotNullConstraintRequest; import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest; import org.apache.hadoop.hive.metastore.api.AddPartitionsResult; +import org.apache.hadoop.hive.metastore.api.AddUniqueConstraintRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.AlterISchemaRequest; import org.apache.hadoop.hive.metastore.api.CacheFileMetadataRequest; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsResponse; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.CompactionRequest; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsResponse; import org.apache.hadoop.hive.metastore.api.DropConstraintRequest; import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest; import org.apache.hadoop.hive.metastore.api.DropPartitionsResult; +import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp; import org.apache.hadoop.hive.metastore.api.FireEventRequest; import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; import org.apache.hadoop.hive.metastore.api.ForeignKeysResponse; @@ -42,11 +55,16 @@ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.ISchema; +import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.LockComponent; import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.MapSchemaVersionToSerdeRequest; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsResponse; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionSpec; import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest; @@ -56,31 +74,38 @@ import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; import org.apache.hadoop.hive.metastore.api.PrimaryKeysResponse; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.ReplTblWriteIdStateRequest; +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import 
org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.SchemaVersion; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.SetSchemaVersionStateRequest; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.api.TableStatsRequest; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsResponse; import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import lombok.AllArgsConstructor; +import lombok.extern.log4j.Log4j2; import com.hotels.bdp.waggledance.api.WaggleDanceException; +import com.hotels.bdp.waggledance.mapping.service.GrammarUtils; +@AllArgsConstructor +@Log4j2 public class DatabaseMappingImpl implements DatabaseMapping { - private final static Logger log = LoggerFactory.getLogger(DatabaseMappingImpl.class); - private final MetaStoreMapping metaStoreMapping; private final QueryMapping queryMapping; - public DatabaseMappingImpl(MetaStoreMapping metaStoreMapping, QueryMapping queryMapping) { - this.metaStoreMapping = metaStoreMapping; - this.queryMapping = queryMapping; - } - @Override public MetaStoreFilterHook getMetastoreFilter() { return metaStoreMapping.getMetastoreFilter(); @@ -134,9 +159,9 @@ public Partition transformOutboundPartition(Partition partition) { } @Override - public Index transformOutboundIndex(Index index) { - index.setDbName(metaStoreMapping.transformOutboundDatabaseName(index.getDbName())); - return index; + public ISchema transformOutboundISchema(ISchema iSchema) { + iSchema.setDbName(metaStoreMapping.transformOutboundDatabaseName(iSchema.getDbName())); + return iSchema; } @Override @@ -152,9 +177,9 @@ public Partition transformInboundPartition(Partition partition) { } @Override - public Index transformInboundIndex(Index index) { - index.setDbName(metaStoreMapping.transformInboundDatabaseName(index.getDbName())); - return index; + public ISchema transformInboundISchema(ISchema iSchema) { + iSchema.setDbName(metaStoreMapping.transformInboundDatabaseName(iSchema.getDbName())); + return iSchema; } @Override @@ -218,11 +243,13 @@ public Iface getClient() { @Override public String transformOutboundDatabaseName(String databaseName) { + databaseName = GrammarUtils.removeCatName(databaseName); return metaStoreMapping.transformOutboundDatabaseName(databaseName); } @Override public List transformOutboundDatabaseNameMultiple(String databaseName) { + databaseName = GrammarUtils.removeCatName(databaseName); return metaStoreMapping.transformOutboundDatabaseNameMultiple(databaseName); } @@ -233,6 +260,7 @@ public Database transformOutboundDatabase(Database database) { @Override public String transformInboundDatabaseName(String databaseName) { + databaseName = GrammarUtils.removeCatName(databaseName); return metaStoreMapping.transformInboundDatabaseName(databaseName); } @@ -317,6 +345,7 @@ public boolean isAvailable() { @Override public MetaStoreMapping checkWritePermissions(String databaseName) throws IllegalArgumentException { + databaseName = GrammarUtils.removeCatName(databaseName); return 
metaStoreMapping.checkWritePermissions(transformInboundDatabaseName(databaseName)); } @@ -376,11 +405,11 @@ public List transformInboundPartitions(List partitions) { } @Override - public List transformOutboundIndexes(List indexes) { - for (Index index : indexes) { - transformOutboundIndex(index); + public List transformOutboundISchemas(List iSchemaList) { + for (ISchema iSchema : iSchemaList) { + transformOutboundISchema(iSchema); } - return indexes; + return iSchemaList; } @Override @@ -509,6 +538,280 @@ public PartitionValuesRequest transformInboundPartitionValuesRequest(PartitionVa return request; } + @Override + public List transformInboundSQLPrimaryKeys(List sqlPrimaryKeys) { + for(SQLPrimaryKey sqlPrimaryKey: sqlPrimaryKeys) { + sqlPrimaryKey.setTable_db(transformInboundDatabaseName(sqlPrimaryKey.getTable_db())); + } + return sqlPrimaryKeys; + } + + @Override + public List transformInboundSQLForeignKeys(List sqlForeignKeys) { + for(SQLForeignKey sqlForeignKey: sqlForeignKeys) { + sqlForeignKey.setPktable_db(transformInboundDatabaseName(sqlForeignKey.getPktable_db())); + sqlForeignKey.setFktable_db(transformInboundDatabaseName(sqlForeignKey.getFktable_db())); + } + return sqlForeignKeys; + } + + @Override + public List transformInboundSQLUniqueConstraints(List sqlUniqueConstraints) { + for(SQLUniqueConstraint sqlUniqueConstraint: sqlUniqueConstraints) { + sqlUniqueConstraint.setTable_db(transformInboundDatabaseName(sqlUniqueConstraint.getTable_db())); + } + return sqlUniqueConstraints; + } + + @Override + public List transformInboundSQLNotNullConstraints(List sqlNotNullConstraints) { + for(SQLNotNullConstraint sqlNotNullConstraint: sqlNotNullConstraints) { + sqlNotNullConstraint.setTable_db(transformInboundDatabaseName(sqlNotNullConstraint.getTable_db())); + } + return sqlNotNullConstraints; + } + + @Override + public List transformInboundSQLDefaultConstraints(List sqlDefaultConstraints) { + for(SQLDefaultConstraint sqlDefaultConstraint: sqlDefaultConstraints) { + sqlDefaultConstraint.setTable_db(transformInboundDatabaseName(sqlDefaultConstraint.getTable_db())); + } + return sqlDefaultConstraints; + } + + @Override + public List transformInboundSQLCheckConstraints(List sqlCheckConstraints) { + for(SQLCheckConstraint sqlCheckConstraint: sqlCheckConstraints) { + sqlCheckConstraint.setTable_db(transformInboundDatabaseName(sqlCheckConstraint.getTable_db())); + } + return sqlCheckConstraints; + } + + + @Override + public ReplTblWriteIdStateRequest transformInboundReplTblWriteIdStateRequest(ReplTblWriteIdStateRequest request) { + request.setDbName(transformInboundDatabaseName(request.getDbName())); + return request; + } + + + @Override + public AllocateTableWriteIdsRequest transformInboundAllocateTableWriteIdsRequest(AllocateTableWriteIdsRequest request) { + request.setDbName(transformInboundDatabaseName(request.getDbName())); + return request; + } + + + @Override + public AlterISchemaRequest transformInboundAlterISchemaRequest(AlterISchemaRequest request) { + if(request.getName() !=null) { + request.setName(transformInboundISchemaName(request.getName())); + } + if(request.getNewSchema() != null) { + request.setNewSchema(transformInboundISchema(request.getNewSchema())); + } + return request; + } + + + @Override + public SchemaVersion transformInboundSchemaVersion(SchemaVersion schemaVersion) { + if(schemaVersion.getSchema() !=null ) { + schemaVersion.getSchema().setDbName(transformInboundDatabaseName(schemaVersion.getSchema().getDbName())); + } + return schemaVersion; + } + + + @Override + 
public SchemaVersion transformOutboundSchemaVersion(SchemaVersion schemaVersion) { + if(schemaVersion.getSchema() !=null ) { + schemaVersion.getSchema().setDbName(metaStoreMapping.transformOutboundDatabaseName( + schemaVersion.getSchema().getDbName())); + } + return schemaVersion; + } + + @Override + public List transformOutboundSchemaVersions(List schemaVersions) { + for(SchemaVersion schemaVersion: schemaVersions) { + transformOutboundSchemaVersion(schemaVersion); + } + return schemaVersions; + } + + @Override + public ISchemaName transformInboundISchemaName(ISchemaName iSchemaName) { + iSchemaName.setDbName(transformInboundDatabaseName(iSchemaName.getDbName())); + return iSchemaName; + } + + @Override + public ISchemaName transformOutboundISchemaName(ISchemaName iSchemaName) { + iSchemaName.setDbName(transformOutboundDatabaseName(iSchemaName.getDbName())); + return iSchemaName; + } + + @Override + public AddForeignKeyRequest transformInboundAddForeignKeyRequest(AddForeignKeyRequest request) { + for(SQLForeignKey sqlForeignKey: request.getForeignKeyCols()) { + sqlForeignKey.setPktable_db(transformInboundDatabaseName(sqlForeignKey.getPktable_db())); + sqlForeignKey.setFktable_db(transformInboundDatabaseName(sqlForeignKey.getFktable_db())); + } + return request; + } + + + @Override + public AddUniqueConstraintRequest transformInboundAddUniqueConstraintRequest(AddUniqueConstraintRequest request) { + for(SQLUniqueConstraint sqlUniqueConstraint: request.getUniqueConstraintCols()) { + sqlUniqueConstraint.setTable_db(transformInboundDatabaseName(sqlUniqueConstraint.getTable_db())); + } + return request; + } + + + @Override + public AddNotNullConstraintRequest transformInboundAddNotNullConstraintRequest(AddNotNullConstraintRequest request) { + for(SQLNotNullConstraint sqlNotNullConstraint: request.getNotNullConstraintCols()) { + sqlNotNullConstraint.setTable_db(transformInboundDatabaseName(sqlNotNullConstraint.getTable_db())); + } + return request; + } + + + @Override + public AddDefaultConstraintRequest transformInboundAddDefaultConstraintRequest(AddDefaultConstraintRequest request) { + for(SQLDefaultConstraint sqlDefaultConstraint: request.getDefaultConstraintCols()) { + sqlDefaultConstraint.setTable_db(transformInboundDatabaseName(sqlDefaultConstraint.getTable_db())); + } + return request; + } + + + @Override + public AddCheckConstraintRequest transformInboundAddCheckConstraintRequest(AddCheckConstraintRequest request) { + for(SQLCheckConstraint sqlCheckConstraint: request.getCheckConstraintCols()) { + sqlCheckConstraint.setTable_db(transformInboundDatabaseName(sqlCheckConstraint.getTable_db())); + } + return request; + } + + + @Override + public FindSchemasByColsResp transformOutboundFindSchemasByColsResp(FindSchemasByColsResp response) { + for(SchemaVersionDescriptor schemaVersionDescriptor: response.getSchemaVersions()) { + if(schemaVersionDescriptor.getSchema() != null) { + schemaVersionDescriptor.setSchema(transformOutboundISchemaName(schemaVersionDescriptor.getSchema())); + } + } + return response; + } + + + @Override + public SchemaVersionDescriptor transformInboundSchemaVersionDescriptor(SchemaVersionDescriptor request) { + if(request.getSchema() !=null) { + request.getSchema().setDbName(transformInboundDatabaseName(request.getSchema().getDbName())); + } + return request; + } + + + @Override + public MapSchemaVersionToSerdeRequest transformInboundMapSchemaVersionToSerdeRequest(MapSchemaVersionToSerdeRequest request) { + if(request.getSchemaVersion() != null && 
request.getSchemaVersion().getSchema() !=null) { + request.getSchemaVersion().getSchema().setDbName(transformInboundDatabaseName( + request.getSchemaVersion().getSchema().getDbName())); + } + return request; + } + + + @Override + public SetSchemaVersionStateRequest transformInboundSetSchemaVersionStateRequest(SetSchemaVersionStateRequest request) { + if(request.getSchemaVersion() != null && request.getSchemaVersion().getSchema() !=null) { + request.getSchemaVersion().getSchema().setDbName(transformInboundDatabaseName( + request.getSchemaVersion().getSchema().getDbName())); + } + return request; + } + + + @Override + public NotificationEventsCountRequest transformInboundNotificationEventsCountRequest(NotificationEventsCountRequest request) { + request.setDbName(transformInboundDatabaseName(request.getDbName())); + return request; + } + + + @Override + public UniqueConstraintsRequest transformInboundUniqueConstraintsRequest(UniqueConstraintsRequest request) { + request.setDb_name(transformInboundDatabaseName(request.getDb_name())); + return request; + } + + @Override + public UniqueConstraintsResponse transformOutboundUniqueConstraintsResponse(UniqueConstraintsResponse response) { + for(SQLUniqueConstraint sqlUniqueConstraint: response.getUniqueConstraints()) { + sqlUniqueConstraint.setTable_db(transformOutboundDatabaseName(sqlUniqueConstraint.getTable_db())); + } + return response; + } + + + @Override + public NotNullConstraintsRequest transformInboundNotNullConstraintsRequest(NotNullConstraintsRequest request) { + request.setDb_name(transformInboundDatabaseName(request.getDb_name())); + return request; + } + + @Override + public NotNullConstraintsResponse transformOutboundNotNullConstraintsResponse(NotNullConstraintsResponse response) { + for(SQLNotNullConstraint sqlNotNullConstraint: response.getNotNullConstraints()) { + sqlNotNullConstraint.setTable_db(transformOutboundDatabaseName(sqlNotNullConstraint.getTable_db())); + } + return response; + } + + + @Override + public DefaultConstraintsRequest transformInboundDefaultConstraintsRequest(DefaultConstraintsRequest request) { + request.setDb_name(transformInboundDatabaseName(request.getDb_name())); + return request; + } + + @Override + public DefaultConstraintsResponse transformOutboundDefaultConstraintsResponse(DefaultConstraintsResponse response) { + for(SQLDefaultConstraint sqlDefaultConstraint: response.getDefaultConstraints()) { + sqlDefaultConstraint.setTable_db(transformOutboundDatabaseName(sqlDefaultConstraint.getTable_db())); + } + return response; + } + + + @Override + public CheckConstraintsRequest transformInboundCheckConstraintsRequest(CheckConstraintsRequest request) { + request.setDb_name(transformInboundDatabaseName(request.getDb_name())); + return request; + } + + @Override + public CheckConstraintsResponse transformOutboundCheckConstraintsResponse(CheckConstraintsResponse response) { + for(SQLCheckConstraint sqlCheckConstraint: response.getCheckConstraints()) { + sqlCheckConstraint.setTable_db(transformOutboundDatabaseName(sqlCheckConstraint.getTable_db())); + } + return response; + } + + + @Override + public CreationMetadata transformInboundCreationMetadata(CreationMetadata request) { + request.setDbName(transformInboundDatabaseName(request.getDbName())); + return request; + } + + @Override public long getLatency() { return metaStoreMapping.getLatency(); diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseNameMapping.java 
b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseNameMapping.java index f60c29661..93582affc 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseNameMapping.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/DatabaseNameMapping.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,15 +21,13 @@ import java.util.List; import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import lombok.extern.log4j.Log4j2; import com.google.common.collect.BiMap; +@Log4j2 public class DatabaseNameMapping extends MetaStoreMappingDecorator { - private final static Logger log = LoggerFactory.getLogger(DatabaseNameMapping.class); - private final Map inbound; private final Map outbound; @@ -56,7 +54,7 @@ public List transformOutboundDatabaseNameMultiple(String databaseName) { if (outbound.containsKey(databaseName)) { String result = outbound.get(databaseName); List databases = super.transformOutboundDatabaseNameMultiple(result); - log.debug("transformOutboundDatabaseName '" + databaseName + "' to '" + databases + "'"); + log.debug("transformOutboundDatabaseName '{}' to '{}'", databaseName, databases); results.addAll(databases); } return results; @@ -66,7 +64,7 @@ public List transformOutboundDatabaseNameMultiple(String databaseName) { public String transformInboundDatabaseName(String databaseName) { String newDatabaseName = super.transformInboundDatabaseName(databaseName); String result = inbound.getOrDefault(newDatabaseName, newDatabaseName); - log.debug("transformInboundDatabaseName '" + databaseName + "' to '" + result + "'"); + log.debug("transformInboundDatabaseName '{}' to '{}'", databaseName, result); return result; } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingDecorator.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingDecorator.java index ad795f0f3..18dab355c 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingDecorator.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingDecorator.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
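The DatabaseNameMapping hunk above also switches its debug statements from string concatenation to '{}' placeholders. The difference matters on hot paths: with placeholders the message is only assembled when the level is enabled. A minimal sketch:

```java
import lombok.extern.log4j.Log4j2;

@Log4j2
class LoggingSketch {
  void map(String from, String to) {
    // Lazy: arguments are only formatted if DEBUG is enabled.
    log.debug("transformInboundDatabaseName '{}' to '{}'", from, to);
    // Eager (the old style): concatenates even when DEBUG is off.
    // log.debug("transformInboundDatabaseName '" + from + "' to '" + to + "'");
  }
}
```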
@@ -27,14 +27,13 @@ import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface; import org.apache.thrift.TException; +import lombok.AllArgsConstructor; + +@AllArgsConstructor public abstract class MetaStoreMappingDecorator implements MetaStoreMapping { private final MetaStoreMapping metaStoreMapping; - public MetaStoreMappingDecorator(MetaStoreMapping metaStoreMapping) { - this.metaStoreMapping = metaStoreMapping; - } - @Override public String transformOutboundDatabaseName(String databaseName) { if (databaseName == null) { diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingFactoryImpl.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingFactoryImpl.java index c4efbc590..c21271404 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingFactoryImpl.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingFactoryImpl.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,11 +25,11 @@ import org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl; import org.apache.hadoop.hive.metastore.MetaStoreFilterHook; import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; +import lombok.extern.log4j.Log4j2; + import com.hotels.bdp.waggledance.api.model.AbstractMetaStore; import com.hotels.bdp.waggledance.api.model.DatabaseResolution; import com.hotels.bdp.waggledance.client.CloseableThriftHiveMetastoreIface; @@ -41,9 +41,8 @@ import com.hotels.bdp.waggledance.server.security.AccessControlHandlerFactory; @Component +@Log4j2 public class MetaStoreMappingFactoryImpl implements MetaStoreMappingFactory { - private static final Logger LOG = LoggerFactory.getLogger(MetaStoreMappingFactoryImpl.class); - private final WaggleDanceConfiguration waggleDanceConfiguration; private final PrefixNamingStrategy prefixNamingStrategy; private final CloseableThriftHiveMetastoreIfaceClientFactory metaStoreClientFactory; @@ -51,10 +50,10 @@ public class MetaStoreMappingFactoryImpl implements MetaStoreMappingFactory { @Autowired public MetaStoreMappingFactoryImpl( - WaggleDanceConfiguration waggleDanceConfiguration, - PrefixNamingStrategy prefixNamingStrategy, - CloseableThriftHiveMetastoreIfaceClientFactory metaStoreClientFactory, - AccessControlHandlerFactory accessControlHandlerFactory) { + WaggleDanceConfiguration waggleDanceConfiguration, + PrefixNamingStrategy prefixNamingStrategy, + CloseableThriftHiveMetastoreIfaceClientFactory metaStoreClientFactory, + AccessControlHandlerFactory accessControlHandlerFactory) { this.waggleDanceConfiguration = waggleDanceConfiguration; this.prefixNamingStrategy = prefixNamingStrategy; this.metaStoreClientFactory = metaStoreClientFactory; @@ -65,7 +64,7 @@ private CloseableThriftHiveMetastoreIface createClient(AbstractMetaStore metaSto try { return metaStoreClientFactory.newInstance(metaStore); } catch (Exception e) { - LOG.error("Can't create a client for metastore '{}':", metaStore.getName(), e); + log.error("Can't create a client for metastore '{}':", metaStore.getName(), e); return newUnreachableMetastoreClient(metaStore); } } @@ -73,7 +72,7 @@ 
private CloseableThriftHiveMetastoreIface createClient(AbstractMetaStore metaSto @SuppressWarnings("resource") @Override public MetaStoreMapping newInstance(AbstractMetaStore metaStore) { - LOG + log .info("Mapping databases with name '{}' to metastore: {}", metaStore.getName(), metaStore.getRemoteMetaStoreUris()); MetaStoreMapping metaStoreMapping = new MetaStoreMappingImpl(prefixNameFor(metaStore), metaStore.getName(), diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingImpl.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingImpl.java index fc9711f5f..b23a12221 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingImpl.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingImpl.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,44 +27,27 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import lombok.AllArgsConstructor; +import lombok.extern.log4j.Log4j2; import com.hotels.bdp.waggledance.api.model.ConnectionType; import com.hotels.bdp.waggledance.client.CloseableThriftHiveMetastoreIface; import com.hotels.bdp.waggledance.server.security.AccessControlHandler; import com.hotels.bdp.waggledance.server.security.NotAllowedException; +@AllArgsConstructor +@Log4j2 class MetaStoreMappingImpl implements MetaStoreMapping { - private final static Logger log = LoggerFactory.getLogger(MetaStoreMappingImpl.class); - private final String databasePrefix; + private final String name; private final CloseableThriftHiveMetastoreIface client; private final AccessControlHandler accessControlHandler; - private final String name; + private final ConnectionType connectionType; private final long latency; private final MetaStoreFilterHook metastoreFilter; - private final ConnectionType connectionType; - - MetaStoreMappingImpl( - String databasePrefix, - String name, - CloseableThriftHiveMetastoreIface client, - AccessControlHandler accessControlHandler, - ConnectionType connectionType, - long latency, - MetaStoreFilterHook metastoreFilter) { - this.databasePrefix = databasePrefix; - this.name = name; - this.client = client; - this.accessControlHandler = accessControlHandler; - this.connectionType = connectionType; - this.latency = latency; - this.metastoreFilter = metastoreFilter; - } - @Override public String transformOutboundDatabaseName(String databaseName) { return databaseName.toLowerCase(Locale.ROOT); diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/DatabaseMappingService.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/DatabaseMappingService.java index e9ca085b7..c89ee5bd0 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/DatabaseMappingService.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/DatabaseMappingService.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. 
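Note why MetaStoreMappingImpl's fields are reordered in the hunk above: Lombok's @AllArgsConstructor generates constructor parameters in field-declaration order, so the declarations must be arranged to match the signature existing callers expect. A tiny sketch of the rule:

```java
import lombok.AllArgsConstructor;

// Generates: OrderSketch(String databasePrefix, String name).
// Swapping the field declarations would silently swap the parameters too.
@AllArgsConstructor
class OrderSketch {
  private final String databasePrefix; // becomes the 1st constructor parameter
  private final String name;           // becomes the 2nd constructor parameter
}
```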
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -50,5 +50,7 @@ void checkTableAllowed(String databaseName, String tableName, PanopticOperationHandler getPanopticOperationHandler(); - List getDatabaseMappings(); + List getAvailableDatabaseMappings(); + + List getAllDatabaseMappings(); } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/GrammarUtils.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/GrammarUtils.java index 2d3d459dc..6fc04d6a7 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/GrammarUtils.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/GrammarUtils.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,6 +15,9 @@ */ package com.hotels.bdp.waggledance.mapping.service; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CATALOG_DB_SEPARATOR; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CATALOG_DB_THRIFT_NAME_MARKER; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -22,6 +25,9 @@ import java.util.Map.Entry; import java.util.Set; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hive.metastore.Warehouse; + import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Splitter; @@ -31,6 +37,10 @@ public final class GrammarUtils { private static final String OR_SEPARATOR = "|"; private static final Splitter OR_SPLITTER = Splitter.on(OR_SEPARATOR); private static final Joiner OR_JOINER = Joiner.on(OR_SEPARATOR); + private final static String MATCH_ALL = "*"; + + private static String DEFAULT_CAT_NAME = StringUtils.join(String.valueOf(CATALOG_DB_THRIFT_NAME_MARKER), + Warehouse.DEFAULT_CATALOG_NAME, CATALOG_DB_SEPARATOR); private GrammarUtils() {} @@ -90,13 +100,15 @@ static String[] splitPattern(String prefix, String pattern) { */ public static Map selectMatchingPrefixes(Set prefixes, String dbPatterns) { Map matchingPrefixes = new HashMap<>(); - if ((dbPatterns == null) || "*".equals(dbPatterns)) { + if ((dbPatterns == null) || MATCH_ALL.equals(dbPatterns) || StringUtils.equalsIgnoreCase(DEFAULT_CAT_NAME, dbPatterns)) { for (String prefix : prefixes) { matchingPrefixes.put(prefix, dbPatterns); } return matchingPrefixes; } + dbPatterns = removeCatName(dbPatterns); + Map> prefixPatterns = new HashMap<>(); for (String subPattern : OR_SPLITTER.split(dbPatterns)) { for (String prefix : prefixes) { @@ -115,4 +127,17 @@ public static Map selectMatchingPrefixes(Set prefixes, S return matchingPrefixes; } + public static String removeCatName(String dbPatterns) { + if(StringUtils.containsIgnoreCase(dbPatterns, DEFAULT_CAT_NAME)) { + dbPatterns = StringUtils.removeIgnoreCase(dbPatterns, DEFAULT_CAT_NAME); + } + if(StringUtils.startsWithIgnoreCase(dbPatterns, String.valueOf(CATALOG_DB_THRIFT_NAME_MARKER))) { + dbPatterns = StringUtils.removeIgnoreCase(dbPatterns, String.valueOf(CATALOG_DB_THRIFT_NAME_MARKER)); + } + if(StringUtils.endsWithIgnoreCase(dbPatterns, CATALOG_DB_SEPARATOR)) { + dbPatterns = StringUtils.removeIgnoreCase(dbPatterns, CATALOG_DB_SEPARATOR); + } + return StringUtils.isNotBlank(dbPatterns) ? 
dbPatterns : DEFAULT_CAT_NAME; + } + } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/PanopticConcurrentOperationExecutor.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/PanopticConcurrentOperationExecutor.java index f519f3132..aec7adea9 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/PanopticConcurrentOperationExecutor.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/PanopticConcurrentOperationExecutor.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,24 +26,22 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import lombok.extern.log4j.Log4j2; import com.hotels.bdp.waggledance.mapping.model.DatabaseMapping; import com.hotels.bdp.waggledance.mapping.service.requests.RequestCallable; +@Log4j2 public class PanopticConcurrentOperationExecutor implements PanopticOperationExecutor { private static final String INTERRUPTED_MESSAGE = "Execution was interrupted: "; private static final String SLOW_METASTORE_MESSAGE = "Metastore {} was slow to respond so results are omitted"; - private final static Logger LOG = LoggerFactory.getLogger(PanopticConcurrentOperationExecutor.class); - @Override public List executeRequests( - List>> allRequests, - long requestTimeout, - String errorMessage) { + List>> allRequests, + long requestTimeout, + String errorMessage) { List allResults = new ArrayList<>(); if (allRequests.isEmpty()) { return allResults; @@ -57,7 +55,7 @@ public List executeRequests( long totalTimeout = getTotalTimeout(requestTimeout, allRequests); futures = executorService.invokeAll(allRequests, totalTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { - LOG.warn("Execution was interrupted", e); + log.warn("Execution was interrupted", e); } for (Future> future : futures) { @@ -75,11 +73,11 @@ private List getResultFromFuture(Future> future, String metastore try { return future.get(); } catch (InterruptedException e) { - LOG.warn(INTERRUPTED_MESSAGE, e); + log.warn(INTERRUPTED_MESSAGE, e); } catch (ExecutionException e) { - LOG.warn(errorMessage, e.getCause().getMessage()); + log.warn(errorMessage, e.getCause().getMessage()); } catch (CancellationException e) { - LOG.warn(SLOW_METASTORE_MESSAGE, metastoreMappingName); + log.warn(SLOW_METASTORE_MESSAGE, metastoreMappingName); } return Collections.emptyList(); } @@ -95,4 +93,4 @@ private long getTotalTimeout(long requestTimeout, List getAllDatabases( } List result = getPanopticOperationExecutor() .executeRequests(allRequests, GET_DATABASES_TIMEOUT, "Can't fetch databases by pattern: {}"); + log.info("All Databases Result={}", result); return result; } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/MonitoredDatabaseMappingService.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/MonitoredDatabaseMappingService.java index 8e3820a23..00541c1d3 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/MonitoredDatabaseMappingService.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/MonitoredDatabaseMappingService.java @@ -1,5 +1,5 @@ /** - * 
Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,20 +22,20 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import lombok.AllArgsConstructor; + import com.hotels.bdp.waggledance.api.model.AbstractMetaStore; import com.hotels.bdp.waggledance.mapping.model.DatabaseMapping; +import com.hotels.bdp.waggledance.mapping.service.GrammarUtils; import com.hotels.bdp.waggledance.mapping.service.MappingEventListener; import com.hotels.bdp.waggledance.mapping.service.PanopticOperationHandler; import com.hotels.bdp.waggledance.metrics.CurrentMonitoredMetaStoreHolder; +@AllArgsConstructor public class MonitoredDatabaseMappingService implements MappingEventListener { private final MappingEventListener wrapped; - public MonitoredDatabaseMappingService(MappingEventListener wrapped) { - this.wrapped = wrapped; - } - @Override public DatabaseMapping primaryDatabaseMapping() { DatabaseMapping primaryDatabaseMapping = wrapped.primaryDatabaseMapping(); @@ -45,6 +45,7 @@ public DatabaseMapping primaryDatabaseMapping() { @Override public DatabaseMapping databaseMapping(@NotNull String databaseName) throws NoSuchObjectException { + databaseName = GrammarUtils.removeCatName(databaseName); DatabaseMapping databaseMapping = wrapped.databaseMapping(databaseName); CurrentMonitoredMetaStoreHolder.monitorMetastore(databaseMapping.getMetastoreMappingName()); return databaseMapping; @@ -53,6 +54,7 @@ public DatabaseMapping databaseMapping(@NotNull String databaseName) throws NoSu @Override public void checkTableAllowed(String databaseName, String tableName, DatabaseMapping mapping) throws NoSuchObjectException { + databaseName = GrammarUtils.removeCatName(databaseName); wrapped.checkTableAllowed(databaseName, tableName, mapping); } @@ -69,8 +71,13 @@ public PanopticOperationHandler getPanopticOperationHandler() { } @Override - public List getDatabaseMappings() { - return wrapped.getDatabaseMappings(); + public List getAvailableDatabaseMappings() { + return wrapped.getAvailableDatabaseMappings(); + } + + @Override + public List getAllDatabaseMappings() { + return wrapped.getAllDatabaseMappings(); } @Override diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/NotifyingFederationService.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/NotifyingFederationService.java index 200f72d64..73e1d451f 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/NotifyingFederationService.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/NotifyingFederationService.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
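MonitoredDatabaseMappingService now runs incoming database names through GrammarUtils.removeCatName before mapping them. This is a Hive 3 accommodation: Thrift clients may send catalog-qualified names built from MetaStoreUtils' CATALOG_DB_THRIFT_NAME_MARKER and CATALOG_DB_SEPARATOR, rendering roughly as "@hive#db" for the default catalog (the exact marker characters here are an assumption). A sketch of the stripping behaviour:

```java
import org.apache.commons.lang3.StringUtils;

class CatNameSketch {
  // Assumed rendering of the default catalog qualifier; see MetaStoreUtils.
  private static final String DEFAULT_CAT = "@hive#";

  static String removeCatName(String dbName) {
    return StringUtils.removeIgnoreCase(dbName, DEFAULT_CAT);
  }

  public static void main(String[] args) {
    System.out.println(removeCatName("@hive#sales_db")); // -> sales_db
    System.out.println(removeCatName("sales_db"));       // unchanged
  }
}
```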
@@ -27,20 +27,19 @@ import javax.validation.Valid; import javax.validation.constraints.NotNull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; +import lombok.extern.log4j.Log4j2; + import com.hotels.bdp.waggledance.api.federation.service.FederationService; import com.hotels.bdp.waggledance.api.model.AbstractMetaStore; import com.hotels.bdp.waggledance.mapping.service.FederatedMetaStoreStorage; @Service +@Log4j2 public class NotifyingFederationService implements FederationService { - private static final Logger LOG = LoggerFactory.getLogger(NotifyingFederationService.class); - public interface FederationEventListener { void onRegister(AbstractMetaStore federatedMetaStore); @@ -111,12 +110,12 @@ public void register(@NotNull @Valid AbstractMetaStore metaStore) { checkNotNull(metaStore, "federatedMetaStore cannot be null"); boolean metastoreDoesNotExist = federatedMetaStoreStorage.get(metaStore.getName()) == null; checkIsTrue(metastoreDoesNotExist, "MetaStore '" + metaStore + "' is already registered"); - LOG.debug("Registering new federation {}", metaStore); + log.debug("Registering new federation {}", metaStore); synchronized (federatedMetaStoreStorage) { federatedMetaStoreStorage.insert(metaStore); onRegister(metaStore); } - LOG.debug("New federation {} has been registered successfully", metaStore); + log.debug("New federation {} has been registered successfully", metaStore); } @Override @@ -129,24 +128,24 @@ public void update(AbstractMetaStore oldMetaStore, AbstractMetaStore newMetaStor boolean newNameDoesNotExist = federatedMetaStoreStorage.get(newMetaStore.getName()) == null; checkIsTrue(newNameDoesNotExist, "MetaStore '" + newMetaStore + "' is already registered"); } - LOG.debug("Registering update of existing federation {} to {}", oldMetaStore, newMetaStore); + log.debug("Registering update of existing federation {} to {}", oldMetaStore, newMetaStore); synchronized (federatedMetaStoreStorage) { federatedMetaStoreStorage.update(oldMetaStore, newMetaStore); onUpdate(oldMetaStore, newMetaStore); } - LOG.debug("Update of federation {} to {} has been registered successfully", oldMetaStore, newMetaStore); + log.debug("Update of federation {} to {} has been registered successfully", oldMetaStore, newMetaStore); } @Override public void unregister(@NotNull String name) { checkNotNull(name, "name cannot be null"); checkNotNull(federatedMetaStoreStorage.get(name), "MeataStore with name '" + name + "' is not registered"); - LOG.debug("Unregistering federation with name {}", name); + log.debug("Unregistering federation with name {}", name); synchronized (federatedMetaStoreStorage) { AbstractMetaStore federatedMetaStore = federatedMetaStoreStorage.delete(name); onUnregister(federatedMetaStore); } - LOG.debug("Federation with name {} is no longer available", name); + log.debug("Federation with name {} is no longer available", name); } @Override diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/PollingFederationService.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/PollingFederationService.java index 1d36bfbe0..8ebcbdd37 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/PollingFederationService.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/PollingFederationService.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 
2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,22 +19,35 @@ import java.util.List; import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import io.micrometer.core.instrument.Counter; +import io.micrometer.core.instrument.ImmutableTag; +import io.micrometer.core.instrument.MeterRegistry; +import lombok.extern.log4j.Log4j2; + +import com.google.common.annotations.VisibleForTesting; import com.hotels.bdp.waggledance.api.model.AbstractMetaStore; import com.hotels.bdp.waggledance.api.model.MetaStoreStatus; import com.hotels.bdp.waggledance.core.federation.service.PopulateStatusFederationService; +@Service +@Log4j2 public class PollingFederationService { - private final static Logger log = LoggerFactory.getLogger(PollingFederationService.class); + private final static String METASTORE_STATUS_METRIC_NAME = "metastore_status"; + private final static String METASTORE_TAG_NAME = "metastore"; private final PopulateStatusFederationService populateStatusFederationService; private Map previous = new HashMap<>(); + private MeterRegistry meterRegistry; - public PollingFederationService(PopulateStatusFederationService populateStatusFederationService) { + @Autowired + public PollingFederationService(PopulateStatusFederationService populateStatusFederationService, MeterRegistry meterRegistry) { this.populateStatusFederationService = populateStatusFederationService; + this.meterRegistry = meterRegistry; } public void poll() { @@ -42,7 +55,10 @@ public void poll() { Map current = new HashMap<>(); List metastores = populateStatusFederationService.getAll(); for (AbstractMetaStore metaStore : metastores) { - current.put(metaStore.getName(), metaStore.getStatus()); + String metastoreName = metaStore.getName(); + MetaStoreStatus metastoreStatus = metaStore.getStatus(); + current.put(metastoreName, metastoreStatus); + sendMetric(metastoreName, metastoreStatus); MetaStoreStatus previousMetastoreStatus = previous.get(metaStore.getName()); if (previousMetastoreStatus != null) { if (previousMetastoreStatus != metaStore.getStatus()) { @@ -52,4 +68,17 @@ public void poll() { } previous = current; } + + private void sendMetric(String metastoreName, MetaStoreStatus status) { + ImmutableTag tag = new ImmutableTag(METASTORE_TAG_NAME, metastoreName); + Counter counter = Counter.builder(METASTORE_STATUS_METRIC_NAME) + .tag(tag.getKey(), tag.getValue()) + .register(meterRegistry); + counter.increment(status.getIntValue()); + } + + @VisibleForTesting + void setMeterRegistry(MeterRegistry meterRegistry) { + this.meterRegistry = meterRegistry; + } } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/PrefixBasedDatabaseMappingService.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/PrefixBasedDatabaseMappingService.java index 8c733df91..a13cdd440 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/PrefixBasedDatabaseMappingService.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/PrefixBasedDatabaseMappingService.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. 
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -26,6 +26,9 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.Future;
 import java.util.function.BiFunction;
 
 import javax.validation.constraints.NotNull;
@@ -36,8 +39,8 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.logging.log4j.util.Strings;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import lombok.extern.log4j.Log4j2;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableList.Builder;
@@ -60,10 +63,8 @@ import com.hotels.bdp.waggledance.server.NoPrimaryMetastoreException;
 import com.hotels.bdp.waggledance.util.AllowList;
 
+@Log4j2
 public class PrefixBasedDatabaseMappingService implements MappingEventListener {
-
-  private static final Logger LOG = LoggerFactory.getLogger(PrefixBasedDatabaseMappingService.class);
-
   private static final String EMPTY_PREFIX = "";
 
   private final MetaStoreMappingFactory metaStoreMappingFactory;
   private final QueryMapping queryMapping;
@@ -188,7 +189,7 @@ public DatabaseMapping databaseMapping(@NotNull String databaseName) throws NoSu
       String metastorePrefix = entry.getKey();
       if (Strings.isNotBlank(metastorePrefix) && databaseName.startsWith(metastorePrefix)) {
         DatabaseMapping databaseMapping = entry.getValue();
-        LOG.debug("Database Name `{}` maps to metastore with prefix `{}`", databaseName, metastorePrefix);
+        log.debug("Database Name `{}` maps to metastore with prefix `{}`", databaseName, metastorePrefix);
         if (includeInResults(databaseMapping, databaseName)) {
           return databaseMapping;
         }
@@ -198,7 +199,7 @@ public DatabaseMapping databaseMapping(@NotNull String databaseName) throws NoSu
     // Find a Metastore that has an empty prefix
     DatabaseMapping databaseMapping = mappingsByPrefix.get(EMPTY_PREFIX);
     if (databaseMapping != null) {
-      LOG.debug("Database Name `{}` maps to metastore with EMPTY_PREFIX", databaseName);
+      log.debug("Database Name `{}` maps to metastore with EMPTY_PREFIX", databaseName);
       if (includeInResults(databaseMapping, databaseName)) {
         return databaseMapping;
       }
@@ -206,13 +207,13 @@ public DatabaseMapping databaseMapping(@NotNull String databaseName) throws NoSu
     if (primaryDatabaseMapping != null) {
       // If none found we fall back to primary one
       if (includeInResults(primaryDatabaseMapping, databaseName)) {
-        LOG.debug("Database Name `{}` maps to 'primary' metastore", databaseName);
+        log.debug("Database Name `{}` maps to 'primary' metastore", databaseName);
         return primaryDatabaseMapping;
       }
       throw new NoSuchObjectException("Primary metastore does not have database " + databaseName);
     }
 
-    LOG.debug("Database Name `{}` not mapped", databaseName);
+    log.debug("Database Name `{}` not mapped", databaseName);
     throw new NoPrimaryMetastoreException(
         "Waggle Dance error no database mapping available tried to map database '" + databaseName + "'");
   }
@@ -254,17 +255,52 @@ private boolean isTableAllowed(String databasePrefix, String database, String ta
     return tblAllowList.contains(table);
   }
 
+  /**
+   * This runs in parallel because includeInResults could potentially be slow (waits/retries) for certain
+   * slow-responding metastores. ExecutorService + Futures are used to maintain order. Order is important:
+   * for calls like 'show databases', for example, we return results grouped/ordered per metastore.
+   */
   @Override
-  public List<DatabaseMapping> getDatabaseMappings() {
+  public List<DatabaseMapping> getAvailableDatabaseMappings() {
+    // TODO PD refactor/add same logic for StaticDatabaseMappingService.
     Builder<DatabaseMapping> builder = ImmutableList.builder();
-    synchronized (mappingsByPrefix) {
-      for (DatabaseMapping databaseMapping : mappingsByPrefix.values()) {
-        if (includeInResults(databaseMapping)) {
-          builder.add(databaseMapping);
+    // ForkJoinPool requires parallelism > 0, so guard against an empty mapping set.
+    ForkJoinPool customThreadPool = new ForkJoinPool(Math.max(1, mappingsByPrefix.size()));
+    try {
+      synchronized (mappingsByPrefix) {
+        List<Future<DatabaseMapping>> futures = new ArrayList<>();
+        for (DatabaseMapping databaseMapping : mappingsByPrefix.values()) {
+          futures.add(customThreadPool.submit(() -> {
+            if (includeInResults(databaseMapping)) {
+              return databaseMapping;
+            }
+            return null;
+          }));
+        }
+
+        for (Future<DatabaseMapping> future : futures) {
+          try {
+            DatabaseMapping mapping = future.get();
+            if (mapping != null) {
+              builder.add(mapping);
+            }
+          } catch (InterruptedException e) {
+            // restore the interrupt flag; the mapping is skipped
+            Thread.currentThread().interrupt();
+          } catch (ExecutionException e) {
+            log.error("Can't include mapping", e);
+          }
         }
       }
+    } finally {
+      customThreadPool.shutdownNow();
     }
-    return builder.build();
+    List<DatabaseMapping> result = builder.build();
+    return result;
+  }
+
+  @Override
+  public List<DatabaseMapping> getAllDatabaseMappings() {
+    return new ArrayList<>(mappingsByPrefix.values());
   }
 
   private Map<String, DatabaseMapping> databaseMappingsByDbPattern(@NotNull String databasePatterns) {
@@ -335,7 +371,7 @@ public List<String> getAllDatabases(String databasePattern) {
 
   @Override
   public List<String> getAllDatabases() {
-    List<DatabaseMapping> databaseMappings = getDatabaseMappings();
+    List<DatabaseMapping> databaseMappings = getAllDatabaseMappings();
     List<GetAllDatabasesRequest> allRequests = new ArrayList<>();
 
     BiFunction<List<String>, DatabaseMapping, List<String>> filter = (
diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/SimpleFederationStatusService.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/SimpleFederationStatusService.java
index 2d0ea6e56..6fc126b01 100644
--- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/SimpleFederationStatusService.java
+++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/SimpleFederationStatusService.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2019 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/StaticDatabaseMappingService.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/StaticDatabaseMappingService.java
index 2debfae8c..75b1eae3a 100644
--- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/StaticDatabaseMappingService.java
+++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/StaticDatabaseMappingService.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2021 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
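The getAvailableDatabaseMappings change above fans the includeInResults checks out to a thread pool while keeping results in submission order, so callers still see databases grouped per metastore. A minimal, self-contained sketch of that futures pattern (illustrative names, Java 8 compatible; the real code uses a ForkJoinPool and Waggle Dance types):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.function.Predicate;

    final class OrderedParallelFilter {

      private OrderedParallelFilter() {}

      // Runs a potentially slow predicate on every item concurrently, but collects
      // the survivors in submission order, mirroring the diff above.
      static <T> List<T> filterPreservingOrder(List<T> items, Predicate<T> slowCheck)
          throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, items.size()));
        try {
          List<Future<T>> futures = new ArrayList<>();
          for (T item : items) {
            futures.add(pool.submit(() -> slowCheck.test(item) ? item : null));
          }
          List<T> result = new ArrayList<>();
          for (Future<T> future : futures) { // iteration order == submission order
            try {
              T value = future.get();
              if (value != null) {
                result.add(value);
              }
            } catch (ExecutionException e) {
              // a failing check simply drops the item, as the diff does
            }
          }
          return result;
        } finally {
          pool.shutdownNow();
        }
      }
    }

The key design point is that parallelism comes from submit() while ordering comes from iterating the futures list, not from whichever task happens to finish first.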
@@ -39,8 +39,8 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import lombok.extern.log4j.Log4j2;
 
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
@@ -58,6 +58,7 @@ import com.hotels.bdp.waggledance.mapping.model.DatabaseMappingImpl;
 import com.hotels.bdp.waggledance.mapping.model.MetaStoreMapping;
 import com.hotels.bdp.waggledance.mapping.model.QueryMapping;
+import com.hotels.bdp.waggledance.mapping.service.GrammarUtils;
 import com.hotels.bdp.waggledance.mapping.service.MappingEventListener;
 import com.hotels.bdp.waggledance.mapping.service.MetaStoreMappingFactory;
 import com.hotels.bdp.waggledance.mapping.service.PanopticConcurrentOperationExecutor;
@@ -66,10 +67,8 @@ import com.hotels.bdp.waggledance.server.NoPrimaryMetastoreException;
 import com.hotels.bdp.waggledance.util.AllowList;
 
+@Log4j2
 public class StaticDatabaseMappingService implements MappingEventListener {
-
-  private static final Logger LOG = LoggerFactory.getLogger(StaticDatabaseMappingService.class);
-
   private static final String PRIMARY_KEY = "";
 
   private final MetaStoreMappingFactory metaStoreMappingFactory;
   private final LoadingCache<String, List<String>> primaryDatabasesCache;
@@ -121,7 +120,7 @@ private void add(AbstractMetaStore metaStore) {
         AllowList allowedDatabases = new AllowList(metaStore.getMappedDatabases());
         mappableDatabases = applyAllowList(allDatabases, allowedDatabases);
       } catch (TException e) {
-        LOG.error("Could not get databases for metastore {}", metaStore.getRemoteMetaStoreUris(), e);
+        log.error("Could not get databases for metastore {}", metaStore.getRemoteMetaStoreUris(), e);
       }
     }
     DatabaseMapping databaseMapping = createDatabaseMapping(metaStoreMapping);
@@ -292,22 +291,24 @@ private boolean includeInResults(MetaStoreMapping metaStoreMapping) {
 
   @Override
   public DatabaseMapping databaseMapping(@NotNull String databaseName) throws NoSuchObjectException {
+    databaseName = GrammarUtils.removeCatName(databaseName);
     DatabaseMapping databaseMapping = mappingsByDatabaseName.get(databaseName.toLowerCase(Locale.ROOT));
     if (databaseMapping != null) {
-      LOG
+      log
          .debug("Database Name `{}` maps to metastore with name '{}'", databaseName,
              databaseMapping.getMetastoreMappingName());
       if (includeInResults(databaseMapping)) {
         return databaseMapping;
       }
     }
-    LOG.debug("Database Name `{}` not mapped", databaseName);
+    log.debug("Database Name `{}` not mapped", databaseName);
     throw new NoSuchObjectException("Primary metastore does not have database " + databaseName);
   }
 
   @Override
   public void checkTableAllowed(String databaseName, String tableName, DatabaseMapping mapping)
       throws NoSuchObjectException {
+    databaseName = GrammarUtils.removeCatName(databaseName);
     if (!isTableAllowed(databaseName, tableName)) {
       throw new NoSuchObjectException(String.format("%s.%s table not found in any mappings", databaseName, tableName));
     }
@@ -316,6 +317,7 @@ public List<String> filterTables(String databaseName, List<String> tableNames, DatabaseMapping mapping) {
     List<String> allowedTables = new ArrayList<>();
+    databaseName = GrammarUtils.removeCatName(databaseName);
     String db = databaseName.toLowerCase(Locale.ROOT);
     for (String table: tableNames)
       if (isTableAllowed(db, table)) {
@@ -334,7 +336,7 @@ private boolean isTableAllowed(String database, String table) {
   }
 
   @Override
-  public List<DatabaseMapping> getDatabaseMappings() {
+  public List<DatabaseMapping> getAvailableDatabaseMappings() {
     Builder<DatabaseMapping> builder = ImmutableList.builder();
     synchronized (mappingsByMetaStoreName) {
       for (DatabaseMapping databaseMapping : mappingsByMetaStoreName.values()) {
@@ -346,6 +348,11 @@ public List<DatabaseMapping> getDatabaseMappings() {
     return builder.build();
   }
 
+  @Override
+  public List<DatabaseMapping> getAllDatabaseMappings() {
+    return new ArrayList<>(mappingsByMetaStoreName.values());
+  }
+
   private boolean databaseAndTableAllowed(String database, String table, DatabaseMapping mapping) {
     boolean isPrimary = mapping.equals(primaryDatabaseMapping);
     boolean isMapped = mappingsByDatabaseName.containsKey(database);
@@ -365,7 +372,7 @@ public List<TableMeta> getTableMeta(String db_patterns, String tbl_patterns, Lis
         databaseAndTableAllowed(tableMeta.getDbName(), tableMeta.getTableName(), mapping);
 
     Map<DatabaseMapping, String> mappingsForPattern = new LinkedHashMap<>();
-    for (DatabaseMapping mapping : getDatabaseMappings()) {
+    for (DatabaseMapping mapping : getAvailableDatabaseMappings()) {
       mappingsForPattern.put(mapping, db_patterns);
     }
     return super.getTableMeta(tbl_patterns, tbl_types, mappingsForPattern, filter);
@@ -376,12 +383,15 @@ public List<String> getAllDatabases(String pattern) {
     BiFunction<String, DatabaseMapping, Boolean> filter = (database, mapping) -> mappingsByDatabaseName
         .containsKey(database);
 
+    BiFunction<String, DatabaseMapping, Boolean> filter1 = (database, mapping) -> filter.apply(database, mapping)
+        && databaseMappingToDatabaseList.get(mapping.getMetastoreMappingName()).contains(database);
+
     Map<DatabaseMapping, String> mappingsForPattern = new LinkedHashMap<>();
-    for (DatabaseMapping mapping : getDatabaseMappings()) {
+    for (DatabaseMapping mapping : getAllDatabaseMappings()) {
       mappingsForPattern.put(mapping, pattern);
     }
-    return super.getAllDatabases(mappingsForPattern, filter);
+    return super.getAllDatabases(mappingsForPattern, filter1);
   }
 
   @Override
diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/YamlFederatedMetaStoreStorage.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/YamlFederatedMetaStoreStorage.java
index bf1cdbad2..5df5a105c 100644
--- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/YamlFederatedMetaStoreStorage.java
+++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/impl/YamlFederatedMetaStoreStorage.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2019 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -34,13 +34,13 @@ import org.apache.commons.vfs2.FileSystemException; import org.apache.commons.vfs2.FileSystemManager; import org.apache.commons.vfs2.VFS; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Repository; import org.yaml.snakeyaml.Yaml; +import lombok.extern.log4j.Log4j2; + import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList.Builder; @@ -55,8 +55,8 @@ import com.hotels.bdp.waggledance.yaml.YamlFactory; @Repository +@Log4j2 public class YamlFederatedMetaStoreStorage implements FederatedMetaStoreStorage { - private static final Logger LOG = LoggerFactory.getLogger(YamlFederatedMetaStoreStorage.class); private static final Validator VALIDATOR = Validation.buildDefaultValidatorFactory().getValidator(); @@ -120,7 +120,7 @@ private static void insert(AbstractMetaStore federatedMetaStore, Map newFederationsMap = new LinkedHashMap<>(); Federations federations = yamlMarshaller.unmarshall(federationConfigLocation); if (federations != null && federations.getPrimaryMetaStore() != null) { @@ -178,7 +178,7 @@ public void loadFederation() { synchronized (federationsMapLock) { federationsMap = newFederationsMap; } - LOG.info("Loaded {} federations", federationsMap.size()); + log.info("Loaded {} federations", federationsMap.size()); } @PreDestroy @@ -204,7 +204,7 @@ public void insert(AbstractMetaStore federatedMetaStore) { public void update(AbstractMetaStore oldMetaStore, AbstractMetaStore newMetaStore) { validate(newMetaStore); synchronized (federationsMapLock) { - LOG.debug("Updating federation {} to {}", oldMetaStore, newMetaStore); + log.debug("Updating federation {} to {}", oldMetaStore, newMetaStore); if (newMetaStore.getFederationType() == FederationType.PRIMARY) { primaryMetaStore = (PrimaryMetaStore) newMetaStore; } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllDatabasesByPatternRequest.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllDatabasesByPatternRequest.java index b9f9ed375..f81e4442b 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllDatabasesByPatternRequest.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllDatabasesByPatternRequest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
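For context on the logging changes in this and the surrounding files: Lombok's @Log4j2 generates the logger field that the deleted SLF4J declarations used to provide, which is why every call site is renamed from LOG to log and both org.slf4j imports can go. Roughly, per the Lombok documentation:

    import lombok.extern.log4j.Log4j2;

    @Log4j2
    class Annotated {
      void run() { log.info("Loaded {} federations", 3); }
    }

    // compiles as if written:
    class Expanded {
      private static final org.apache.logging.log4j.Logger log =
          org.apache.logging.log4j.LogManager.getLogger(Expanded.class);
      void run() { log.info("Loaded {} federations", 3); }
    }

Since both SLF4J and Log4j2 support '{}' placeholders, the existing message strings carry over unchanged.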
@@ -21,23 +21,19 @@ import org.apache.thrift.TException; +import lombok.AllArgsConstructor; +import lombok.Getter; + import com.hotels.bdp.waggledance.mapping.model.DatabaseMapping; +@AllArgsConstructor public class GetAllDatabasesByPatternRequest implements RequestCallable> { - private final String pattern; + @Getter private final DatabaseMapping mapping; + private final String pattern; private final BiFunction filter; - public GetAllDatabasesByPatternRequest( - DatabaseMapping mapping, - String pattern, - BiFunction filter) { - this.mapping = mapping; - this.pattern = pattern; - this.filter = filter; - } - @Override public List call() throws TException { List databases = mapping.getClient().get_databases(pattern); @@ -49,9 +45,4 @@ public List call() throws TException { } return mappedDatabases; } - - @Override - public DatabaseMapping getMapping() { - return mapping; - } } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllDatabasesRequest.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllDatabasesRequest.java index 8dbee030a..3682b54f4 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllDatabasesRequest.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllDatabasesRequest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,28 +20,21 @@ import org.apache.thrift.TException; +import lombok.AllArgsConstructor; +import lombok.Getter; + import com.hotels.bdp.waggledance.mapping.model.DatabaseMapping; +@AllArgsConstructor public class GetAllDatabasesRequest implements RequestCallable> { + @Getter private final DatabaseMapping mapping; private final BiFunction, DatabaseMapping, List> filter; - public GetAllDatabasesRequest( - DatabaseMapping mapping, - BiFunction, DatabaseMapping, List> filter) { - this.mapping = mapping; - this.filter = filter; - } - @Override public List call() throws TException { List databases = mapping.getClient().get_all_databases(); return filter.apply(databases, mapping); } - - @Override - public DatabaseMapping getMapping() { - return mapping; - } } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllFunctionsRequest.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllFunctionsRequest.java index 11096ffa3..18aeb9cb2 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllFunctionsRequest.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetAllFunctionsRequest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
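The request classes in this part of the diff all follow the same pattern: @AllArgsConstructor replaces the hand-written constructor and a field-level @Getter replaces the getMapping() override. Note that @AllArgsConstructor generates its parameters in field declaration order, which is presumably why `mapping` moved above `pattern` in GetAllDatabasesByPatternRequest: existing call sites of the form new GetAllDatabasesByPatternRequest(mapping, pattern, filter) keep compiling. A sketch of the equivalence (DatabaseMapping is a stand-in so the snippet compiles on its own):

    import java.util.List;
    import java.util.function.BiFunction;

    import lombok.AllArgsConstructor;
    import lombok.Getter;

    @AllArgsConstructor // generates: ExampleRequest(DatabaseMapping, String, BiFunction<...>)
    class ExampleRequest {
      @Getter private final DatabaseMapping mapping; // generates: public DatabaseMapping getMapping()
      private final String pattern;
      private final BiFunction<List<String>, DatabaseMapping, List<String>> filter;
    }

    interface DatabaseMapping {} // stand-in for the real Waggle Dance type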
@@ -21,16 +21,17 @@ import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; +import lombok.AllArgsConstructor; +import lombok.Getter; + import com.hotels.bdp.waggledance.mapping.model.DatabaseMapping; +@AllArgsConstructor public class GetAllFunctionsRequest implements RequestCallable> { + @Getter private final DatabaseMapping mapping; - public GetAllFunctionsRequest(DatabaseMapping mapping) { - this.mapping = mapping; - } - @Override public List call() throws Exception { GetAllFunctionsResponse response = mapping.getClient().get_all_functions(); @@ -42,9 +43,4 @@ public List call() throws Exception { return Collections.singletonList(response); } - @Override - public DatabaseMapping getMapping() { - return mapping; - } - } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetTableMetaRequest.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetTableMetaRequest.java index 61d5af9f3..14e2892d5 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetTableMetaRequest.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/GetTableMetaRequest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,29 +22,21 @@ import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.thrift.TException; +import lombok.AllArgsConstructor; +import lombok.Getter; + import com.hotels.bdp.waggledance.mapping.model.DatabaseMapping; +@AllArgsConstructor public class GetTableMetaRequest implements RequestCallable> { + @Getter private final DatabaseMapping mapping; private final String dbPattern; private final String tablePattern; private final List tableTypes; private final BiFunction filter; - public GetTableMetaRequest( - DatabaseMapping mapping, - String dbPattern, - String tablePattern, - List tableTypes, - BiFunction filter) { - this.mapping = mapping; - this.dbPattern = dbPattern; - this.tablePattern = tablePattern; - this.tableTypes = tableTypes; - this.filter = filter; - } - @Override public List call() throws TException { List tables = mapping.getClient().get_table_meta(dbPattern, tablePattern, tableTypes); @@ -56,9 +48,4 @@ public List call() throws TException { } return mappedTableMeta; } - - @Override - public DatabaseMapping getMapping() { - return mapping; - } } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/SetUgiRequest.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/SetUgiRequest.java index 46ecac4bb..cda3b6bcb 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/SetUgiRequest.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/mapping/service/requests/SetUgiRequest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
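Each of these classes implements RequestCallable, whose definition is outside this diff. Judging from the overridden call() methods and the getMapping() accessor that Lombok now supplies, it plausibly looks like the following; treat this as a hypothetical reconstruction, not the project's actual source:

    import java.util.concurrent.Callable;

    // Hypothetical reconstruction of
    // com.hotels.bdp.waggledance.mapping.service.requests.RequestCallable.
    interface RequestCallable<T> extends Callable<T> {
      DatabaseMapping getMapping(); // satisfied by @Getter on the mapping field
    }

    interface DatabaseMapping {} // stand-in for the real type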
@@ -17,27 +17,21 @@ import java.util.List; +import lombok.AllArgsConstructor; +import lombok.Getter; + import com.hotels.bdp.waggledance.mapping.model.DatabaseMapping; +@AllArgsConstructor public class SetUgiRequest implements RequestCallable> { + @Getter private final DatabaseMapping mapping; private final String user_name; private final List group_names; - public SetUgiRequest(DatabaseMapping mapping, String user_name, List group_names) { - this.mapping = mapping; - this.user_name = user_name; - this.group_names = group_names; - } - @Override public List call() throws Exception { return mapping.getClient().set_ugi(user_name, group_names); } - - @Override - public DatabaseMapping getMapping() { - return mapping; - } } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/metrics/CurrentMonitoredMetaStoreHolder.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/metrics/CurrentMonitoredMetaStoreHolder.java index ddced26cd..48b3c5fb2 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/metrics/CurrentMonitoredMetaStoreHolder.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/metrics/CurrentMonitoredMetaStoreHolder.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,11 +17,11 @@ import org.apache.commons.lang3.StringUtils; -public final class CurrentMonitoredMetaStoreHolder { - - private CurrentMonitoredMetaStoreHolder() { - } +import lombok.AccessLevel; +import lombok.NoArgsConstructor; +@NoArgsConstructor(access = AccessLevel.PRIVATE) +public final class CurrentMonitoredMetaStoreHolder { private static final String ALL_METASTORES = "all"; private static final ThreadLocal MONITORED_METASTORE = new ThreadLocal<>(); diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/metrics/MonitoringConfiguration.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/metrics/MonitoringConfiguration.java index d273bd952..d41354dc6 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/metrics/MonitoringConfiguration.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/metrics/MonitoringConfiguration.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2020 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -50,7 +50,7 @@ public String get(String key) { } } - @Bean + @Bean("meterRegistry") public GraphiteMeterRegistry graphiteMeterRegistry(GraphiteConfiguration graphiteConfiguration) { GraphiteConfig graphiteConfig = new DisabledGraphiteConfig(); diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/ExceptionWrappingHMSHandler.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/ExceptionWrappingHMSHandler.java index fd79f036a..3119691ce 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/ExceptionWrappingHMSHandler.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/ExceptionWrappingHMSHandler.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
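On CurrentMonitoredMetaStoreHolder, the explicit private constructor is swapped for @NoArgsConstructor(access = AccessLevel.PRIVATE); Lombok generates exactly the same private no-arg constructor, keeping the utility class non-instantiable. Equivalence sketch:

    import lombok.AccessLevel;
    import lombok.NoArgsConstructor;

    @NoArgsConstructor(access = AccessLevel.PRIVATE)
    final class UtilityHolder {
      static final String ALL_METASTORES = "all";
    }

    // compiles as if written:
    final class UtilityHolderExpanded {
      static final String ALL_METASTORES = "all";
      private UtilityHolderExpanded() {}
    }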
@@ -24,15 +24,14 @@ import org.apache.hadoop.hive.metastore.IHMSHandler; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import lombok.extern.log4j.Log4j2; import com.hotels.bdp.waggledance.server.security.NotAllowedException; +@Log4j2 public class ExceptionWrappingHMSHandler implements InvocationHandler { - private final static Logger LOG = LoggerFactory.getLogger(ExceptionWrappingHMSHandler.class); - private final IHMSHandler baseHandler; public static IHMSHandler newProxyInstance(IHMSHandler baseHandler) { @@ -55,7 +54,7 @@ public Object invoke(Object proxy, Method method, Object[] args) throws Throwabl // the thrift exception. throw new MetaException("Waggle Dance: " + cause.getMessage()); } else if (cause instanceof WaggleDanceServerException) { - LOG.debug("Got error processing '{}' with args '{}'", method, Arrays.toString(args), cause); + log.debug("Got error processing '{}' with args '{}'", method, Arrays.toString(args), cause); throw new MetaException("Waggle Dance: " + cause.getMessage()); } else { // Need to unwrap this, so callers get the correct exception thrown by the handler. diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/FederatedHMSHandler.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/FederatedHMSHandler.java index a4bf274f8..842bbd9fb 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/FederatedHMSHandler.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/FederatedHMSHandler.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
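ExceptionWrappingHMSHandler wraps the real IHMSHandler in a JDK dynamic proxy, and reflective invocation wraps anything the handler throws in an InvocationTargetException; that is why invoke() inspects the cause before translating it. A minimal, self-contained sketch of the proxy shape (simplified types; not the project's exact code):

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;

    final class UnwrappingHandler implements InvocationHandler {

      private final Object target;

      private UnwrappingHandler(Object target) {
        this.target = target;
      }

      @SuppressWarnings("unchecked")
      static <T> T wrap(Class<T> iface, T target) {
        return (T) Proxy.newProxyInstance(iface.getClassLoader(),
            new Class<?>[] { iface }, new UnwrappingHandler(target));
      }

      @Override
      public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        try {
          return method.invoke(target, args);
        } catch (InvocationTargetException e) {
          // rethrow what the target actually threw so callers can translate it,
          // e.g. into a MetaException as the diff does
          throw e.getCause();
        }
      }
    }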
@@ -21,38 +21,65 @@ import java.util.Map; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.HiveMetaStore; +import org.apache.hadoop.hive.metastore.MetaStoreEventListener; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; +import org.apache.hadoop.hive.metastore.api.AddCheckConstraintRequest; +import org.apache.hadoop.hive.metastore.api.AddDefaultConstraintRequest; import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; import org.apache.hadoop.hive.metastore.api.AddForeignKeyRequest; +import org.apache.hadoop.hive.metastore.api.AddNotNullConstraintRequest; import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest; import org.apache.hadoop.hive.metastore.api.AddPartitionsResult; import org.apache.hadoop.hive.metastore.api.AddPrimaryKeyRequest; +import org.apache.hadoop.hive.metastore.api.AddUniqueConstraintRequest; import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.AlterCatalogRequest; +import org.apache.hadoop.hive.metastore.api.AlterISchemaRequest; import org.apache.hadoop.hive.metastore.api.CacheFileMetadataRequest; import org.apache.hadoop.hive.metastore.api.CacheFileMetadataResult; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsResponse; import org.apache.hadoop.hive.metastore.api.CheckLockRequest; import org.apache.hadoop.hive.metastore.api.ClearFileMetadataRequest; import org.apache.hadoop.hive.metastore.api.ClearFileMetadataResult; +import org.apache.hadoop.hive.metastore.api.CmRecycleRequest; +import org.apache.hadoop.hive.metastore.api.CmRecycleResponse; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; +import org.apache.hadoop.hive.metastore.api.CreateCatalogRequest; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsResponse; +import org.apache.hadoop.hive.metastore.api.DropCatalogRequest; import org.apache.hadoop.hive.metastore.api.DropConstraintRequest; import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest; import org.apache.hadoop.hive.metastore.api.DropPartitionsResult; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp; +import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst; import org.apache.hadoop.hive.metastore.api.FireEventRequest; import org.apache.hadoop.hive.metastore.api.FireEventResponse; import 
org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; import org.apache.hadoop.hive.metastore.api.ForeignKeysResponse; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; +import org.apache.hadoop.hive.metastore.api.GetCatalogRequest; +import org.apache.hadoop.hive.metastore.api.GetCatalogResponse; +import org.apache.hadoop.hive.metastore.api.GetCatalogsResponse; import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest; import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult; import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest; @@ -63,10 +90,14 @@ import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; +import org.apache.hadoop.hive.metastore.api.GetRuntimeStatsRequest; +import org.apache.hadoop.hive.metastore.api.GetSerdeRequest; import org.apache.hadoop.hive.metastore.api.GetTableRequest; import org.apache.hadoop.hive.metastore.api.GetTableResult; import org.apache.hadoop.hive.metastore.api.GetTablesRequest; import org.apache.hadoop.hive.metastore.api.GetTablesResult; +import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsRequest; +import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse; import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest; import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse; import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest; @@ -77,7 +108,8 @@ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.ISchema; +import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -85,12 +117,18 @@ import org.apache.hadoop.hive.metastore.api.LockComponent; import org.apache.hadoop.hive.metastore.api.LockRequest; import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.MapSchemaVersionToSerdeRequest; +import org.apache.hadoop.hive.metastore.api.Materialization; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchLockException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsResponse; import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.Partition; @@ -109,10 +147,20 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import 
org.apache.hadoop.hive.metastore.api.PutFileMetadataRequest; import org.apache.hadoop.hive.metastore.api.PutFileMetadataResult; +import org.apache.hadoop.hive.metastore.api.ReplTblWriteIdStateRequest; import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RuntimeStat; +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.SchemaVersion; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.SetSchemaVersionStateRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; @@ -125,21 +173,62 @@ import org.apache.hadoop.hive.metastore.api.TxnAbortedException; import org.apache.hadoop.hive.metastore.api.TxnOpenException; import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsResponse; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.metastore.api.UnlockRequest; +import org.apache.hadoop.hive.metastore.api.WMAlterPoolRequest; +import org.apache.hadoop.hive.metastore.api.WMAlterPoolResponse; +import org.apache.hadoop.hive.metastore.api.WMAlterResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMAlterResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMAlterTriggerRequest; +import org.apache.hadoop.hive.metastore.api.WMAlterTriggerResponse; +import org.apache.hadoop.hive.metastore.api.WMCreateOrDropTriggerToPoolMappingRequest; +import org.apache.hadoop.hive.metastore.api.WMCreateOrDropTriggerToPoolMappingResponse; +import org.apache.hadoop.hive.metastore.api.WMCreateOrUpdateMappingRequest; +import org.apache.hadoop.hive.metastore.api.WMCreateOrUpdateMappingResponse; +import org.apache.hadoop.hive.metastore.api.WMCreatePoolRequest; +import org.apache.hadoop.hive.metastore.api.WMCreatePoolResponse; +import org.apache.hadoop.hive.metastore.api.WMCreateResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMCreateResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMCreateTriggerRequest; +import org.apache.hadoop.hive.metastore.api.WMCreateTriggerResponse; +import org.apache.hadoop.hive.metastore.api.WMDropMappingRequest; +import org.apache.hadoop.hive.metastore.api.WMDropMappingResponse; +import org.apache.hadoop.hive.metastore.api.WMDropPoolRequest; +import org.apache.hadoop.hive.metastore.api.WMDropPoolResponse; +import org.apache.hadoop.hive.metastore.api.WMDropResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMDropResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMDropTriggerRequest; +import org.apache.hadoop.hive.metastore.api.WMDropTriggerResponse; +import 
org.apache.hadoop.hive.metastore.api.WMGetActiveResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMGetActiveResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMGetAllResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMGetAllResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMGetResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMGetResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMGetTriggersForResourePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMGetTriggersForResourePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.txn.TxnStore; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.beans.factory.config.ConfigurableBeanFactory; import org.springframework.context.annotation.Scope; import org.springframework.stereotype.Component; +import lombok.extern.log4j.Log4j2; + import com.facebook.fb303.FacebookBase; import com.facebook.fb303.fb_status; import com.jcabi.aspects.Loggable; +import com.hotels.bdp.waggledance.conf.WaggleDanceConfiguration; import com.hotels.bdp.waggledance.mapping.model.DatabaseMapping; import com.hotels.bdp.waggledance.mapping.service.MappingEventListener; import com.hotels.bdp.waggledance.mapping.service.impl.NotifyingFederationService; @@ -148,21 +237,23 @@ @Monitored @Component @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) +@Log4j2 class FederatedHMSHandler extends FacebookBase implements CloseableIHMSHandler { - private static final Logger LOG = LoggerFactory.getLogger(FederatedHMSHandler.class); - private static final String INVOCATION_LOG_NAME = "com.hotels.bdp.waggledance.server.invocation-log"; private final MappingEventListener databaseMappingService; private final NotifyingFederationService notifyingFederationService; + private final WaggleDanceConfiguration waggleDanceConfiguration; private Configuration conf; FederatedHMSHandler( MappingEventListener databaseMappingService, - NotifyingFederationService notifyingFederationService) { + NotifyingFederationService notifyingFederationService, + WaggleDanceConfiguration waggleDanceConfiguration) { super("waggle-dance-handler"); this.databaseMappingService = databaseMappingService; this.notifyingFederationService = notifyingFederationService; + this.waggleDanceConfiguration = waggleDanceConfiguration; this.notifyingFederationService.subscribe(databaseMappingService); } @@ -206,7 +297,7 @@ public void shutdown() { notifyingFederationService.unsubscribe(databaseMappingService); databaseMappingService.close(); } catch (IOException e) { - LOG.warn("Error shutting down federated handler", e); + log.warn("Error shutting down federated handler", e); } } @@ -224,6 +315,42 @@ public void setMetaConf(String key, String value) throws MetaException, TExcepti getPrimaryClient().setMetaConf(key, value); } + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void create_catalog(CreateCatalogRequest createCatalogRequest) throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + DatabaseMapping databaseMapping = 
databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().create_catalog(createCatalogRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void alter_catalog(AlterCatalogRequest alterCatalogRequest) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().alter_catalog(alterCatalogRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public GetCatalogResponse get_catalog(GetCatalogRequest getCatalogRequest) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + return databaseMapping.getClient().get_catalog(getCatalogRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public GetCatalogsResponse get_catalogs() throws MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + return databaseMapping.getClient().get_catalogs(); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void drop_catalog(DropCatalogRequest dropCatalogRequest) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().drop_catalog(dropCatalogRequest); + } + + @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public void create_database(Database database) @@ -235,9 +362,9 @@ public void create_database(Database database) @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public Database get_database(String name) throws NoSuchObjectException, MetaException, TException { - LOG.info("Fetching database {}", name); + log.info("Fetching database {}", name); DatabaseMapping mapping = databaseMappingService.databaseMapping(name); - LOG.info("Mapping is '{}'", mapping.getDatabasePrefix()); + log.info("Mapping is '{}'", mapping.getDatabasePrefix()); Database result = mapping.getClient().get_database(mapping.transformInboundDatabaseName(name)); return mapping.transformOutboundDatabase(mapping.getMetastoreFilter().filterDatabase(result)); } @@ -329,6 +456,22 @@ public void create_table_with_environment_context(Table tbl, EnvironmentContext mapping.getClient().create_table_with_environment_context(mapping.transformInboundTable(tbl), environment_context); } + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, + List uniqueConstraints, List notNullConstraints, + List defaultConstraints, List checkConstraints) + throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(tbl.getDbName()); + databaseMapping.getClient().create_table_with_constraints(databaseMapping.transformInboundTable(tbl), + databaseMapping.transformInboundSQLPrimaryKeys(primaryKeys), + databaseMapping.transformInboundSQLForeignKeys(foreignKeys), + databaseMapping.transformInboundSQLUniqueConstraints(uniqueConstraints), + 
databaseMapping.transformInboundSQLNotNullConstraints(notNullConstraints), + databaseMapping.transformInboundSQLDefaultConstraints(defaultConstraints), + databaseMapping.transformInboundSQLCheckConstraints(checkConstraints)); + } + @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public void drop_table(String dbname, String name, boolean deleteData) @@ -352,13 +495,20 @@ public void drop_table_with_environment_context( environment_context); } + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void truncate_table(String dbName, String tableName, List partNames) throws MetaException, TException { + DatabaseMapping databaseMapping = getDbMappingAndCheckTableAllowed(dbName, tableName); + databaseMapping.getClient().truncate_table(databaseMapping.transformInboundDatabaseName(dbName), tableName, partNames); + } + @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public List get_tables(String db_name, String pattern) throws MetaException, TException { DatabaseMapping mapping = databaseMappingService.databaseMapping(db_name); List resultTables = mapping.getClient().get_tables(mapping.transformInboundDatabaseName(db_name), pattern); resultTables = databaseMappingService.filterTables(db_name, resultTables, mapping); - return mapping.getMetastoreFilter().filterTableNames(db_name, resultTables); + return mapping.getMetastoreFilter().filterTableNames(null, db_name, resultTables); } @Override @@ -367,7 +517,7 @@ public List get_all_tables(String db_name) throws MetaException, TExcept DatabaseMapping mapping = databaseMappingService.databaseMapping(db_name); List resultTables = mapping.getClient().get_all_tables(mapping.transformInboundDatabaseName(db_name)); resultTables = databaseMappingService.filterTables(db_name, resultTables, mapping); - return mapping.getMetastoreFilter().filterTableNames(db_name, resultTables); + return mapping.getMetastoreFilter().filterTableNames(null, db_name, resultTables); } @Override @@ -404,7 +554,7 @@ public List get_table_names_by_filter(String dbname, String filter, shor List resultTables = mapping.getClient() .get_table_names_by_filter(mapping.transformInboundDatabaseName(dbname), filter, max_tables); List result = databaseMappingService.filterTables(dbname, resultTables, mapping); - return mapping.getMetastoreFilter().filterTableNames(dbname, result); + return mapping.getMetastoreFilter().filterTableNames(null, dbname, result); } @Override @@ -721,7 +871,7 @@ public List get_partition_names(String db_name, String tbl_name, short m DatabaseMapping mapping = getDbMappingAndCheckTableAllowed(db_name, tbl_name); List result = mapping.getClient() .get_partition_names(mapping.transformInboundDatabaseName(db_name), tbl_name, max_parts); - return mapping.getMetastoreFilter().filterPartitionNames(db_name, tbl_name, result); + return mapping.getMetastoreFilter().filterPartitionNames(null, db_name, tbl_name, result); } @Override @@ -761,7 +911,7 @@ public List get_partition_names_ps(String db_name, String tbl_name, List List result = mapping .getClient() .get_partition_names_ps(mapping.transformInboundDatabaseName(db_name), tbl_name, part_vals, max_parts); - return mapping.getMetastoreFilter().filterPartitionNames(db_name, tbl_name, result); + return mapping.getMetastoreFilter().filterPartitionNames(null, db_name, tbl_name, result); } @Override @@ -915,70 +1065,6 @@ public boolean isPartitionMarkedForEvent( 
.isPartitionMarkedForEvent(mapping.transformInboundDatabaseName(db_name), tbl_name, part_vals, eventType); } - @Override - @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) - public Index add_index(Index new_index, Table index_table) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException { - DatabaseMapping mapping = checkWritePermissionsAndCheckTableAllowed(new_index.getDbName(), new_index.getOrigTableName()); - checkWritePermissionsAndCheckTableAllowed(index_table.getDbName(), index_table.getTableName(), mapping); - Index result = mapping - .getClient() - .add_index(mapping.transformInboundIndex(new_index), mapping.transformInboundTable(index_table)); - return mapping.transformOutboundIndex(result); - } - - @Override - @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) - public void alter_index(String dbname, String base_tbl_name, String idx_name, Index new_idx) - throws InvalidOperationException, MetaException, TException { - DatabaseMapping mapping = checkWritePermissionsAndCheckTableAllowed(dbname, base_tbl_name); - checkWritePermissionsAndCheckTableAllowed(new_idx.getDbName(), new_idx.getOrigTableName(), mapping); - mapping - .getClient() - .alter_index(mapping.transformInboundDatabaseName(dbname), base_tbl_name, idx_name, - mapping.transformInboundIndex(new_idx)); - } - - @Override - @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) - public boolean drop_index_by_name(String db_name, String tbl_name, String index_name, boolean deleteData) - throws NoSuchObjectException, MetaException, TException { - DatabaseMapping mapping = checkWritePermissionsAndCheckTableAllowed(db_name, tbl_name); - return mapping - .getClient() - .drop_index_by_name(mapping.transformInboundDatabaseName(db_name), tbl_name, index_name, deleteData); - } - - @Override - @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) - public Index get_index_by_name(String db_name, String tbl_name, String index_name) - throws MetaException, NoSuchObjectException, TException { - DatabaseMapping mapping = getDbMappingAndCheckTableAllowed(db_name, tbl_name); - Index result = mapping.getClient().get_index_by_name(mapping.transformInboundDatabaseName(db_name), tbl_name, index_name); - return mapping.transformOutboundIndex(mapping.getMetastoreFilter().filterIndex(result)); - } - - @Override - @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) - public List get_indexes(String db_name, String tbl_name, short max_indexes) - throws NoSuchObjectException, MetaException, TException { - DatabaseMapping mapping = getDbMappingAndCheckTableAllowed(db_name, tbl_name); - List indexes = mapping - .getClient() - .get_indexes(mapping.transformInboundDatabaseName(db_name), tbl_name, max_indexes); - return mapping.transformOutboundIndexes(mapping.getMetastoreFilter().filterIndexes(indexes)); - } - - @Override - @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) - public List get_index_names(String db_name, String tbl_name, short max_indexes) - throws MetaException, TException { - DatabaseMapping mapping = getDbMappingAndCheckTableAllowed(db_name, tbl_name); - List result = mapping.getClient() - .get_index_names(mapping.transformInboundDatabaseName(db_name), tbl_name, max_indexes); - return mapping.getMetastoreFilter().filterIndexNames(db_name, tbl_name, result); - } - @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = 
INVOCATION_LOG_NAME) public boolean update_table_column_statistics(ColumnStatistics stats_obj) @@ -1250,6 +1336,15 @@ public GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilege return getPrimaryClient().grant_revoke_privileges(request); } + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public GrantRevokePrivilegeResponse refresh_privileges(HiveObjectRef hiveObjectRef, String authorizer, + GrantRevokePrivilegeRequest grantRevokePrivilegeRequest) throws MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(hiveObjectRef.getDbName()); + return databaseMapping.getClient().refresh_privileges(databaseMapping.transformInboundHiveObjectRef(hiveObjectRef), + authorizer, grantRevokePrivilegeRequest); + } + private DatabaseMapping checkWritePermissionsForPrivileges(PrivilegeBag privileges) throws NoSuchObjectException { DatabaseMapping mapping = databaseMappingService .databaseMapping(privileges.getPrivileges().get(0).getHiveObject().getDbName()); @@ -1266,7 +1361,7 @@ private DatabaseMapping checkWritePermissionsForPrivileges(PrivilegeBag privileg @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public List set_ugi(String user_name, List group_names) throws MetaException, TException { - List mappings = databaseMappingService.getDatabaseMappings(); + List mappings = databaseMappingService.getAllDatabaseMappings(); return databaseMappingService.getPanopticOperationHandler().setUgi(user_name, group_names, mappings); } @@ -1319,6 +1414,27 @@ public void commit_txn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbor getPrimaryClient().commit_txn(rqst); } + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void repl_tbl_writeid_state(ReplTblWriteIdStateRequest replTblWriteIdStateRequest) throws TException { + DatabaseMapping databaseMapping = checkWritePermissions(replTblWriteIdStateRequest.getDbName()); + databaseMapping.getClient().repl_tbl_writeid_state(databaseMapping.transformInboundReplTblWriteIdStateRequest(replTblWriteIdStateRequest)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public GetValidWriteIdsResponse get_valid_write_ids(GetValidWriteIdsRequest getValidWriteIdsRequest) throws NoSuchTxnException, MetaException, TException { + return getPrimaryClient().get_valid_write_ids(getValidWriteIdsRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public AllocateTableWriteIdsResponse allocate_table_write_ids(AllocateTableWriteIdsRequest allocateTableWriteIdsRequest) throws NoSuchTxnException, TxnAbortedException, MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(allocateTableWriteIdsRequest.getDbName()); + return databaseMapping.getClient().allocate_table_write_ids(databaseMapping. 
+ transformInboundAllocateTableWriteIdsRequest(allocateTableWriteIdsRequest)); + } + @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, TException { @@ -1377,31 +1493,36 @@ public ShowCompactResponse show_compact(ShowCompactRequest rqst) throws TExcepti } @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public String getCpuProfile(int arg0) throws TException { return getPrimaryClient().getCpuProfile(arg0); } @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public String getVersion() throws TException { return getPrimaryClient().getVersion(); } @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public fb_status getStatus() { try { return getPrimaryClient().getStatus(); } catch (TException e) { - LOG.error("Cannot getStatus() from client: ", e); + log.error("Cannot getStatus() from client: ", e); return fb_status.DEAD; } } @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public Configuration getConf() { return conf; } @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public void setConf(Configuration conf) { this.conf = conf; } @@ -1409,6 +1530,69 @@ public void setConf(Configuration conf) { @Override public void init() throws MetaException {} + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public int getThreadId() { + return HiveMetaStore.HMSHandler.get(); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public RawStore getMS() throws MetaException { + return HiveMetaStore.HMSHandler.getRawStore(); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public TxnStore getTxnHandler() { + return TxnUtils.getTxnStore(conf); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public Warehouse getWh() { + try { + return new Warehouse(conf); + } catch (MetaException e) { + log.error("Error Instantiating Warehouse", e); + return null; + } + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public Database get_database_core(String catalogName, String name) throws NoSuchObjectException, MetaException { + return HiveMetaStore.HMSHandler.getRawStore().getDatabase(catalogName, name); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public Table get_table_core(String catName, String dbName, String tableName) throws MetaException, NoSuchObjectException { + return HiveMetaStore.HMSHandler.getRawStore().getTable(catName, dbName, tableName); + } + + @Override //TODO + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public List getTransactionalListeners() { + try{ + return MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class, conf, + MetastoreConf.getVar(conf, MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS)); + } catch (MetaException e) { + throw new RuntimeException(e); + } + } + + @Override //TODO + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public List getListeners() { + try{ + return MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, conf, 
+ MetastoreConf.getVar(conf, MetastoreConf.ConfVars.EVENT_LISTENERS)); + } catch (MetaException e) { + throw new RuntimeException(e); + } + } + // Hive 2.1.0 methods @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) @@ -1427,7 +1611,36 @@ public void add_dynamic_partitions(AddDynamicPartitions rqst) @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public void add_foreign_key(AddForeignKeyRequest req) throws NoSuchObjectException, MetaException, TException { - getPrimaryClient().add_foreign_key(req); + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().add_foreign_key(databaseMapping.transformInboundAddForeignKeyRequest(req)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void add_unique_constraint(AddUniqueConstraintRequest addUniqueConstraintRequest) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().add_unique_constraint(databaseMapping.transformInboundAddUniqueConstraintRequest(addUniqueConstraintRequest)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void add_not_null_constraint(AddNotNullConstraintRequest addNotNullConstraintRequest) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().add_not_null_constraint(databaseMapping.transformInboundAddNotNullConstraintRequest(addNotNullConstraintRequest)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void add_default_constraint(AddDefaultConstraintRequest addDefaultConstraintRequest) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().add_default_constraint(databaseMapping.transformInboundAddDefaultConstraintRequest(addDefaultConstraintRequest)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void add_check_constraint(AddCheckConstraintRequest addCheckConstraintRequest) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().add_check_constraint(databaseMapping.transformInboundAddCheckConstraintRequest(addCheckConstraintRequest)); } @Override @@ -1457,10 +1670,13 @@ public void alter_partitions_with_environment_context( EnvironmentContext environment_context) throws InvalidOperationException, MetaException, TException { DatabaseMapping mapping = checkWritePermissionsAndCheckTableAllowed(db_name, tbl_name); + for(Partition newPart : new_parts) { + checkWritePermissionsAndCheckTableAllowed(newPart.getDbName(), newPart.getTableName(), mapping); + } mapping .getClient() - .alter_partitions_with_environment_context(mapping.transformInboundDatabaseName(db_name), tbl_name, new_parts, - environment_context); + .alter_partitions_with_environment_context(mapping.transformInboundDatabaseName(db_name), tbl_name, + mapping.transformInboundPartitions(new_parts), environment_context); } @Override @@ -1468,9 +1684,11 @@ public void alter_partitions_with_environment_context( public void 
alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade) throws InvalidOperationException, MetaException, TException { DatabaseMapping mapping = checkWritePermissionsAndCheckTableAllowed(dbname, tbl_name); + checkWritePermissionsAndCheckTableAllowed(new_tbl.getDbName(), new_tbl.getTableName(), mapping); mapping .getClient() - .alter_table_with_cascade(mapping.transformInboundDatabaseName(dbname), tbl_name, new_tbl, cascade); + .alter_table_with_cascade(mapping.transformInboundDatabaseName(dbname), tbl_name, + mapping.transformInboundTable(new_tbl), cascade); } @Override @@ -1483,16 +1701,261 @@ public CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req) @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) - public ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req) throws TException { - return getPrimaryClient().clear_file_metadata(req); + public String get_metastore_db_uuid() throws MetaException, TException { + return getPrimaryClient().get_metastore_db_uuid(); } @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) - public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys) - throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { - DatabaseMapping mapping = checkWritePermissions(tbl.getDbName()); - mapping.getClient().create_table_with_constraints(mapping.transformInboundTable(tbl), primaryKeys, foreignKeys); + public WMCreateResourcePlanResponse create_resource_plan(WMCreateResourcePlanRequest wmCreateResourcePlanRequest) throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + return getPrimaryClient().create_resource_plan(wmCreateResourcePlanRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMGetResourcePlanResponse get_resource_plan(WMGetResourcePlanRequest wmGetResourcePlanRequest) throws NoSuchObjectException, MetaException, TException { + return getPrimaryClient().get_resource_plan(wmGetResourcePlanRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMGetActiveResourcePlanResponse get_active_resource_plan(WMGetActiveResourcePlanRequest wmGetActiveResourcePlanRequest) throws MetaException, TException { + return getPrimaryClient().get_active_resource_plan(wmGetActiveResourcePlanRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMGetAllResourcePlanResponse get_all_resource_plans(WMGetAllResourcePlanRequest wmGetAllResourcePlanRequest) throws MetaException, TException { + return getPrimaryClient().get_all_resource_plans(wmGetAllResourcePlanRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMAlterResourcePlanResponse alter_resource_plan(WMAlterResourcePlanRequest wmAlterResourcePlanRequest) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + return getPrimaryClient().alter_resource_plan(wmAlterResourcePlanRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMValidateResourcePlanResponse validate_resource_plan(WMValidateResourcePlanRequest wmValidateResourcePlanRequest) throws NoSuchObjectException, MetaException, TException { + return 
getPrimaryClient().validate_resource_plan(wmValidateResourcePlanRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMDropResourcePlanResponse drop_resource_plan(WMDropResourcePlanRequest wmDropResourcePlanRequest) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + return getPrimaryClient().drop_resource_plan(wmDropResourcePlanRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMCreateTriggerResponse create_wm_trigger(WMCreateTriggerRequest wmCreateTriggerRequest) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, TException { + return getPrimaryClient().create_wm_trigger(wmCreateTriggerRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMAlterTriggerResponse alter_wm_trigger(WMAlterTriggerRequest wmAlterTriggerRequest) throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + return getPrimaryClient().alter_wm_trigger(wmAlterTriggerRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMDropTriggerResponse drop_wm_trigger(WMDropTriggerRequest wmDropTriggerRequest) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + return getPrimaryClient().drop_wm_trigger(wmDropTriggerRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan(WMGetTriggersForResourePlanRequest wmGetTriggersForResourePlanRequest) throws NoSuchObjectException, MetaException, TException { + return getPrimaryClient().get_triggers_for_resourceplan(wmGetTriggersForResourePlanRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMCreatePoolResponse create_wm_pool(WMCreatePoolRequest wmCreatePoolRequest) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, TException { + return getPrimaryClient().create_wm_pool(wmCreatePoolRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMAlterPoolResponse alter_wm_pool(WMAlterPoolRequest wmAlterPoolRequest) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, TException { + return getPrimaryClient().alter_wm_pool(wmAlterPoolRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMDropPoolResponse drop_wm_pool(WMDropPoolRequest wmDropPoolRequest) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + return getPrimaryClient().drop_wm_pool(wmDropPoolRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest wmCreateOrUpdateMappingRequest) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, TException { + return getPrimaryClient().create_or_update_wm_mapping(wmCreateOrUpdateMappingRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest 
wmDropMappingRequest) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + return getPrimaryClient().drop_wm_mapping(wmDropMappingRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest + wmCreateOrDropTriggerToPoolMappingRequest) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, TException { + return getPrimaryClient().create_or_drop_wm_trigger_to_pool_mapping(wmCreateOrDropTriggerToPoolMappingRequest); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void create_ischema(ISchema iSchema) throws AlreadyExistsException, NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(iSchema.getDbName()); + databaseMapping.getClient().create_ischema(databaseMapping.transformInboundISchema(iSchema)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void alter_ischema(AlterISchemaRequest alterISchemaRequest) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(alterISchemaRequest.getName().getDbName()); + checkWritePermissions(alterISchemaRequest.getNewSchema().getDbName()); + databaseMapping.getClient().alter_ischema(databaseMapping.transformInboundAlterISchemaRequest(alterISchemaRequest)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public ISchema get_ischema(ISchemaName iSchemaName) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(iSchemaName.getDbName()); + ISchema result = databaseMapping.getClient().get_ischema(databaseMapping.transformInboundISchemaName(iSchemaName)); + return databaseMapping.transformOutboundISchema(result); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void drop_ischema(ISchemaName iSchemaName) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(iSchemaName.getDbName()); + databaseMapping.getClient().drop_ischema(databaseMapping.transformInboundISchemaName(iSchemaName)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void add_schema_version(SchemaVersion schemaVersion) throws AlreadyExistsException, NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(schemaVersion.getSchema().getDbName()); + databaseMapping.getClient().add_schema_version(databaseMapping.transformInboundSchemaVersion(schemaVersion)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public SchemaVersion get_schema_version(SchemaVersionDescriptor schemaVersionDescriptor) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(schemaVersionDescriptor.getSchema().getDbName()); + SchemaVersion result = databaseMapping.getClient().get_schema_version(databaseMapping.
+ transformInboundSchemaVersionDescriptor(schemaVersionDescriptor)); + return databaseMapping.transformOutboundSchemaVersion(result); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public SchemaVersion get_schema_latest_version(ISchemaName iSchemaName) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(iSchemaName.getDbName()); + SchemaVersion result = databaseMapping.getClient().get_schema_latest_version(databaseMapping. + transformInboundISchemaName(iSchemaName)); + return databaseMapping.transformOutboundSchemaVersion(result); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public List get_schema_all_versions(ISchemaName iSchemaName) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(iSchemaName.getDbName()); + List result = databaseMapping.getClient().get_schema_all_versions(databaseMapping. + transformInboundISchemaName(iSchemaName)); + return databaseMapping.transformOutboundSchemaVersions(result); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void drop_schema_version(SchemaVersionDescriptor schemaVersionDescriptor) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = checkWritePermissions(schemaVersionDescriptor.getSchema().getDbName()); + databaseMapping.getClient().drop_schema_version(databaseMapping.transformInboundSchemaVersionDescriptor(schemaVersionDescriptor)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public FindSchemasByColsResp get_schemas_by_cols(FindSchemasByColsRqst findSchemasByColsRqst) throws MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + FindSchemasByColsResp result = databaseMapping.getClient().get_schemas_by_cols(findSchemasByColsRqst); + return databaseMapping.transformOutboundFindSchemasByColsResp(result); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void map_schema_version_to_serde(MapSchemaVersionToSerdeRequest mapSchemaVersionToSerdeRequest) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().map_schema_version_to_serde(databaseMapping. + transformInboundMapSchemaVersionToSerdeRequest(mapSchemaVersionToSerdeRequest)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void set_schema_version_state(SetSchemaVersionStateRequest setSchemaVersionStateRequest) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().set_schema_version_state(databaseMapping. 
+ transformInboundSetSchemaVersionStateRequest(setSchemaVersionStateRequest)); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void add_serde(SerDeInfo serDeInfo) throws AlreadyExistsException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().add_serde(serDeInfo); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public SerDeInfo get_serde(GetSerdeRequest getSerdeRequest) throws NoSuchObjectException, MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + SerDeInfo result = databaseMapping.getClient().get_serde(getSerdeRequest); + return result; + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public LockResponse get_lock_materialization_rebuild(String dbName, String tableName, long txnId) throws TException { + DatabaseMapping databaseMapping = databaseMappingService.databaseMapping(dbName); + LockResponse result = databaseMapping.getClient().get_lock_materialization_rebuild( + databaseMapping.transformInboundDatabaseName(dbName), tableName, txnId); + return result; + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public boolean heartbeat_lock_materialization_rebuild(String dbName, String tableName, long txnId) throws TException { + DatabaseMapping databaseMapping = databaseMappingService.databaseMapping(dbName); + boolean result = databaseMapping.getClient().heartbeat_lock_materialization_rebuild( + databaseMapping.transformInboundDatabaseName(dbName), tableName, txnId); + return result; + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void add_runtime_stats(RuntimeStat runtimeStat) throws MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + databaseMapping.getClient().add_runtime_stats(runtimeStat); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public List get_runtime_stats(GetRuntimeStatsRequest getRuntimeStatsRequest) throws MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + List result = databaseMapping.getClient().get_runtime_stats(getRuntimeStatsRequest); + return result; + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req) throws TException { + return getPrimaryClient().clear_file_metadata(req); } @Override @@ -1533,12 +1996,24 @@ public void flushCache() throws TException { getPrimaryClient().flushCache(); } + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public CmRecycleResponse cm_recycle(CmRecycleRequest cmRecycleRequest) throws MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.primaryDatabaseMapping(); + CmRecycleResponse result = databaseMapping.getClient().cm_recycle(cmRecycleRequest); + return result; + } + @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public GetAllFunctionsResponse get_all_functions() throws TException { - return databaseMappingService - .getPanopticOperationHandler() - 
.getAllFunctions(databaseMappingService.getDatabaseMappings()); + if(waggleDanceConfiguration.isQueryFunctionsAcrossAllMetastores()) { + return databaseMappingService + .getPanopticOperationHandler() + .getAllFunctions(databaseMappingService.getAvailableDatabaseMappings()); + } else { + return getPrimaryClient().get_all_functions(); + } } @Override @@ -1553,6 +2028,15 @@ public CurrentNotificationEventId get_current_notificationEventId() throws TExce return getPrimaryClient().get_current_notificationEventId(); } + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public NotificationEventsCountResponse get_notification_events_count(NotificationEventsCountRequest notificationEventsCountRequest) throws TException { + DatabaseMapping databaseMapping = getDbMappingAndCheckTableAllowed(notificationEventsCountRequest.getDbName(), notificationEventsCountRequest.getCatName()); + NotificationEventsCountResponse result = databaseMapping.getClient().get_notification_events_count( + databaseMapping.transformInboundNotificationEventsCountRequest(notificationEventsCountRequest)); + return result; + } + @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public List get_fields_with_environment_context( @@ -1589,6 +2073,42 @@ public ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request) mapping.getClient().get_foreign_keys(mapping.transformInboundForeignKeysRequest(request))); } + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest uniqueConstraintsRequest) throws MetaException, NoSuchObjectException, TException { + DatabaseMapping databaseMapping = getDbMappingAndCheckTableAllowed(uniqueConstraintsRequest.getDb_name(), uniqueConstraintsRequest.getTbl_name()); + UniqueConstraintsResponse result = databaseMapping.getClient().get_unique_constraints( + databaseMapping.transformInboundUniqueConstraintsRequest(uniqueConstraintsRequest)); + return databaseMapping.transformOutboundUniqueConstraintsResponse(result); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsRequest notNullConstraintsRequest) throws MetaException, NoSuchObjectException, TException { + DatabaseMapping databaseMapping = getDbMappingAndCheckTableAllowed(notNullConstraintsRequest.getDb_name(), notNullConstraintsRequest.getTbl_name()); + NotNullConstraintsResponse result = databaseMapping.getClient().get_not_null_constraints( + databaseMapping.transformInboundNotNullConstraintsRequest(notNullConstraintsRequest)); + return databaseMapping.transformOutboundNotNullConstraintsResponse(result); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequest defaultConstraintsRequest) throws MetaException, NoSuchObjectException, TException { + DatabaseMapping databaseMapping = getDbMappingAndCheckTableAllowed(defaultConstraintsRequest.getDb_name(), defaultConstraintsRequest.getTbl_name()); + DefaultConstraintsResponse result = databaseMapping.getClient().get_default_constraints( + databaseMapping.transformInboundDefaultConstraintsRequest(defaultConstraintsRequest)); + return databaseMapping.transformOutboundDefaultConstraintsResponse(result); + } + + @Override + @Loggable(value = 
Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public CheckConstraintsResponse get_check_constraints(CheckConstraintsRequest checkConstraintsRequest) throws MetaException, NoSuchObjectException, TException { + DatabaseMapping databaseMapping = getDbMappingAndCheckTableAllowed(checkConstraintsRequest.getDb_name(), checkConstraintsRequest.getTbl_name()); + CheckConstraintsResponse result = databaseMapping.getClient().get_check_constraints( + databaseMapping.transformInboundCheckConstraintsRequest(checkConstraintsRequest)); + return databaseMapping.transformOutboundCheckConstraintsResponse(result); + } + @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public List get_master_keys() throws TException { @@ -1681,7 +2201,13 @@ public List get_tables_by_type(String db_name, String pattern, String ta DatabaseMapping mapping = databaseMappingService.databaseMapping(db_name); List resultTables = mapping.getClient().get_tables_by_type(mapping.transformInboundDatabaseName(db_name), pattern, tableType); List result = databaseMappingService.filterTables(db_name, resultTables, mapping); - return mapping.getMetastoreFilter().filterTableNames(db_name, result); + return mapping.getMetastoreFilter().filterTableNames(null, mapping.transformInboundDatabaseName(db_name), result); + } + + @Override + public List get_materialized_views_for_rewriting(String dbName) throws MetaException, TException { + DatabaseMapping databaseMapping = databaseMappingService.databaseMapping(dbName); + return databaseMapping.getClient().get_materialized_views_for_rewriting(databaseMapping.transformInboundDatabaseName(dbName)); } @Override @@ -1707,6 +2233,22 @@ public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) return mapping.transformOutboundGetTablesResult(result); } + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public Materialization get_materialization_invalidation_info(CreationMetadata creationMetadata, String validTxnList) throws MetaException, InvalidOperationException, UnknownDBException, TException { + DatabaseMapping databaseMapping = getDbMappingAndCheckTableAllowed(creationMetadata.getDbName(), creationMetadata.getTblName()); + return databaseMapping.getClient().get_materialization_invalidation_info(databaseMapping. 
+ transformInboundCreationMetadata(creationMetadata), validTxnList); + } + + @Override + @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) + public void update_creation_metadata(String catName, String dbName, String tableName, CreationMetadata creationMetadata) throws MetaException, InvalidOperationException, UnknownDBException, TException { + DatabaseMapping databaseMapping = getDbMappingAndCheckTableAllowed(dbName, tableName); + databaseMapping.getClient().update_creation_metadata(catName, databaseMapping.transformInboundDatabaseName(dbName), + tableName, creationMetadata); + } + @Override @Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME) public CompactionResponse compact2(CompactionRequest rqst) throws TException { diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/FederatedHMSHandlerFactory.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/FederatedHMSHandlerFactory.java index aaa238a74..d1d501ace 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/FederatedHMSHandlerFactory.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/FederatedHMSHandlerFactory.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -41,11 +41,11 @@ public class FederatedHMSHandlerFactory { @Autowired public FederatedHMSHandlerFactory( - HiveConf hiveConf, - NotifyingFederationService notifyingFederationService, - MetaStoreMappingFactory metaStoreMappingFactory, - WaggleDanceConfiguration waggleDanceConfiguration, - QueryMapping queryMapping) { + HiveConf hiveConf, + NotifyingFederationService notifyingFederationService, + MetaStoreMappingFactory metaStoreMappingFactory, + WaggleDanceConfiguration waggleDanceConfiguration, + QueryMapping queryMapping) { this.hiveConf = hiveConf; this.notifyingFederationService = notifyingFederationService; this.metaStoreMappingFactory = metaStoreMappingFactory; @@ -57,7 +57,8 @@ public CloseableIHMSHandler create() { MappingEventListener service = createDatabaseMappingService(); MonitoredDatabaseMappingService monitoredService = new MonitoredDatabaseMappingService(service); - CloseableIHMSHandler baseHandler = new FederatedHMSHandler(monitoredService, notifyingFederationService); + CloseableIHMSHandler baseHandler = new FederatedHMSHandler(monitoredService, notifyingFederationService, + waggleDanceConfiguration); HiveConf conf = new HiveConf(hiveConf); baseHandler.setConf(conf); return baseHandler; diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/MetaStoreProxyServer.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/MetaStoreProxyServer.java index 705b4fe6c..05c7797a3 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/MetaStoreProxyServer.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/MetaStoreProxyServer.java @@ -35,23 +35,28 @@ import java.util.concurrent.locks.ReentrantLock; import javax.annotation.PreDestroy; +import javax.security.auth.login.LoginException; import org.apache.hadoop.hive.common.auth.HiveAuthUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TServerSocketKeepAlive; +import 
org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; +import org.apache.thrift.TProcessorFactory; import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.server.ServerContext; import org.apache.thrift.server.TServer; +import org.apache.thrift.server.TServerEventHandler; import org.apache.thrift.server.TThreadPoolServer; import org.apache.thrift.transport.TFramedTransport; import org.apache.thrift.transport.TServerSocket; +import org.apache.thrift.transport.TTransport; import org.apache.thrift.transport.TTransportException; import org.apache.thrift.transport.TTransportFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.ApplicationArguments; import org.springframework.boot.ApplicationRunner; @@ -59,14 +64,16 @@ import org.springframework.core.annotation.Order; import org.springframework.stereotype.Component; +import lombok.extern.log4j.Log4j2; + import com.hotels.bdp.waggledance.conf.WaggleDanceConfiguration; +import com.hotels.bdp.waggledance.util.SaslHelper; @Component @Order(Ordered.HIGHEST_PRECEDENCE) +@Log4j2 public class MetaStoreProxyServer implements ApplicationRunner { - private static final Logger LOG = LoggerFactory.getLogger(MetaStoreProxyServer.class); - /** * default port on which to start the server (48869) */ @@ -76,7 +83,7 @@ public class MetaStoreProxyServer implements ApplicationRunner { private final HiveConf hiveConf; private final WaggleDanceConfiguration waggleDanceConfiguration; - private final TSetIpAddressProcessorFactory tSetIpAddressProcessorFactory; + private final TProcessorFactory tProcessorFactory; private final Lock startLock; private final Condition startCondition; private TServer tServer; @@ -85,10 +92,10 @@ public class MetaStoreProxyServer implements ApplicationRunner { public MetaStoreProxyServer( HiveConf hiveConf, WaggleDanceConfiguration waggleDanceConfiguration, - TSetIpAddressProcessorFactory tSetIpAddressProcessorFactory) { + TProcessorFactory tProcessorFactory) { this.hiveConf = hiveConf; this.waggleDanceConfiguration = waggleDanceConfiguration; - this.tSetIpAddressProcessorFactory = tSetIpAddressProcessorFactory; + this.tProcessorFactory = tProcessorFactory; startLock = new ReentrantLock(); startCondition = startLock.newCondition(); } @@ -110,7 +117,7 @@ public void run(ApplicationArguments args) throws Exception { try { String msg = "Starting WaggleDance on port " + waggleDanceConfiguration.getPort(); - LOG.info(msg); + log.info(msg); if (waggleDanceConfiguration.isVerbose()) { System.err.println(msg); } @@ -118,17 +125,17 @@ public void run(ApplicationArguments args) throws Exception { // Add shutdown hook. Runtime.getRuntime().addShutdownHook(new Thread(() -> { String shutdownMsg = "Shutting down WaggleDance."; - LOG.info(shutdownMsg); + log.info(shutdownMsg); if (isCliVerbose) { System.err.println(shutdownMsg); } })); AtomicBoolean startedServing = new AtomicBoolean(); - startWaggleDance(ShimLoader.getHadoopThriftAuthBridge(), startLock, startCondition, startedServing); + startWaggleDance(startLock, startCondition, startedServing); } catch (Throwable t) { // Catch the exception, log it and rethrow it. 
- LOG.error("WaggleDance Thrift Server threw an exception...", t); + log.error("WaggleDance Thrift Server threw an exception...", t); throw new Exception(t); } } @@ -136,14 +143,12 @@ public void run(ApplicationArguments args) throws Exception { /** * Start Metastore based on a passed {@link HadoopThriftAuthBridge} * - * @param bridge * @param startLock * @param startCondition * @param startedServing * @throws Throwable */ private void startWaggleDance( - HadoopThriftAuthBridge bridge, Lock startLock, Condition startCondition, AtomicBoolean startedServing) @@ -157,6 +162,7 @@ private void startWaggleDance( boolean tcpKeepAlive = hiveConf.getBoolVar(ConfVars.METASTORE_TCP_KEEP_ALIVE); boolean useFramedTransport = hiveConf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT); boolean useSSL = hiveConf.getBoolVar(ConfVars.HIVE_METASTORE_USE_SSL); + boolean useSASL = hiveConf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL); TServerSocket serverSocket = createServerSocket(useSSL, waggleDanceConfiguration.getPort()); @@ -164,11 +170,19 @@ private void startWaggleDance( serverSocket = new TServerSocketKeepAlive(serverSocket); } - TTransportFactory transFactory = useFramedTransport ? new TFramedTransport.Factory() : new TTransportFactory(); - LOG.info("Starting WaggleDance Server"); + HadoopThriftAuthBridge.Server saslServer = null; + + if(useSASL) { + UserGroupInformation.setConfiguration(hiveConf); + saslServer = SaslHelper.createSaslServer(hiveConf); + } + + TTransportFactory transFactory = createTTransportFactory(useFramedTransport, useSASL, saslServer); + TProcessorFactory tProcessorFactory = getTProcessorFactory(useSASL, saslServer); + log.info("Starting WaggleDance Server"); TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverSocket) - .processorFactory(tSetIpAddressProcessorFactory) + .processorFactory(tProcessorFactory) .transportFactory(transFactory) .protocolFactory(new TBinaryProtocol.Factory()) .minWorkerThreads(minWorkerThreads) @@ -178,20 +192,64 @@ private void startWaggleDance( .requestTimeoutUnit(waggleDanceConfiguration.getThriftServerRequestTimeoutUnit()); tServer = new TThreadPoolServer(args); - LOG.info("Started the new WaggleDance on port [" + waggleDanceConfiguration.getPort() + "]..."); - LOG.info("Options.minWorkerThreads = " + minWorkerThreads); - LOG.info("Options.maxWorkerThreads = " + maxWorkerThreads); - LOG.info("TCP keepalive = " + tcpKeepAlive); + if (useSASL){ + TServerEventHandler tServerEventHandler = new TServerEventHandler() { + @Override + public void preServe() { + } + + @Override + public ServerContext createContext(TProtocol tProtocol, TProtocol tProtocol1) { + return null; + } + + @Override + public void deleteContext(ServerContext serverContext, TProtocol tProtocol, TProtocol tProtocol1) { + TokenWrappingHMSHandler.removeToken(); + } + + @Override + public void processContext(ServerContext serverContext, TTransport tTransport, TTransport tTransport1) { + } + }; + tServer.setServerEventHandler(tServerEventHandler); + } + log.info("Started the new WaggleDance on port [{}]...", waggleDanceConfiguration.getPort()); + log.info("Options.minWorkerThreads = {}", minWorkerThreads); + log.info("Options.maxWorkerThreads = {}", maxWorkerThreads); + log.info("TCP keepalive = {}", tcpKeepAlive); if (startLock != null) { signalOtherThreadsToStart(tServer, startLock, startCondition, startedServing); } tServer.serve(); } catch (Throwable x) { - LOG.error(StringUtils.stringifyException(x)); + log.error(StringUtils.stringifyException(x)); throw x; } - 
LOG.info("Waggle Dance has stopped"); + log.info("Waggle Dance has stopped"); + } + + private TProcessorFactory getTProcessorFactory(boolean useSASL, + HadoopThriftAuthBridge.Server server) throws TTransportException { + if (useSASL) { + return new TProcessorFactorySaslDecorator(tProcessorFactory, server); + } else { + return tProcessorFactory; + } + } + + private TTransportFactory createTTransportFactory(boolean useFramedTransport, boolean useSASL, + HadoopThriftAuthBridge.Server server) + throws LoginException { + if (useSASL) { + return SaslHelper.getAuthTransFactory(server, hiveConf); + } + if (useFramedTransport) { + return new TFramedTransport.Factory(); + } + return new TTransportFactory(); + } private TServerSocket createServerSocket(boolean useSSL, int port) throws IOException, TTransportException { @@ -226,7 +284,7 @@ private void signalOtherThreadsToStart( try { Thread.sleep(1000); } catch (InterruptedException e) { - LOG.warn("Signalling thread was interuppted: " + e.getMessage()); + log.warn("Signalling thread was interuppted: {}", e.getMessage()); } } while (!server.isServing()); startLock.lock(); diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/NoPrimaryMetastoreException.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/NoPrimaryMetastoreException.java index 0e4b1e8ba..11334ae94 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/NoPrimaryMetastoreException.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/NoPrimaryMetastoreException.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TProcessorFactorySaslDecorator.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TProcessorFactorySaslDecorator.java new file mode 100644 index 000000000..7f77557bd --- /dev/null +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TProcessorFactorySaslDecorator.java @@ -0,0 +1,45 @@ +/** + * Copyright (C) 2016-2023 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.hotels.bdp.waggledance.server; + +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.thrift.TProcessor; +import org.apache.thrift.TProcessorFactory; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; + +public class TProcessorFactorySaslDecorator extends TProcessorFactory { + + private final HadoopThriftAuthBridge.Server saslServer; + private final TProcessorFactory tProcessorFactory; + + TProcessorFactorySaslDecorator(TProcessorFactory tProcessorFactory, HadoopThriftAuthBridge.Server saslServer) throws TTransportException { + super(null); + this.tProcessorFactory = tProcessorFactory; + this.saslServer = saslServer; + } + + @Override + public TProcessor getProcessor(TTransport transport) { + try { + TProcessor tProcessor = tProcessorFactory.getProcessor(transport); + return saslServer.wrapProcessor(tProcessor); + } catch (RuntimeException e) { + throw new RuntimeException("Error creating SASL wrapped TProcessor", e); + } + } + +} diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TSetIpAddressProcessorFactory.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TSetIpAddressProcessorFactory.java index 9147c8350..63b1fe2bc 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TSetIpAddressProcessorFactory.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TSetIpAddressProcessorFactory.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,15 +26,14 @@ import org.apache.thrift.TProcessorFactory; import org.apache.thrift.transport.TSocket; import org.apache.thrift.transport.TTransport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; +import lombok.extern.log4j.Log4j2; + @Component +@Log4j2 class TSetIpAddressProcessorFactory extends TProcessorFactory { - - private final static Logger log = LoggerFactory.getLogger(TSetIpAddressProcessorFactory.class); private final HiveConf hiveConf; private final FederatedHMSHandlerFactory federatedHMSHandlerFactory; private final TTransportMonitor transportMonitor; @@ -58,10 +57,19 @@ public TProcessor getProcessor(TTransport transport) { log.debug("Received a connection from ip: {}", socket.getInetAddress().getHostAddress()); } CloseableIHMSHandler baseHandler = federatedHMSHandlerFactory.create(); - IHMSHandler handler = newRetryingHMSHandler(ExceptionWrappingHMSHandler.newProxyInstance(baseHandler), hiveConf, - false); - transportMonitor.monitor(transport, baseHandler); - return new TSetIpAddressProcessor<>(handler); + + boolean useSASL = hiveConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL); + if (useSASL) { + IHMSHandler tokenHandler = TokenWrappingHMSHandler.newProxyInstance(baseHandler, useSASL); + IHMSHandler handler = newRetryingHMSHandler(ExceptionWrappingHMSHandler.newProxyInstance(tokenHandler), hiveConf, + false); + return new TSetIpAddressProcessor<>(handler); + } else { + IHMSHandler handler = newRetryingHMSHandler(ExceptionWrappingHMSHandler.newProxyInstance(baseHandler), hiveConf, + false); + transportMonitor.monitor(transport, baseHandler); + return new TSetIpAddressProcessor<>(handler); + } } catch 
(MetaException | ReflectiveOperationException | RuntimeException e) { throw new RuntimeException("Error creating TProcessor", e); } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TTransportMonitor.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TTransportMonitor.java index ac717eac7..9a3580909 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TTransportMonitor.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TTransportMonitor.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,28 +25,24 @@ import javax.annotation.WillClose; import org.apache.thrift.transport.TTransport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; +import lombok.AllArgsConstructor; +import lombok.extern.log4j.Log4j2; + import com.google.common.annotations.VisibleForTesting; import com.hotels.bdp.waggledance.conf.WaggleDanceConfiguration; @Component +@Log4j2 public class TTransportMonitor { - private static final Logger LOG = LoggerFactory.getLogger(TTransportMonitor.class); - + @AllArgsConstructor private static class ActionContainer { private final TTransport transport; private final Closeable action; - - private ActionContainer(TTransport transport, Closeable action) { - this.transport = transport; - this.action = action; - } } private final ScheduledExecutorService scheduler; @@ -61,7 +57,7 @@ public TTransportMonitor(WaggleDanceConfiguration waggleDanceConfiguration) { TTransportMonitor(WaggleDanceConfiguration waggleDanceConfiguration, ScheduledExecutorService scheduler) { this.scheduler = scheduler; Runnable monitor = () -> { - LOG.debug("Releasing disconnected sessions"); + log.debug("Releasing disconnected sessions"); Iterator iterator = transports.iterator(); while (iterator.hasNext()) { ActionContainer actionContainer = iterator.next(); @@ -71,12 +67,12 @@ public TTransportMonitor(WaggleDanceConfiguration waggleDanceConfiguration) { try { actionContainer.action.close(); } catch (Exception e) { - LOG.warn("Error closing action", e); + log.warn("Error closing action", e); } try { actionContainer.transport.close(); } catch (Exception e) { - LOG.warn("Error closing transport", e); + log.warn("Error closing transport", e); } iterator.remove(); } diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TokenWrappingHMSHandler.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TokenWrappingHMSHandler.java new file mode 100644 index 000000000..d4bb368c9 --- /dev/null +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/TokenWrappingHMSHandler.java @@ -0,0 +1,102 @@ +/** + * Copyright (C) 2016-2023 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.hotels.bdp.waggledance.server; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.lang.reflect.UndeclaredThrowableException; + +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.security.UserGroupInformation; + +import lombok.extern.log4j.Log4j2; + +@Log4j2 +public class TokenWrappingHMSHandler implements InvocationHandler { + + private final IHMSHandler baseHandler; + private final Boolean useSasl; + + private static final ThreadLocal<String> tokens = new ThreadLocal<String>() { + @Override + protected String initialValue() { + return ""; + } + }; + + public static String getToken() { + return tokens.get(); + } + + public static void removeToken() { + tokens.remove(); + } + + public static IHMSHandler newProxyInstance(IHMSHandler baseHandler, boolean useSasl) { + return (IHMSHandler) Proxy.newProxyInstance(TokenWrappingHMSHandler.class.getClassLoader(), + new Class[] { IHMSHandler.class }, new TokenWrappingHMSHandler(baseHandler, useSasl)); + } + + public TokenWrappingHMSHandler(IHMSHandler baseHandler, boolean useSasl) { + this.baseHandler = baseHandler; + this.useSasl = useSasl; + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + try { + // We will get the token the first time the proxy user calls in. + // The login user must have opened a connection in `TProcessorFactorySaslDecorator#getProcessor`, + // so we can reuse that connection to fetch the proxy user's delegation token. + if (useSasl) { + UserGroupInformation currUser = null; + String token = null; + // If get_delegation_token is called, invoke it directly and store the token in the thread-local. + + switch (method.getName()) { + case "get_delegation_token": + token = (String) method.invoke(baseHandler, args); + tokens.set(token); + return token; + case "close": + tokens.remove(); + return method.invoke(baseHandler, args); + default: + if (tokens.get().isEmpty() && (currUser = UserGroupInformation.getCurrentUser()) + != UserGroupInformation.getLoginUser()) { + + String shortName = currUser.getShortUserName(); + token = baseHandler.get_delegation_token(shortName, shortName); + log.info("Got delegation token for user {}", shortName); + tokens.set(token); + } + return method.invoke(baseHandler, args); + } + } + return method.invoke(baseHandler, args); + } catch (InvocationTargetException e) { + // Need to unwrap this, so callers get the correct exception thrown by the handler. + throw e.getCause(); + } catch (UndeclaredThrowableException e) { + // Need to unwrap this, so callers get the correct exception thrown by the handler. + throw e.getCause(); + } + + } + +} diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/WaggleDanceServerException.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/WaggleDanceServerException.java index a2d8ed717..1d578cab3 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/WaggleDanceServerException.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/WaggleDanceServerException.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
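Note: the delegation-token flow in TokenWrappingHMSHandler is easiest to see end to end in a small sketch. This is illustrative only and not part of the change itself; it assumes the SASL branch wired up in TSetIpAddressProcessorFactory, and the local variable names are made up:

    // Build the federated handler, then wrap it so every Thrift call passes through invoke().
    IHMSHandler base = federatedHMSHandlerFactory.create();
    IHMSHandler handler = TokenWrappingHMSHandler.newProxyInstance(base, true /* useSasl */);

    // The first call from a proxy user (current UGI != login UGI) fetches a delegation
    // token on their behalf and caches it in the per-thread ThreadLocal.
    String token = TokenWrappingHMSHandler.getToken();   // cached token, or "" if none yet

    // On client disconnect the server event handler clears the cache (see deleteContext()
    // in MetaStoreProxyServer, which calls TokenWrappingHMSHandler.removeToken()).
    TokenWrappingHMSHandler.removeToken();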
diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/security/NotAllowedException.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/security/NotAllowedException.java index dc2055773..70020b7a3 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/security/NotAllowedException.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/security/NotAllowedException.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/security/ReadWriteCreateAccessControlHandler.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/security/ReadWriteCreateAccessControlHandler.java index 1f2620bb1..f4fd41c39 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/security/ReadWriteCreateAccessControlHandler.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/server/security/ReadWriteCreateAccessControlHandler.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,21 +18,18 @@ import java.util.ArrayList; import java.util.List; +import lombok.AllArgsConstructor; + import com.hotels.bdp.waggledance.api.WaggleDanceException; import com.hotels.bdp.waggledance.api.federation.service.FederationService; import com.hotels.bdp.waggledance.api.model.AbstractMetaStore; import com.hotels.bdp.waggledance.api.model.PrimaryMetaStore; +@AllArgsConstructor public class ReadWriteCreateAccessControlHandler implements AccessControlHandler { - private final FederationService federationService; private AbstractMetaStore metaStore; - - ReadWriteCreateAccessControlHandler(AbstractMetaStore metaStore, - FederationService federationService) { - this.metaStore = metaStore; - this.federationService = federationService; - } + private final FederationService federationService; @Override public boolean hasWritePermission(String databaseName) { diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/util/AllowList.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/util/AllowList.java index a9ddeab11..26e428c8a 100644 --- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/util/AllowList.java +++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/util/AllowList.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
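Note on the Lombok constructor annotations introduced in these handler and util classes: Lombok generates constructor parameters in field-declaration order, which is why ReadWriteCreateAccessControlHandler now declares metaStore before federationService. The generated constructor keeps the same parameter list as the removed hand-written one (Lombok's default access is public, whereas the old constructor was package-private). A minimal sketch of what @AllArgsConstructor expands to, using a hypothetical class name:

    // Hypothetical example; mirrors the refactored handler's field order.
    class Handler {
      private AbstractMetaStore metaStore;                 // declared first
      private final FederationService federationService;   // declared second

      // Lombok's @AllArgsConstructor generates the equivalent of:
      public Handler(AbstractMetaStore metaStore, FederationService federationService) {
        this.metaStore = metaStore;
        this.federationService = federationService;
      }
    }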
@@ -22,13 +22,14 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import lombok.NoArgsConstructor;
+
+@NoArgsConstructor
 public class AllowList {
 
   private final static String MATCH_ALL = ".*";
   private final Set<String> allowList = new HashSet<>();
 
-  public AllowList() {}
-
   public AllowList(List<String> allowList) {
     if (allowList == null) {
       add(MATCH_ALL);
diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/util/SaslHelper.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/util/SaslHelper.java
new file mode 100644
index 000000000..87b07cded
--- /dev/null
+++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/util/SaslHelper.java
@@ -0,0 +1,123 @@
+/**
+ * Copyright (C) 2016-2023 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.hotels.bdp.waggledance.util;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.security.auth.login.LoginException;
+import javax.security.sasl.Sasl;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.security.DBTokenStore;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hive.service.auth.HiveAuthConstants;
+import org.apache.hive.service.auth.PlainSaslHelper;
+import org.apache.hive.service.auth.SaslQOP;
+import org.apache.thrift.transport.TSaslServerTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.apache.thrift.transport.TTransportFactory;
+
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
+public final class SaslHelper {
+
+  public static HadoopThriftAuthBridge.Server createSaslServer(HiveConf conf) throws TTransportException {
+    HadoopThriftAuthBridge.Server saslServer = null;
+    if (SaslHelper.isSASLWithKerberizedHadoop(conf)) {
+      saslServer =
+          HadoopThriftAuthBridge.getBridge().createServer(
+              conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB),
+              conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL),
+              conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_CLIENT_KERBEROS_PRINCIPAL));
+
+      // Start delegation token manager
+      MetastoreDelegationTokenManager delegationTokenManager = new MetastoreDelegationTokenManager();
+      try {
+        Object baseHandler = null;
+        String tokenStoreClass = conf.getVar(HiveConf.ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS);
+
+        if (tokenStoreClass.equals(DBTokenStore.class.getName())) {
+          // An IMetaStoreClient is needed to access the token store if DBTokenStore is to be used.
+          // It will be obtained via Hive.get(conf).getMSC in the thread where the DelegationTokenStore
+          // is called. To avoid the cyclic reference, we pass the Hive class to DBTokenStore, where
+          // it is used to get a threadLocal Hive object with a synchronized MetaStoreClient using
+          // Java reflection.
+          // Note: there will be two HS2 life-long opened MSCs; one is stored in the HS2 thread-local
+          // Hive object, the other is in a daemon thread spawned in DelegationTokenSecretManager
+          // to remove expired tokens.
+          baseHandler = Hive.class;
+        }
+
+        delegationTokenManager.startDelegationTokenSecretManager(conf, baseHandler, HadoopThriftAuthBridge.Server.ServerMode.METASTORE);
+        saslServer.setSecretManager(delegationTokenManager.getSecretManager());
+      }
+      catch (IOException e) {
+        throw new TTransportException("Failed to start token manager", e);
+      }
+    }
+    return saslServer;
+  }
+
+  public static boolean isSASLWithKerberizedHadoop(HiveConf hiveconf) {
+    return "kerberos".equalsIgnoreCase(hiveconf.get(HADOOP_SECURITY_AUTHENTICATION, "simple"))
+        && !hiveconf.getVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION).equalsIgnoreCase(HiveAuthConstants.AuthTypes.NOSASL.getAuthName());
+  }
+
+  public static TTransportFactory getAuthTransFactory(HadoopThriftAuthBridge.Server saslServer, HiveConf hiveConf) throws LoginException {
+    TTransportFactory transportFactory;
+    TSaslServerTransport.Factory serverTransportFactory;
+    String authTypeStr = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION);
+    if (SaslHelper.isSASLWithKerberizedHadoop(hiveConf)) {
+      try {
+        serverTransportFactory = saslServer.createSaslServerTransportFactory(
+            getSaslProperties(hiveConf));
+      } catch (TTransportException e) {
+        throw new LoginException(e.getMessage());
+      }
+      if (!authTypeStr.equalsIgnoreCase(HiveAuthConstants.AuthTypes.KERBEROS.getAuthName())) {
+        throw new LoginException("Unsupported authentication type " + authTypeStr);
+      }
+      transportFactory = saslServer.wrapTransportFactory(serverTransportFactory);
+    } else if (authTypeStr.equalsIgnoreCase(HiveAuthConstants.AuthTypes.NONE.getAuthName()) ||
+        authTypeStr.equalsIgnoreCase(HiveAuthConstants.AuthTypes.LDAP.getAuthName()) ||
+        authTypeStr.equalsIgnoreCase(HiveAuthConstants.AuthTypes.PAM.getAuthName()) ||
+        authTypeStr.equalsIgnoreCase(HiveAuthConstants.AuthTypes.CUSTOM.getAuthName())) {
+      transportFactory = PlainSaslHelper.getPlainTransportFactory(authTypeStr);
+    } else if (authTypeStr.equalsIgnoreCase(HiveAuthConstants.AuthTypes.NOSASL.getAuthName())) {
+      transportFactory = new TTransportFactory();
+    } else {
+      throw new LoginException("Unsupported authentication type " + authTypeStr);
+    }
+    return transportFactory;
+  }
+
+  public static Map<String, String> getSaslProperties(HiveConf hiveConf) {
+    Map<String, String> saslProps = new HashMap<>();
+    SaslQOP saslQOP = SaslQOP.fromString(hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_SASL_QOP));
+    saslProps.put(Sasl.QOP, saslQOP.toString());
+    saslProps.put(Sasl.SERVER_AUTH, "true");
+    return saslProps;
+  }
+
+}
diff --git a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/yaml/YamlFactory.java b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/yaml/YamlFactory.java
index b319eea32..64630aa80 100644
--- a/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/yaml/YamlFactory.java
+++ b/waggle-dance-core/src/main/java/com/hotels/bdp/waggledance/yaml/YamlFactory.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2019 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -29,6 +29,9 @@
 import org.yaml.snakeyaml.nodes.Tag;
 import org.yaml.snakeyaml.representer.Representer;
 
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+
 import com.google.common.collect.Sets;
 
 import com.hotels.bdp.waggledance.api.model.AbstractMetaStore;
@@ -38,10 +41,9 @@
 import com.hotels.bdp.waggledance.conf.WaggleDanceConfiguration;
 import com.hotels.bdp.waggledance.conf.YamlStorageConfiguration;
 
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
 public final class YamlFactory {
 
-  private YamlFactory() {}
-
   public static Yaml newYaml() {
     PropertyUtils propertyUtils = new AdvancedPropertyUtils();
     propertyUtils.setSkipMissingProperties(true);
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/DefaultMetaStoreClientFactoryTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/DefaultMetaStoreClientFactoryTest.java
index 6518b3868..0afef806f 100644
--- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/DefaultMetaStoreClientFactoryTest.java
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/DefaultMetaStoreClientFactoryTest.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2021 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,6 +22,10 @@
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+import static com.hotels.bdp.waggledance.client.HiveUgiArgsStub.TEST_ARGS;
+
+import java.util.List;
+
 import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
 import org.apache.thrift.TException;
 import org.apache.thrift.transport.TTransportException;
@@ -30,6 +34,8 @@
 import org.mockito.Mock;
 import org.mockito.junit.MockitoJUnitRunner;
 
+import com.google.common.collect.Lists;
+
 import com.hotels.hcommon.hive.metastore.exception.MetastoreUnavailableException;
 
 @RunWith(MockitoJUnitRunner.class)
@@ -49,7 +55,7 @@ public void isOpen() {
     boolean result = iface.isOpen();
 
     assertThat(result, is(true));
-    verify(base, never()).reconnect();
+    verify(base, never()).reconnect(TEST_ARGS);
   }
 
   @Test
@@ -60,7 +66,7 @@ public void isOpenWithReconnection() {
     boolean result = iface.isOpen();
 
     assertThat(result, is(true));
-    verify(base).reconnect();
+    verify(base).reconnect(null);
   }
 
   @Test
@@ -73,14 +79,6 @@ public void isOpenThrowsException() {
     assertThat(result, is(false));
   }
 
-  @Test
-  public void closeNullBase() throws Exception {
-    CloseableThriftHiveMetastoreIface iface = factory.newInstance("name", RECONNECTION_RETRIES, null);
-
-    iface.close();
-    verify(base, never()).close();
-  }
-
   @Test
   public void defaultMethodCall() throws Exception {
     when(base.getClient()).thenReturn(client);
@@ -101,7 +99,46 @@ public void defaultMethodCallThrowsTransportExceptionRetries() throws TException
     String result = iface.getName();
 
     assertThat(result, is("ourName"));
-    verify(base).reconnect();
+    verify(base).open(null);
+    verify(base).reconnect(null);
+  }
+
+  @Test
+  public void set_ugi_before_call() throws Exception {
+    when(base.getClient()).thenReturn(client);
+    when(client.getName()).thenThrow(new TTransportException()).thenReturn("ourName");
+
+    CloseableThriftHiveMetastoreIface iface = factory.newInstance("name", RECONNECTION_RETRIES, base);
+    List<String> setUgiResult = iface.set_ugi(TEST_ARGS.getUser(), TEST_ARGS.getGroups());
+    assertThat(setUgiResult, is(Lists.newArrayList(TEST_ARGS.getUser())));
+    String name = iface.getName();
+
+    assertThat(name, is("ourName"));
+    verify(base).open(TEST_ARGS);
+    verify(base).reconnect(TEST_ARGS);
+  }
+
+  @Test
+  public void set_ugi_CachedWhenClosed() throws Exception {
+    when(base.isOpen()).thenReturn(false);
+
+    CloseableThriftHiveMetastoreIface iface = factory.newInstance("name", RECONNECTION_RETRIES, base);
+    List<String> setUgiResult = iface.set_ugi(TEST_ARGS.getUser(), TEST_ARGS.getGroups());
+    assertThat(setUgiResult, is(Lists.newArrayList(TEST_ARGS.getUser())));
+
+    verify(base, never()).open(TEST_ARGS);
+    verify(base, never()).reconnect(TEST_ARGS);
+  }
+
+  @Test
+  public void set_ugi_CalledWhenOpen() throws Exception {
+    when(base.getClient()).thenReturn(client);
+    when(base.isOpen()).thenReturn(true);
+    when(client.set_ugi(TEST_ARGS.getUser(), TEST_ARGS.getGroups())).thenReturn(Lists.newArrayList("users!"));
+
+    CloseableThriftHiveMetastoreIface iface = factory.newInstance("name", RECONNECTION_RETRIES, base);
+    List<String> setUgiResult = iface.set_ugi(TEST_ARGS.getUser(), TEST_ARGS.getGroups());
+    assertThat(setUgiResult, is(Lists.newArrayList("users!")));
   }
 
   @Test(expected = MetastoreUnavailableException.class)
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/HiveUgiArgsStub.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/HiveUgiArgsStub.java
new file mode 100644
index 000000000..b0a25cd66
--- /dev/null
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/HiveUgiArgsStub.java
@@ -0,0 +1,25 @@
+/**
+ * Copyright (C) 2016-2023 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.hotels.bdp.waggledance.client;
+
+
+import com.google.common.collect.Lists;
+
+public final class HiveUgiArgsStub {
+
+  public static final HiveUgiArgs TEST_ARGS = new HiveUgiArgs("test", Lists.newArrayList("my_group"));
+
+}
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/HiveUgiArgsTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/HiveUgiArgsTest.java
new file mode 100644
index 000000000..7f76ca823
--- /dev/null
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/HiveUgiArgsTest.java
@@ -0,0 +1,64 @@
+/**
+ * Copyright (C) 2016-2023 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.hotels.bdp.waggledance.client;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+import org.junit.Test;
+
+public class HiveUgiArgsTest {
+
+  @Test
+  public void groups() throws Exception {
+    HiveUgiArgs args = new HiveUgiArgs("user", new ArrayList<>());
+    assertThat("user", is(args.getUser()));
+    assertThatListIsMutable(args);
+  }
+
+  private void assertThatListIsMutable(HiveUgiArgs args) {
+    assertThat(args.getGroups().size(), is(0));
+    // List should be mutable, Hive code potentially mutates it.
+    args.getGroups().add("user");
+    assertThat(args.getGroups().size(), is(1));
+  }
+
+  @Test
+  public void groupDefaults() throws Exception {
+    HiveUgiArgs args = HiveUgiArgs.WAGGLE_DANCE_DEFAULT;
+    assertThat("waggledance", is(args.getUser()));
+    assertThatListIsMutable(args);
+  }
+
+  @Test
+  public void groupsImmutable() throws Exception {
+    // Even when constructed with an immutable list, the stored groups should be
+    // copied into a mutable list.
+    HiveUgiArgs args = new HiveUgiArgs("user", Collections.emptyList());
+    assertThat("user", is(args.getUser()));
+    assertThatListIsMutable(args);
+  }
+
+  @Test
+  public void groupsNull() throws Exception {
+    HiveUgiArgs args = new HiveUgiArgs("user", null);
+    assertThat("user", is(args.getUser()));
+    assertTrue(args.getGroups().isEmpty());
+  }
+
+}
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManagerIntegrationTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManagerIntegrationTest.java
index faad4e3ef..1d63b1885 100644
--- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManagerIntegrationTest.java
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManagerIntegrationTest.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2021 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -19,6 +19,8 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertNotNull; +import static com.hotels.bdp.waggledance.client.HiveUgiArgsStub.TEST_ARGS; + import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.Database; @@ -51,14 +53,14 @@ public void init() { @Test public void open() throws Exception { - manager.open(); + manager.open(TEST_ARGS); Database database = manager.getClient().get_database(databaseName); assertNotNull(database); } @Test public void reconnect() throws Exception { - manager.reconnect(); + manager.reconnect(TEST_ARGS); Database database = manager.getClient().get_database(databaseName); assertNotNull(database); } @@ -70,7 +72,7 @@ public void openWithDummyConnectionThrowsRuntimeWithOriginalExceptionInMessage() connectionTimeout); try { - manager.open(); + manager.open(TEST_ARGS); } catch (RuntimeException e) { assertThat(e.getMessage(), containsString("java.net.ConnectException: Connection refused")); } diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManagerTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManagerTest.java index 48256e37a..7ebcad47a 100644 --- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManagerTest.java +++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/client/ThriftMetastoreClientManagerTest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +20,8 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static com.hotels.bdp.waggledance.client.HiveUgiArgsStub.TEST_ARGS; + import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.thrift.transport.TSocket; @@ -103,15 +105,15 @@ public void closeOpenedConnectionTwice() { } @Test - public void typical() { - client.open(); + public void typical() throws Exception { + client.open(TEST_ARGS); client.close(); } @Test(expected = RuntimeException.class) public void openSlowConnection() { client = new ThriftMetastoreClientManager(hiveConf, hiveCompatibleThriftHiveMetastoreIfaceFactory, 1); - client.open(); + client.open(TEST_ARGS); } } diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/conf/WaggleDanceConfigurationIntegrationTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/conf/WaggleDanceConfigurationIntegrationTest.java index 6253bce9e..379079e91 100644 --- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/conf/WaggleDanceConfigurationIntegrationTest.java +++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/conf/WaggleDanceConfigurationIntegrationTest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -31,8 +31,14 @@
 import com.google.common.collect.ImmutableMap;
 
 @RunWith(SpringJUnit4ClassRunner.class)
-@TestPropertySource(properties = { "port: 123", "verbose: true", "reserved-prefix: my_prefix_",
-    "disconnect-connection-delay: 15", "disconnect-time-unit: seconds", "configuration-properties.prop1: val1" })
+@TestPropertySource(properties = {
+    "port: 123",
+    "verbose: true",
+    "reserved-prefix: my_prefix_",
+    "disconnect-connection-delay: 15",
+    "disconnect-time-unit: seconds",
+    "configuration-properties.prop1: val1",
+    "queryFunctionsAcrossAllMetastores: false" })
 @ContextConfiguration(classes = { WaggleDanceConfiguration.class })
 public class WaggleDanceConfigurationIntegrationTest {
 
@@ -47,6 +53,7 @@ public void typical() {
 
     Map<String, String> props = ImmutableMap.<String, String>builder().put("prop1", "val1").build();
     assertThat(waggleDanceConfiguration.getConfigurationProperties(), is(props));
+    assertThat(waggleDanceConfiguration.isQueryFunctionsAcrossAllMetastores(), is(false));
   }
 }
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/context/CommonBeansTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/context/CommonBeansTest.java
index a53ed98ba..af39f19a9 100644
--- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/context/CommonBeansTest.java
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/context/CommonBeansTest.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2021 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -31,9 +31,11 @@
 import com.hotels.bdp.waggledance.mapping.service.PrefixNamingStrategy;
 import com.hotels.bdp.waggledance.mapping.service.impl.LowerCasePrefixNamingStrategy;
+import com.hotels.bdp.waggledance.metrics.MonitoringConfiguration;
+import com.hotels.bdp.waggledance.metrics.MonitoringConfigurationTestContext;
 
 @RunWith(SpringJUnit4ClassRunner.class)
-@ContextConfiguration(classes = { CommonBeansTestContext.class, CommonBeans.class })
+@ContextConfiguration(classes = { CommonBeansTestContext.class, MonitoringConfiguration.class, MonitoringConfigurationTestContext.class, CommonBeans.class })
 public class CommonBeansTest {
 
   private @Autowired HiveConf hiveConf;
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/context/ScheduledBeansTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/context/ScheduledBeansTest.java
index 9b936f533..b05b050a5 100644
--- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/context/ScheduledBeansTest.java
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/context/ScheduledBeansTest.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2019 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -26,20 +26,21 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.stubbing.Answer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import lombok.extern.log4j.Log4j2; + import com.hotels.bdp.waggledance.mapping.service.impl.PollingFederationService; +import com.hotels.bdp.waggledance.metrics.MonitoringConfiguration; +import com.hotels.bdp.waggledance.metrics.MonitoringConfigurationTestContext; @RunWith(SpringJUnit4ClassRunner.class) -@ContextConfiguration(classes = { ScheduledBeans.class, ScheduledBeansTestContext.class }) +@ContextConfiguration(classes = { MonitoringConfiguration.class, MonitoringConfigurationTestContext.class, ScheduledBeansTestContext.class, ScheduledBeans.class }) +@Log4j2 public class ScheduledBeansTest { - private final static Logger log = LoggerFactory.getLogger(ScheduledBeansTest.class); - @Autowired private PollingFederationService pollingFederationService; diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMappingImplTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMappingImplTest.java index 1f14e7a9a..b3ea1cdb6 100644 --- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMappingImplTest.java +++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/DatabaseMappingImplTest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -54,7 +54,7 @@
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.ISchema;
 import org.apache.hadoop.hive.metastore.api.LockComponent;
 import org.apache.hadoop.hive.metastore.api.LockRequest;
 import org.apache.hadoop.hive.metastore.api.Partition;
@@ -99,7 +99,7 @@ public class DatabaseMappingImplTest {
 
   private DatabaseMappingImpl databaseMapping;
   private Partition partition;
-  private Index index;
+  private ISchema iSchema;
   private HiveObjectRef hiveObjectRef;
   private PartitionSpec partitionSpec;
   private Database database;
@@ -114,8 +114,8 @@ public void setUp() {
     partition = new Partition();
     partition.setDbName(DB_NAME);
     partitions = Lists.newArrayList(partition);
-    index = new Index();
-    index.setDbName(DB_NAME);
+    iSchema = new ISchema();
+    iSchema.setDbName(DB_NAME);
     hiveObjectRef = new HiveObjectRef();
     hiveObjectRef.setDbName(DB_NAME);
     hiveObjectRef.setObjectType(HiveObjectType.DATABASE);
@@ -243,15 +243,15 @@ public void transformInboundPartition() throws Exception {
 
   @Test
   public void transformOutboundIndex() throws Exception {
-    Index result = databaseMapping.transformOutboundIndex(index);
-    assertThat(result, is(sameInstance(index)));
+    ISchema result = databaseMapping.transformOutboundISchema(iSchema);
+    assertThat(result, is(sameInstance(iSchema)));
     assertThat(result.getDbName(), is(OUT_DB_NAME));
   }
 
   @Test
   public void transformInboundIndex() throws Exception {
-    Index result = databaseMapping.transformInboundIndex(index);
-    assertThat(result, is(sameInstance(index)));
+    ISchema result = databaseMapping.transformInboundISchema(iSchema);
+    assertThat(result, is(sameInstance(iSchema)));
     assertThat(result.getDbName(), is(IN_DB_NAME));
   }
 
@@ -563,12 +563,12 @@ public void transformOutboundPartitionSpecs() throws Exception {
 
   @Test
   public void transformOutboundIndexes() throws Exception {
-    List<Index> indexes = new ArrayList<>();
-    indexes.add(index);
-    List<Index> result = databaseMapping.transformOutboundIndexes(indexes);
+    List<ISchema> indexes = new ArrayList<>();
+    indexes.add(iSchema);
+    List<ISchema> result = databaseMapping.transformOutboundISchemas(indexes);
     assertThat(result, is(sameInstance(indexes)));
-    Index resultIndex = result.get(0);
-    assertThat(resultIndex, is(sameInstance(index)));
+    ISchema resultIndex = result.get(0);
+    assertThat(resultIndex, is(sameInstance(iSchema)));
     assertThat(resultIndex.getDbName(), is(OUT_DB_NAME));
   }
 
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingDecoratorTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingDecoratorTest.java
index ad9611c25..b6c507fa8 100644
--- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingDecoratorTest.java
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/MetaStoreMappingDecoratorTest.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2021 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -32,7 +32,7 @@
 import org.mockito.Mock;
 import org.mockito.junit.MockitoJUnitRunner;
 
-import com.beust.jcommander.internal.Lists;
+import com.google.common.collect.Lists;
 
 @RunWith(MockitoJUnitRunner.class)
 public class MetaStoreMappingDecoratorTest {
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/PrefixingMetastoreFilter.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/PrefixingMetastoreFilter.java
index 822ef0e13..b7c12010c 100644
--- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/PrefixingMetastoreFilter.java
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/model/PrefixingMetastoreFilter.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2021 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -20,13 +20,13 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreFilterHook;
 import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionSpec;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
 
 /**
  * For testing purposes
@@ -48,11 +48,21 @@ public Database filterDatabase(Database dataBase) throws MetaException, NoSuchOb
     return dataBase;
   }
 
-  @Override
-  public List<String> filterTableNames(String dbName, List<String> tableList) throws MetaException {
+  @Override //TODO
+  public List<String> filterTableNames(String catName, String dbName, List<String> tableList) throws MetaException {
     return tableList;
   }
 
+  @Override //TODO
+  public List<TableMeta> filterTableMetas(List<TableMeta> tableMetas) throws MetaException {
+    return tableMetas;
+  }
+
+  /* @Override
+  public List<String> filterTableNames(String dbName, List<String> tableList) throws MetaException {
+    return tableList;
+  }*/
+
   @Override
   public Table filterTable(Table table) throws MetaException, NoSuchObjectException {
     setLocationPrefix(table);
@@ -90,16 +100,21 @@ public Partition filterPartition(Partition partition) throws MetaException, NoSu
     return partition;
   }
 
-  @Override
+  @Override //TODO
+  public List<String> filterPartitionNames(String catName, String dbName, String tblName, List<String> partitionNames) throws MetaException {
+    return partitionNames;
+  }
+
+/* @Override
   public List<String> filterPartitionNames(String dbName, String tblName, List<String> partitionNames)
     throws MetaException {
     return partitionNames;
   }
 
   @Override
-  public Index filterIndex(Index index) throws MetaException, NoSuchObjectException {
-    setLocationPrefix(index.getSd());
-    return index;
+  public ISchema filterIndex(ISchema iSchema) throws MetaException, NoSuchObjectException {
+    setLocationPrefix(iSchema.getSd());
+    return iSchema;
   }
 
   @Override
@@ -108,12 +123,12 @@ public List<String> filterIndexNames(String dbName, String tblName, List<String>
   }
 
   @Override
-  public List<Index> filterIndexes(List<Index> indexeList) throws MetaException {
-    for (Index index: indexeList) {
-      setLocationPrefix(index.getSd());
+  public List<ISchema> filterIndexes(List<ISchema> iSchemaList) throws MetaException {
+    for (ISchema iSchema: iSchemaList) {
+      setLocationPrefix(iSchema.getSd());
     }
-    return indexeList;
-  }
+    return iSchemaList;
+  }*/
 
   private void setLocationPrefix(Table table) {
     setLocationPrefix(table.getSd());
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/MonitoredDatabaseMappingServiceTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/MonitoredDatabaseMappingServiceTest.java
index c848c504f..bf1a8ab90 100644
--- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/MonitoredDatabaseMappingServiceTest.java
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/MonitoredDatabaseMappingServiceTest.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2021 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -106,9 +106,17 @@ public void close() throws Exception {
   }
 
   @Test
-  public void getDatabaseMappings() {
+  public void getAvailableDatabaseMappings() {
     List<DatabaseMapping> databaseMappings = Arrays.asList(primaryMapping, otherMapping);
-    when(wrapped.getDatabaseMappings()).thenReturn(databaseMappings);
-    assertThat(service.getDatabaseMappings(), is(databaseMappings));
+    when(wrapped.getAvailableDatabaseMappings()).thenReturn(databaseMappings);
+    assertThat(service.getAvailableDatabaseMappings(), is(databaseMappings));
   }
+
+  @Test
+  public void getAllDatabaseMappings() {
+    List<DatabaseMapping> databaseMappings = Arrays.asList(primaryMapping, otherMapping);
+    when(wrapped.getAllDatabaseMappings()).thenReturn(databaseMappings);
+    assertThat(service.getAllDatabaseMappings(), is(databaseMappings));
+  }
+
 }
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/PollingFederationServiceTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/PollingFederationServiceTest.java
index 64ad97b65..e619b59d3 100644
--- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/PollingFederationServiceTest.java
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/PollingFederationServiceTest.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2019 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,6 +28,9 @@
 import org.mockito.Mock;
 import org.mockito.junit.MockitoJUnitRunner;
 
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
+
 import com.google.common.collect.Lists;
 
 import com.hotels.bdp.waggledance.api.model.AbstractMetaStore;
@@ -43,7 +46,8 @@ public class PollingFederationServiceTest {
 
   @Before
   public void setUp() {
-    service = new PollingFederationService(populateStatusFederationService);
+    MeterRegistry meterRegistry = new SimpleMeterRegistry();
+    service = new PollingFederationService(populateStatusFederationService, meterRegistry);
   }
 
   @Test
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/PrefixBasedDatabaseMappingServiceTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/PrefixBasedDatabaseMappingServiceTest.java
index ef4d0debf..ba53abe2f 100644
--- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/PrefixBasedDatabaseMappingServiceTest.java
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/PrefixBasedDatabaseMappingServiceTest.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2021 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -107,8 +107,8 @@ private MetaStoreMapping mockNewMapping(boolean isAvailable, String prefix) {
     if (Strings.isNullOrEmpty(prefix)) {
       when(result.transformOutboundDatabaseName(anyString())).then(returnsFirstArg());
       when(result.transformInboundDatabaseName(anyString())).then(returnsFirstArg());
-      when(result.transformOutboundDatabaseNameMultiple(anyString())).then(
-          (Answer<List<String>>) invocation -> Lists.newArrayList((String) invocation.getArguments()[0]));
+      when(result.transformOutboundDatabaseNameMultiple(anyString()))
+          .then((Answer<List<String>>) invocation -> Lists.newArrayList((String) invocation.getArguments()[0]));
     }
     return result;
   }
@@ -119,7 +119,7 @@ public void onRegister() {
     MetaStoreMapping newMapping = mockNewMapping(true, "newname_");
     when(metaStoreMappingFactory.newInstance(newMetastore)).thenReturn(newMapping);
     service.onRegister(newMetastore);
-    List<DatabaseMapping> databaseMappings = service.getDatabaseMappings();
+    List<DatabaseMapping> databaseMappings = service.getAvailableDatabaseMappings();
     assertThat(databaseMappings.size(), is(3));
     assertThat(ImmutableSet
         .of(databaseMappings.get(0).getDatabasePrefix(), databaseMappings.get(1).getDatabasePrefix(),
@@ -144,7 +144,7 @@ public void onUpdate() throws NoSuchObjectException {
 
     service.onUpdate(federatedMetastore, newMetastore);
 
-    List<DatabaseMapping> databaseMappings = service.getDatabaseMappings();
+    List<DatabaseMapping> databaseMappings = service.getAvailableDatabaseMappings();
     assertThat(databaseMappings.size(), is(2));
     assertThat(
         ImmutableSet.of(databaseMappings.get(0).getDatabasePrefix(), databaseMappings.get(1).getDatabasePrefix()),
@@ -163,7 +163,7 @@ public void onUpdateDifferentPrefix() {
 
     service.onUpdate(federatedMetastore, newMetastore);
 
-    List<DatabaseMapping> databaseMappings = service.getDatabaseMappings();
+    List<DatabaseMapping> databaseMappings = service.getAvailableDatabaseMappings();
     assertThat(databaseMappings.size(), is(2));
     assertThat(
         ImmutableSet.of(databaseMappings.get(0).getDatabasePrefix(), databaseMappings.get(1).getDatabasePrefix()),
@@ -175,7 +175,7 @@ public void onInitOverridesDuplicates() {
     List<AbstractMetaStore> duplicates = Arrays
         .asList(primaryMetastore, federatedMetastore, primaryMetastore, federatedMetastore);
     service = new PrefixBasedDatabaseMappingService(metaStoreMappingFactory, duplicates, queryMapping);
-    assertThat(service.getDatabaseMappings().size(), is(2));
+    assertThat(service.getAvailableDatabaseMappings().size(), is(2));
   }
 
   @Test
@@ -192,7 +192,7 @@ public void onInitEmpty() {
   public void onUnregister() {
     when(metaStoreMappingFactory.prefixNameFor(federatedMetastore)).thenReturn(DB_PREFIX);
     service.onUnregister(newFederatedInstance(METASTORE_NAME, URI));
-    List<DatabaseMapping> databaseMappings = service.getDatabaseMappings();
+    List<DatabaseMapping> databaseMappings = service.getAvailableDatabaseMappings();
     assertThat(databaseMappings.size(), is(1));
     assertThat(databaseMappings.get(0).getDatabasePrefix(), is(""));
   }
@@ -201,7 +201,7 @@ public void onUnregister() {
   public void onUnregisterPrimary() {
     when(metaStoreMappingFactory.prefixNameFor(primaryMetastore)).thenReturn("");
     service.onUnregister(primaryMetastore);
-    List<DatabaseMapping> databaseMappings = service.getDatabaseMappings();
+    List<DatabaseMapping> databaseMappings = service.getAvailableDatabaseMappings();
     assertThat(databaseMappings.size(), is(1));
     assertThat(databaseMappings.get(0).getDatabasePrefix(), is(DB_PREFIX));
   }
@@ -256,14 +256,24 @@ public void databaseMappingDoesNotMatchPrimaryWithOtherMappedDbs() throws NoSuch
   }
 
   @Test
-  public void databaseMappings() {
-    List<DatabaseMapping> databaseMappings = service.getDatabaseMappings();
+  public void availableDatabaseMappings() {
+    List<DatabaseMapping> databaseMappings = service.getAvailableDatabaseMappings();
     assertThat(databaseMappings.size(), is(2));
     assertThat(
         ImmutableSet.of(databaseMappings.get(0).getDatabasePrefix(), databaseMappings.get(1).getDatabasePrefix()),
         is(ImmutableSet.of("", DB_PREFIX)));
   }
 
+  @Test
+  public void allDatabaseMappings() {
+    List<DatabaseMapping> databaseMappings = service.getAllDatabaseMappings();
+    assertThat(databaseMappings.size(), is(3));
+    assertThat(ImmutableSet
+        .of(databaseMappings.get(0).getDatabasePrefix(), databaseMappings.get(1).getDatabasePrefix(),
+            databaseMappings.get(2).getDatabasePrefix()),
+        is(ImmutableSet.of("", DB_PREFIX, "name2_")));
+  }
+
   @Test
   public void close() throws IOException {
     service.close();
@@ -334,8 +344,8 @@ public void filterTables() throws NoSuchObjectException {
     service = new PrefixBasedDatabaseMappingService(metaStoreMappingFactory,
         Arrays.asList(primaryMetastore, federatedMetastore), queryMapping);
     DatabaseMapping mapping = service.databaseMapping(PRIMARY_DB);
-    List<String> result = service.filterTables(PRIMARY_DB,
-        Lists.newArrayList("table", "table_not_mapped", "another_table"), mapping);
+    List<String> result = service
+        .filterTables(PRIMARY_DB, Lists.newArrayList("table", "table_not_mapped", "another_table"), mapping);
     assertThat(result, is(allowedTables));
   }
 
@@ -507,7 +517,7 @@ public void panopticOperationsHandlerSetUgi() throws Exception {
     when(federatedDatabaseClient.set_ugi(user, groups)).thenReturn(Lists.newArrayList("ugi", "ugi2"));
 
     PanopticOperationHandler handler = service.getPanopticOperationHandler();
-    List<DatabaseMapping> databaseMappings = service.getDatabaseMappings();
+    List<DatabaseMapping> databaseMappings = service.getAvailableDatabaseMappings();
     List<String> result = handler.setUgi(user, groups, databaseMappings);
     assertThat(result, is(Arrays.asList("ugi", "ugi2")));
   }
@@ -524,7 +534,7 @@ public void panopticOperationsHandlerGetAllFunctions() throws Exception {
     when(federatedDatabaseClient.get_all_functions()).thenReturn(responseFederated);
 
     PanopticOperationHandler handler = service.getPanopticOperationHandler();
-    GetAllFunctionsResponse result = handler.getAllFunctions(service.getDatabaseMappings());
+    GetAllFunctionsResponse result = handler.getAllFunctions(service.getAvailableDatabaseMappings());
     assertThat(result.getFunctionsSize(), is(2));
     assertThat(result.getFunctions().get(0).getFunctionName(), is("fn1"));
     assertThat(result.getFunctions().get(1).getFunctionName(), is("fn2"));
@@ -548,7 +558,7 @@ public void panopticOperationsHandlerGetAllFunctionsPrimaryMappingHasPrefix() th
     service = new PrefixBasedDatabaseMappingService(metaStoreMappingFactory,
         Arrays.asList(primaryMetastore, federatedMetastore), queryMapping);
     PanopticOperationHandler handler = service.getPanopticOperationHandler();
-    GetAllFunctionsResponse result = handler.getAllFunctions(service.getDatabaseMappings());
+    GetAllFunctionsResponse result = handler.getAllFunctions(service.getAvailableDatabaseMappings());
     assertThat(result.getFunctionsSize(), is(3));
     assertThat(result.getFunctions().get(0).getFunctionName(), is("fn1"));
     assertThat(result.getFunctions().get(0).getDbName(), is("prefixed_db"));
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/StaticDatabaseMappingServiceTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/StaticDatabaseMappingServiceTest.java
index f6377a47b..f1f7b9603 100644
--- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/StaticDatabaseMappingServiceTest.java
+++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/mapping/service/impl/StaticDatabaseMappingServiceTest.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2021 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -50,6 +50,7 @@
 import org.mockito.junit.MockitoJUnitRunner;
 import org.mockito.stubbing.Answer;
 
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
 import com.hotels.bdp.waggledance.api.WaggleDanceException;
@@ -100,8 +101,20 @@ public void init() throws Exception {
 
     when(metaStoreMappingFactory.newInstance(primaryMetastore)).thenReturn(metaStoreMappingPrimary);
     when(metaStoreMappingFactory.newInstance(federatedMetastore)).thenReturn(metaStoreMappingFederated);
+
+    AbstractMetaStore unavailableMetastore = newFederatedInstance("unavailable", "thrift:host:port");
+    MetaStoreMapping unavailableMapping = mockNewMapping(false, "unavailable");
+    when(metaStoreMappingFactory.newInstance(unavailableMetastore)).thenReturn(unavailableMapping);
+
     service = new StaticDatabaseMappingService(metaStoreMappingFactory,
-        Arrays.asList(primaryMetastore, federatedMetastore), queryMapping);
+        Arrays.asList(primaryMetastore, federatedMetastore, unavailableMetastore), queryMapping);
+  }
+
+  private MetaStoreMapping mockNewMapping(boolean isAvailable, String name) {
+    MetaStoreMapping result = Mockito.mock(MetaStoreMapping.class);
+    when(result.isAvailable()).thenReturn(isAvailable);
+    when(result.getMetastoreMappingName()).thenReturn(name);
+    return result;
   }
 
   private MetaStoreMapping mockNewMapping(boolean isAvailable, AbstractMetaStore metaStore) {
@@ -109,8 +122,8 @@ private MetaStoreMapping mockNewMapping(boolean isAvailable, AbstractMetaStore m
     when(result.isAvailable()).thenReturn(isAvailable);
     when(result.getMetastoreMappingName()).thenReturn(metaStore.getName());
     when(result.transformOutboundDatabaseName(anyString())).then(returnsFirstArg());
-    when(result.transformOutboundDatabaseNameMultiple(anyString())).then(
-        (Answer<List<String>>) invocation -> Lists.newArrayList((String) invocation.getArguments()[0]));
+    when(result.transformOutboundDatabaseNameMultiple(anyString()))
+        .then((Answer<List<String>>) invocation -> Lists.newArrayList((String) invocation.getArguments()[0]));
     return result;
   }
@@ -341,6 +354,26 @@ public void databaseMappingsIgnoreDisconnected() throws TException {
     service.databaseMapping("db2");
   }
 
+  @Test
+  public void availableDatabaseMappings() {
+    List<DatabaseMapping> databaseMappings = service.getAvailableDatabaseMappings();
+    assertThat(databaseMappings.size(), is(2));
+    assertThat(
+        ImmutableSet
+            .of(databaseMappings.get(0).getMetastoreMappingName(), databaseMappings.get(1).getMetastoreMappingName()),
+        is(ImmutableSet.of(PRIMARY_NAME, FEDERATED_NAME)));
+  }
+
+  @Test
+  public void allDatabaseMappings() {
+    List<DatabaseMapping> databaseMappings = service.getAllDatabaseMappings();
+    assertThat(databaseMappings.size(), is(3));
+    assertThat(
+        ImmutableSet
+            .of(databaseMappings.get(0).getMetastoreMappingName(), databaseMappings.get(1).getMetastoreMappingName(),
+                databaseMappings.get(2).getMetastoreMappingName()),
+        is(ImmutableSet.of(PRIMARY_NAME, FEDERATED_NAME, "unavailable")));
+  }
 
   @Test
   public void checkTableAllowedNoMappedTablesConfig() throws NoSuchObjectException {
@@ -398,8 +431,8 @@ public void filterTables() {
     primaryMetastore.setMappedTables(Collections.singletonList(mappedTables));
     service = new StaticDatabaseMappingService(metaStoreMappingFactory,
         Arrays.asList(primaryMetastore, federatedMetastore), queryMapping);
-    List<String> result = service.filterTables(PRIMARY_DB,
-        Lists.newArrayList("table", "table_not_mapped", "another_table"), null);
+    List<String> result = service
+        .filterTables(PRIMARY_DB, Lists.newArrayList("table", "table_not_mapped", "another_table"), null);
     assertThat(result, is(allowedTables));
   }
 
@@ -412,8 +445,7 @@ public void close() throws IOException {
 
   @Test
   public void closeOnEmptyInit() throws Exception {
-    service = new StaticDatabaseMappingService(metaStoreMappingFactory, Collections.emptyList(),
-        queryMapping);
+    service = new StaticDatabaseMappingService(metaStoreMappingFactory, Collections.emptyList(), queryMapping);
     service.close();
     verify(metaStoreMappingPrimary, never()).close();
     verify(metaStoreMappingFederated, never()).close();
@@ -555,7 +587,7 @@ public void panopticOperationsHandlerSetUgi() throws Exception {
     when(federatedDatabaseClient.set_ugi(user, groups)).thenReturn(Lists.newArrayList("ugi", "ugi2"));
 
     PanopticOperationHandler handler = service.getPanopticOperationHandler();
-    List<DatabaseMapping> databaseMappings = service.getDatabaseMappings();
+    List<DatabaseMapping> databaseMappings = service.getAvailableDatabaseMappings();
    List<String> result = handler.setUgi(user, groups, databaseMappings);
     assertThat(result, is(Arrays.asList("ugi", "ugi2")));
   }
@@ -572,10 +604,9 @@ public void panopticOperationsHandlerGetAllFunctions() throws Exception {
     when(federatedDatabaseClient.get_all_functions()).thenReturn(responseFederated);
 
     PanopticOperationHandler handler = service.getPanopticOperationHandler();
-    GetAllFunctionsResponse result = handler.getAllFunctions(service.getDatabaseMappings());
+    GetAllFunctionsResponse result = handler.getAllFunctions(service.getAvailableDatabaseMappings());
     assertThat(result.getFunctionsSize(), is(2));
     assertThat(result.getFunctions().get(0).getFunctionName(), is("fn1"));
     assertThat(result.getFunctions().get(1).getFunctionName(), is("fn2"));
   }
-
 }
diff --git a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/server/FederatedHMSHandlerTest.java b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/server/FederatedHMSHandlerTest.java
index
6178d6889..c7b2029d0 100644 --- a/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/server/FederatedHMSHandlerTest.java +++ b/waggle-dance-core/src/test/java/com/hotels/bdp/waggledance/server/FederatedHMSHandlerTest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,15 +15,20 @@ */ package com.hotels.bdp.waggledance.server; +import static org.apache.hadoop.hive.metastore.api.CmRecycleRequest._Fields.DATA_PATH; +import static org.apache.hadoop.hive.metastore.api.GetRuntimeStatsRequest._Fields.MAX_WEIGHT; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.isA; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -37,35 +42,58 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; +import org.apache.hadoop.hive.metastore.api.AddCheckConstraintRequest; +import org.apache.hadoop.hive.metastore.api.AddDefaultConstraintRequest; import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; import org.apache.hadoop.hive.metastore.api.AddForeignKeyRequest; +import org.apache.hadoop.hive.metastore.api.AddNotNullConstraintRequest; import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest; import org.apache.hadoop.hive.metastore.api.AddPartitionsResult; import org.apache.hadoop.hive.metastore.api.AddPrimaryKeyRequest; +import org.apache.hadoop.hive.metastore.api.AddUniqueConstraintRequest; import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse; +import org.apache.hadoop.hive.metastore.api.AlterCatalogRequest; +import org.apache.hadoop.hive.metastore.api.AlterISchemaRequest; import org.apache.hadoop.hive.metastore.api.CacheFileMetadataRequest; import org.apache.hadoop.hive.metastore.api.CacheFileMetadataResult; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsResponse; import org.apache.hadoop.hive.metastore.api.CheckLockRequest; import org.apache.hadoop.hive.metastore.api.ClearFileMetadataRequest; import org.apache.hadoop.hive.metastore.api.ClearFileMetadataResult; +import org.apache.hadoop.hive.metastore.api.CmRecycleRequest; +import org.apache.hadoop.hive.metastore.api.CmRecycleResponse; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.CompactionType; +import org.apache.hadoop.hive.metastore.api.CreateCatalogRequest; +import 
org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsResponse; +import org.apache.hadoop.hive.metastore.api.DropCatalogRequest; import org.apache.hadoop.hive.metastore.api.DropConstraintRequest; import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest; import org.apache.hadoop.hive.metastore.api.DropPartitionsResult; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp; +import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst; import org.apache.hadoop.hive.metastore.api.FireEventRequest; import org.apache.hadoop.hive.metastore.api.FireEventResponse; import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; import org.apache.hadoop.hive.metastore.api.ForeignKeysResponse; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; +import org.apache.hadoop.hive.metastore.api.GetCatalogRequest; +import org.apache.hadoop.hive.metastore.api.GetCatalogResponse; +import org.apache.hadoop.hive.metastore.api.GetCatalogsResponse; import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest; import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult; import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest; @@ -74,10 +102,14 @@ import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; +import org.apache.hadoop.hive.metastore.api.GetRuntimeStatsRequest; +import org.apache.hadoop.hive.metastore.api.GetSerdeRequest; import org.apache.hadoop.hive.metastore.api.GetTableRequest; import org.apache.hadoop.hive.metastore.api.GetTableResult; import org.apache.hadoop.hive.metastore.api.GetTablesRequest; import org.apache.hadoop.hive.metastore.api.GetTablesResult; +import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsRequest; +import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse; import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest; import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse; import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest; @@ -87,15 +119,21 @@ import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.ISchema; +import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.LockComponent; import org.apache.hadoop.hive.metastore.api.LockLevel; import org.apache.hadoop.hive.metastore.api.LockRequest; import org.apache.hadoop.hive.metastore.api.LockResponse; import org.apache.hadoop.hive.metastore.api.LockType; +import org.apache.hadoop.hive.metastore.api.MapSchemaVersionToSerdeRequest; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsResponse; import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; 
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.Partition; @@ -113,10 +151,20 @@ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.ReplTblWriteIdStateRequest; import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RuntimeStat; +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.SchemaVersion; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.SetSchemaVersionStateRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; @@ -126,7 +174,43 @@ import org.apache.hadoop.hive.metastore.api.TableStatsResult; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface; import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsResponse; import org.apache.hadoop.hive.metastore.api.UnlockRequest; +import org.apache.hadoop.hive.metastore.api.WMAlterPoolRequest; +import org.apache.hadoop.hive.metastore.api.WMAlterPoolResponse; +import org.apache.hadoop.hive.metastore.api.WMAlterResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMAlterResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMAlterTriggerRequest; +import org.apache.hadoop.hive.metastore.api.WMAlterTriggerResponse; +import org.apache.hadoop.hive.metastore.api.WMCreateOrDropTriggerToPoolMappingRequest; +import org.apache.hadoop.hive.metastore.api.WMCreateOrDropTriggerToPoolMappingResponse; +import org.apache.hadoop.hive.metastore.api.WMCreateOrUpdateMappingRequest; +import org.apache.hadoop.hive.metastore.api.WMCreateOrUpdateMappingResponse; +import org.apache.hadoop.hive.metastore.api.WMCreatePoolRequest; +import org.apache.hadoop.hive.metastore.api.WMCreatePoolResponse; +import org.apache.hadoop.hive.metastore.api.WMCreateResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMCreateResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMCreateTriggerRequest; +import org.apache.hadoop.hive.metastore.api.WMCreateTriggerResponse; +import org.apache.hadoop.hive.metastore.api.WMDropMappingRequest; +import org.apache.hadoop.hive.metastore.api.WMDropMappingResponse; +import org.apache.hadoop.hive.metastore.api.WMDropPoolRequest; +import org.apache.hadoop.hive.metastore.api.WMDropPoolResponse; +import 
org.apache.hadoop.hive.metastore.api.WMDropResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMDropResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMDropTriggerRequest; +import org.apache.hadoop.hive.metastore.api.WMDropTriggerResponse; +import org.apache.hadoop.hive.metastore.api.WMGetActiveResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMGetActiveResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMGetAllResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMGetAllResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMGetResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMGetResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMGetTriggersForResourePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMGetTriggersForResourePlanResponse; +import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanRequest; +import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.thrift.TException; import org.junit.Before; import org.junit.Test; @@ -138,6 +222,7 @@ import com.facebook.fb303.fb_status; import com.google.common.collect.Lists; +import com.hotels.bdp.waggledance.conf.WaggleDanceConfiguration; import com.hotels.bdp.waggledance.mapping.model.DatabaseMapping; import com.hotels.bdp.waggledance.mapping.service.MappingEventListener; import com.hotels.bdp.waggledance.mapping.service.PanopticOperationHandler; @@ -148,19 +233,25 @@ public class FederatedHMSHandlerTest { private final static String DB_P = "db_primary"; private final static String DB_S = "db_second"; + private final static String TBL_1 = "table1"; + private final static String TBL_2 = "table2"; + private final static String CAT_1 = "cat1"; + private final static String CAT_2 = "cat2"; + private final static String SCH_1 = "sch1"; private @Mock MappingEventListener databaseMappingService; private @Mock NotifyingFederationService notifyingFederationService; private @Mock DatabaseMapping primaryMapping; private @Mock Iface primaryClient; + private @Mock WaggleDanceConfiguration waggleDanceConfiguration; private FederatedHMSHandler handler; @Before public void setUp() throws NoSuchObjectException { - handler = new FederatedHMSHandler(databaseMappingService, notifyingFederationService); + handler = new FederatedHMSHandler(databaseMappingService, notifyingFederationService, waggleDanceConfiguration); when(databaseMappingService.primaryDatabaseMapping()).thenReturn(primaryMapping); - when(databaseMappingService.getDatabaseMappings()).thenReturn(Collections.singletonList(primaryMapping)); + when(databaseMappingService.getAvailableDatabaseMappings()).thenReturn(Collections.singletonList(primaryMapping)); when(primaryMapping.getClient()).thenReturn(primaryClient); when(primaryMapping.getMetastoreFilter()).thenReturn(new DefaultMetaStoreFilterHookImpl(new HiveConf())); when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn(DB_P); @@ -553,7 +644,7 @@ public void append_partition_by_name_with_environment_context() throws TExceptio when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn("inbound"); when(primaryClient .append_partition_by_name_with_environment_context("inbound", "table1", "partName", environmentContext)) - .thenReturn(inbound); + .thenReturn(inbound); when(primaryMapping.transformOutboundPartition(inbound)).thenReturn(outbound); Partition result = handler .append_partition_by_name_with_environment_context(DB_P, "table1", "partName", 
environmentContext); @@ -578,7 +669,7 @@ public void drop_partition_with_environment_context() throws TException { when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn("inbound"); when( primaryClient.drop_partition_with_environment_context("inbound", "table1", partVals, false, environmentContext)) - .thenReturn(true); + .thenReturn(true); boolean result = handler .drop_partition_with_environment_context(DB_P, "table1", partVals, false, environmentContext); assertThat(result, is(true)); @@ -600,7 +691,7 @@ public void drop_partition_by_name_with_environment_context() throws TException when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn("inbound"); when(primaryClient .drop_partition_by_name_with_environment_context("inbound", "table1", "partName", false, environmentContext)) - .thenReturn(true); + .thenReturn(true); boolean result = handler .drop_partition_by_name_with_environment_context(DB_P, "table1", "partName", false, environmentContext); assertThat(result, is(true)); @@ -826,21 +917,33 @@ public void flushCache() throws TException { @Test public void get_all_functions() throws TException { + when(waggleDanceConfiguration.isQueryFunctionsAcrossAllMetastores()).thenReturn(true); PanopticOperationHandler panopticHandler = Mockito.mock(PanopticOperationHandler.class); when(databaseMappingService.getPanopticOperationHandler()).thenReturn(panopticHandler); DatabaseMapping mapping = Mockito.mock(DatabaseMapping.class); List mappings = Lists.newArrayList(mapping); - when(databaseMappingService.getDatabaseMappings()).thenReturn(mappings); + when(databaseMappingService.getAvailableDatabaseMappings()).thenReturn(mappings); GetAllFunctionsResponse getAllFunctionsResponse = Mockito.mock(GetAllFunctionsResponse.class); when(panopticHandler.getAllFunctions(mappings)).thenReturn(getAllFunctionsResponse); GetAllFunctionsResponse result = handler.get_all_functions(); assertThat(result, is(getAllFunctionsResponse)); } + @Test + public void get_all_functionsViaPrimary() throws TException { + when(waggleDanceConfiguration.isQueryFunctionsAcrossAllMetastores()).thenReturn(false); + GetAllFunctionsResponse getAllFunctionsResponse = Mockito.mock(GetAllFunctionsResponse.class); + when(primaryClient.get_all_functions()).thenReturn(getAllFunctionsResponse); + + GetAllFunctionsResponse result = handler.get_all_functions(); + assertThat(result, is(getAllFunctionsResponse)); + } + @Test public void set_ugi() throws TException { PanopticOperationHandler panopticHandler = Mockito.mock(PanopticOperationHandler.class); when(databaseMappingService.getPanopticOperationHandler()).thenReturn(panopticHandler); + when(databaseMappingService.getAllDatabaseMappings()).thenReturn(Collections.singletonList(primaryMapping)); String user_name = "user"; List group_names = Lists.newArrayList("group"); when(panopticHandler.setUgi(user_name, group_names, Collections.singletonList(primaryMapping))) @@ -1058,72 +1161,83 @@ public void isPartitionMarkedForEvent() throws TException { } @Test - public void add_index() throws TException { - Index newIndex = new Index(); - newIndex.setDbName(DB_P); - Index inboundIndex = new Index(); - Index outboundIndex = new Index(); - Table newTable = new Table(); - newTable.setDbName(DB_P); - Table inboundTable = new Table(); + public void create_ischema() throws TException { + ISchema newISchema = new ISchema(); + newISchema.setDbName(DB_P); + ISchema inboundISchema = new ISchema(); + inboundISchema.setDbName(DB_P); - 
when(primaryMapping.transformInboundIndex(newIndex)).thenReturn(inboundIndex); - when(primaryMapping.transformInboundTable(newTable)).thenReturn(inboundTable); - when(primaryMapping.transformOutboundIndex(outboundIndex)).thenReturn(newIndex); - when(primaryClient.add_index(inboundIndex, inboundTable)).thenReturn(outboundIndex); + when(primaryMapping.transformInboundISchema(newISchema)).thenReturn(inboundISchema); + handler.create_ischema(newISchema); - Index result = handler.add_index(newIndex, newTable); + verify(primaryClient).create_ischema(inboundISchema); verify(primaryMapping, times(2)).checkWritePermissions(DB_P); - assertThat(result, is(newIndex)); } @Test - public void alter_index() throws TException { - Index newIndex = new Index(); - newIndex.setDbName(DB_P); - Index inboundIndex = new Index(); - when(primaryMapping.transformInboundIndex(newIndex)).thenReturn(inboundIndex); + public void alter_ischema() throws TException { + AlterISchemaRequest alterISchemaRequest = new AlterISchemaRequest(); + ISchemaName oldISchema = new ISchemaName(); + oldISchema.setDbName(DB_P); + oldISchema.setCatName(CAT_1); + oldISchema.setSchemaName("oldSchema"); - handler.alter_index(DB_P, "table", "index", newIndex); + ISchema newISchema = new ISchema(); + newISchema.setDbName(DB_P); + newISchema.setCatName(CAT_1); + newISchema.setName("newSchema"); + + alterISchemaRequest.setName(oldISchema); + alterISchemaRequest.setNewSchema(newISchema); + + when(primaryMapping.transformInboundAlterISchemaRequest(alterISchemaRequest)).thenReturn(new AlterISchemaRequest()); + + handler.alter_ischema(alterISchemaRequest); verify(primaryMapping, times(2)).checkWritePermissions(DB_P); - verify(primaryClient).alter_index(DB_P, "table", "index", inboundIndex); + verify(primaryClient).alter_ischema(new AlterISchemaRequest()); } @Test - public void drop_index_by_name() throws TException { - when(primaryClient.drop_index_by_name(DB_P, "table", "index", true)).thenReturn(true); - boolean result = handler.drop_index_by_name(DB_P, "table", "index", true); + public void drop_ischema() throws TException { + ISchemaName iSchemaName = new ISchemaName(); + iSchemaName.setDbName(DB_P); + iSchemaName.setCatName(CAT_1); + iSchemaName.setSchemaName(SCH_1); + + ISchemaName inboundISchemaName = new ISchemaName(); + inboundISchemaName.setDbName(DB_P); + inboundISchemaName.setCatName(CAT_1); + inboundISchemaName.setSchemaName(SCH_1); + + when(primaryMapping.transformInboundISchemaName(iSchemaName)).thenReturn(inboundISchemaName); + + handler.drop_ischema(iSchemaName); verify(primaryMapping).checkWritePermissions(DB_P); - assertThat(result, is(true)); + verify(primaryClient).drop_ischema(iSchemaName); } @Test - public void get_index_by_name() throws TException { - Index index = new Index(); - Index outboundIndex = new Index(); - when(primaryClient.get_index_by_name(DB_P, "table", "index")).thenReturn(index); - when(primaryMapping.transformOutboundIndex(index)).thenReturn(outboundIndex); - Index result = handler.get_index_by_name(DB_P, "table", "index"); - assertThat(result, is(outboundIndex)); - } + public void get_ischema() throws TException { + ISchemaName iSchemaName = new ISchemaName(); + iSchemaName.setDbName(DB_P); + iSchemaName.setCatName(CAT_1); + iSchemaName.setSchemaName(SCH_1); - @Test - public void get_indexes() throws TException { - List indexList = Collections.singletonList(new Index()); - List outboundIndexList = Collections.singletonList(new Index()); - 
when(primaryMapping.transformOutboundIndexes(indexList)).thenReturn(outboundIndexList);
-    when(primaryClient.get_indexes(DB_P, "table", (short) 2)).thenReturn(indexList);
+    ISchemaName inboundISchemaName = new ISchemaName();
+    inboundISchemaName.setDbName(DB_P);
+    inboundISchemaName.setCatName(CAT_1);
+    inboundISchemaName.setSchemaName(SCH_1);
-    List<Index> result = handler.get_indexes(DB_P, "table", (short) 2);
-    assertThat(result, is(outboundIndexList));
-  }
+    ISchema outboundISchema = new ISchema();
+    outboundISchema.setDbName(DB_P);
+    outboundISchema.setCatName(CAT_1);
+    outboundISchema.setName(SCH_1);
-  @Test
-  public void get_index_names() throws TException {
-    List<String> indexNames = Arrays.asList("name1", "name2");
-    when(primaryClient.get_index_names(DB_P, "table", (short) 2)).thenReturn(indexNames);
-    List<String> result = handler.get_index_names(DB_P, "table", (short) 2);
-    assertThat(result, is(indexNames));
+    when(primaryMapping.transformInboundISchemaName(iSchemaName)).thenReturn(inboundISchemaName);
+    when(primaryClient.get_ischema(inboundISchemaName)).thenReturn(new ISchema());
+    when(primaryMapping.transformOutboundISchema(new ISchema())).thenReturn(outboundISchema);
+    ISchema result = handler.get_ischema(iSchemaName);
+    assertThat(result, is(outboundISchema));
   }
 
   @Test
@@ -1578,7 +1692,7 @@ public void abort_txns() throws TException {
 
   @Test
   public void add_dynamic_partitions() throws TException {
-    AddDynamicPartitions request = new AddDynamicPartitions(1, DB_P, "table", Collections.emptyList());
+    AddDynamicPartitions request = new AddDynamicPartitions(1, 1, DB_P, "table", Collections.emptyList());
     AddDynamicPartitions inboundRequest = new AddDynamicPartitions();
     when(primaryMapping.transformInboundAddDynamicPartitions(request)).thenReturn(inboundRequest);
     handler.add_dynamic_partitions(request);
@@ -1589,6 +1703,7 @@ public void add_dynamic_partitions() throws TException {
   @Test
   public void add_foreign_key() throws TException {
     AddForeignKeyRequest request = new AddForeignKeyRequest();
+    when(primaryMapping.transformInboundAddForeignKeyRequest(request)).thenReturn(request);
     handler.add_foreign_key(request);
     verify(primaryClient).add_foreign_key(request);
   }
@@ -1618,18 +1733,29 @@ public void add_token() throws TException {
   @Test
   public void alter_partitions_with_environment_context() throws TException {
     EnvironmentContext environmentContext = new EnvironmentContext();
-    handler.alter_partitions_with_environment_context(DB_P, "table", Collections.emptyList(), environmentContext);
-    verify(primaryMapping).checkWritePermissions(DB_P);
-    verify(primaryClient).alter_partitions_with_environment_context(DB_P, "table", Collections.emptyList(),
-        environmentContext);
+    Partition newPartition1 = new Partition();
+    newPartition1.setDbName(DB_P);
+    Partition newPartition2 = new Partition();
+    newPartition2.setDbName(DB_P);
+    List<Partition> inbound = Lists.newArrayList(new Partition(), new Partition());
+    List<Partition> partitions = Lists.newArrayList(newPartition1, newPartition2);
+    when(primaryMapping.transformInboundPartitions(partitions)).thenReturn(inbound);
+    handler.alter_partitions_with_environment_context(DB_P, "table", partitions, environmentContext);
+    verify(primaryMapping, times(3)).checkWritePermissions(DB_P);
+    verify(primaryClient)
+        .alter_partitions_with_environment_context(DB_P, "table", inbound, environmentContext);
   }
 
   @Test
   public void alter_table_with_cascade() throws TException {
     Table table = new Table();
+    table.setDbName(DB_P);
+    Table inbound = new Table();
+    when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn("inbound");
+    when(primaryMapping.transformInboundTable(table)).thenReturn(inbound);
     handler.alter_table_with_cascade(DB_P, "table", table, true);
-    verify(primaryMapping).checkWritePermissions(DB_P);
-    verify(primaryClient).alter_table_with_cascade(DB_P, "table", table, true);
+    verify(primaryMapping, times(2)).checkWritePermissions(DB_P);
+    verify(primaryClient).alter_table_with_cascade("inbound", "table", inbound, true);
   }
 
   @Test
@@ -1657,12 +1783,19 @@ public void create_table_with_constraints() throws TException {
     Table table = new Table();
     table.setDbName(DB_P);
     Table inboundTable = new Table();
+    inboundTable.setDbName(DB_P);
     List<SQLPrimaryKey> primaryKeys = Collections.emptyList();
     List<SQLForeignKey> foreignKeys = Collections.emptyList();
+    List<SQLUniqueConstraint> uniqueConstraints = Collections.emptyList();
+    List<SQLNotNullConstraint> notNullConstraints = Collections.emptyList();
+    List<SQLDefaultConstraint> defaultConstraints = Collections.emptyList();
+    List<SQLCheckConstraint> checkConstraints = Collections.emptyList();
     when(primaryMapping.transformInboundTable(table)).thenReturn(inboundTable);
-    handler.create_table_with_constraints(table, primaryKeys, foreignKeys);
+    handler.create_table_with_constraints(table, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints,
+        defaultConstraints, checkConstraints);
     verify(primaryMapping).checkWritePermissions(DB_P);
-    verify(primaryClient).create_table_with_constraints(inboundTable, primaryKeys, foreignKeys);
+    verify(primaryClient).create_table_with_constraints(table, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints,
+        defaultConstraints, checkConstraints);
   }
 
   @Test
@@ -1682,8 +1815,8 @@ public void exchange_partitions() throws TException {
     List<Partition> expected = Collections.emptyList();
     when(primaryMapping.transformInboundDatabaseName("dest_db")).thenReturn("dest_db");
     when(primaryMapping.transformOutboundPartitions(partitions)).thenReturn(expected);
-    when(primaryClient.exchange_partitions(partitionSpecs, DB_P, "source", "dest_db", "dest_table")).thenReturn(
-        partitions);
+    when(primaryClient.exchange_partitions(partitionSpecs, DB_P, "source", "dest_db", "dest_table"))
+        .thenReturn(partitions);
     List<Partition> result = handler.exchange_partitions(partitionSpecs, DB_P, "source", "dest_db", "dest_table");
     verify(primaryMapping).checkWritePermissions(DB_P);
     verify(primaryMapping).checkWritePermissions("dest_db");
@@ -1719,8 +1852,8 @@ public void get_current_notificationEventId() throws TException {
   @Test
   public void get_fields_with_environment_context() throws TException {
     EnvironmentContext context = new EnvironmentContext();
-    List<FieldSchema> expected = Arrays.asList(new FieldSchema("name1", "type1", ""),
-        new FieldSchema("name2", "type2", ""));
+    List<FieldSchema> expected = Arrays
+        .asList(new FieldSchema("name1", "type1", ""), new FieldSchema("name2", "type2", ""));
     when(primaryClient.get_fields_with_environment_context(DB_P, "table", context)).thenReturn(expected);
     List<FieldSchema> result = handler.get_fields_with_environment_context(DB_P, "table", context);
     assertThat(result, is(expected));
@@ -1784,7 +1917,8 @@ public void get_primary_keys() throws TException {
   @Test
   public void get_partition_values() throws TException {
-    PartitionValuesRequest request = new PartitionValuesRequest(DB_P, "table", Collections.singletonList(new FieldSchema()));
+    PartitionValuesRequest request = new PartitionValuesRequest(DB_P, "table",
+        Collections.singletonList(new FieldSchema()));
     List<PartitionValuesRow> partitionValues = Collections.singletonList(new PartitionValuesRow());
     PartitionValuesResponse response = new PartitionValuesResponse(partitionValues);
     when(primaryClient.get_partition_values(request)).thenReturn(response);
@@ -1794,4 +1928,650 @@ public void get_partition_values() throws TException {
     assertThat(result.getPartitionValues(), is(sameInstance(partitionValues)));
   }
 
+  @Test
+  public void create_catalog() throws TException {
+    CreateCatalogRequest createCatalogRequest = new CreateCatalogRequest();
+    createCatalogRequest.setCatalog(new Catalog(CAT_1, ""));
+
+    doNothing().when(primaryClient).create_catalog(isA(CreateCatalogRequest.class));
+    handler.create_catalog(createCatalogRequest);
+    verify(primaryClient, times(1)).create_catalog(createCatalogRequest);
+  }
+
+  @Test
+  public void alter_catalog() throws TException {
+    AlterCatalogRequest alterCatalogRequest = new AlterCatalogRequest();
+    alterCatalogRequest.setName(CAT_1);
+    alterCatalogRequest.setNewCat(new Catalog(CAT_2, ""));
+
+    doNothing().when(primaryClient).alter_catalog(isA(AlterCatalogRequest.class));
+    handler.alter_catalog(alterCatalogRequest);
+    verify(primaryClient, times(1)).alter_catalog(alterCatalogRequest);
+  }
+
+  @Test
+  public void get_catalog() throws TException {
+    GetCatalogRequest getCatalogRequest = new GetCatalogRequest();
+    GetCatalogResponse getCatalogResponse = new GetCatalogResponse();
+
+    when(primaryClient.get_catalog(getCatalogRequest)).thenReturn(getCatalogResponse);
+    GetCatalogResponse result = handler.get_catalog(getCatalogRequest);
+    assertThat(result, is(getCatalogResponse));
+    verify(primaryClient, times(1)).get_catalog(getCatalogRequest);
+  }
+
+  @Test
+  public void get_catalogs() throws TException {
+    GetCatalogsResponse getCatalogsResponse = new GetCatalogsResponse();
+
+    when(primaryClient.get_catalogs()).thenReturn(getCatalogsResponse);
+    GetCatalogsResponse result = handler.get_catalogs();
+    assertThat(result, is(getCatalogsResponse));
+    verify(primaryClient, times(1)).get_catalogs();
+  }
+
+  @Test
+  public void drop_catalog() throws TException {
+    DropCatalogRequest dropCatalogRequest = new DropCatalogRequest();
+    doNothing().when(primaryClient).drop_catalog(isA(DropCatalogRequest.class));
+    handler.drop_catalog(dropCatalogRequest);
+    verify(primaryClient, times(1)).drop_catalog(dropCatalogRequest);
+  }
+
+  @Test
+  public void truncate_table() throws TException {
+    List<String> partNames = Lists.newArrayList();
+    handler.truncate_table(DB_P, TBL_1, partNames);
+    verify(primaryClient, times(1)).truncate_table(DB_P, TBL_1, partNames);
+  }
+
+  @Test
+  public void refresh_privileges() throws TException {
+    GrantRevokePrivilegeRequest grantRevokePrivilegeRequest = new GrantRevokePrivilegeRequest();
+    HiveObjectRef hiveObjectRef = new HiveObjectRef();
+    hiveObjectRef.setDbName(DB_P);
+    GrantRevokePrivilegeResponse grantRevokePrivilegeResponse = new GrantRevokePrivilegeResponse();
+
+    when(primaryMapping.transformInboundHiveObjectRef(hiveObjectRef)).thenReturn(hiveObjectRef);
+    when(primaryClient.refresh_privileges(hiveObjectRef, "dummy", grantRevokePrivilegeRequest)).thenReturn(grantRevokePrivilegeResponse);
+    GrantRevokePrivilegeResponse result = handler.refresh_privileges(hiveObjectRef, "dummy", grantRevokePrivilegeRequest);
+    assertThat(result, is(grantRevokePrivilegeResponse));
+    verify(primaryClient, times(1)).refresh_privileges(hiveObjectRef, "dummy", grantRevokePrivilegeRequest);
+  }
+
+  @Test
+  public void repl_tbl_writeid_state() throws TException {
+    ReplTblWriteIdStateRequest replTblWriteIdStateRequest = new ReplTblWriteIdStateRequest();
+    replTblWriteIdStateRequest.setDbName(DB_P);
+
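+    // note: the inbound transform below is stubbed as a pass-through, so the handler
+    // is expected to forward the identical request object to the primary client
+   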
when(primaryMapping.transformInboundReplTblWriteIdStateRequest(replTblWriteIdStateRequest)).thenReturn(replTblWriteIdStateRequest); + doNothing().when(primaryClient).repl_tbl_writeid_state(isA(ReplTblWriteIdStateRequest.class)); + handler.repl_tbl_writeid_state(replTblWriteIdStateRequest); + verify(primaryMapping).checkWritePermissions(DB_P); + verify(primaryClient, times(1)).repl_tbl_writeid_state(replTblWriteIdStateRequest); + } + + @Test + public void get_valid_write_ids() throws TException { + GetValidWriteIdsRequest getValidWriteIdsRequest = new GetValidWriteIdsRequest(); + GetValidWriteIdsResponse getValidWriteIdsResponse = new GetValidWriteIdsResponse(); + + when(primaryClient.get_valid_write_ids(getValidWriteIdsRequest)).thenReturn(getValidWriteIdsResponse); + GetValidWriteIdsResponse result = handler.get_valid_write_ids(getValidWriteIdsRequest); + assertThat(result, is(getValidWriteIdsResponse)); + verify(primaryClient, times(1)).get_valid_write_ids(getValidWriteIdsRequest); + } + + @Test + public void allocate_table_write_ids() throws TException { + AllocateTableWriteIdsRequest allocateTableWriteIdsRequest = new AllocateTableWriteIdsRequest(); + allocateTableWriteIdsRequest.setDbName(DB_P); + AllocateTableWriteIdsResponse allocateTableWriteIdsResponse = new AllocateTableWriteIdsResponse(); + + when(primaryMapping.transformInboundAllocateTableWriteIdsRequest(allocateTableWriteIdsRequest)).thenReturn(allocateTableWriteIdsRequest); + when(primaryClient.allocate_table_write_ids(allocateTableWriteIdsRequest)).thenReturn(allocateTableWriteIdsResponse); + + AllocateTableWriteIdsResponse result = handler.allocate_table_write_ids(allocateTableWriteIdsRequest); + assertThat(result, is(allocateTableWriteIdsResponse)); + verify(primaryMapping).checkWritePermissions(DB_P); + verify(primaryClient, times(1)).allocate_table_write_ids(allocateTableWriteIdsRequest); + } + + @Test + public void add_unique_constraint() throws TException { + AddUniqueConstraintRequest addUniqueConstraintRequest = new AddUniqueConstraintRequest(); + addUniqueConstraintRequest.setUniqueConstraintCols(Lists.newArrayList(new SQLUniqueConstraint())); + + when(primaryMapping.transformInboundAddUniqueConstraintRequest(addUniqueConstraintRequest)).thenReturn(addUniqueConstraintRequest); + doNothing().when(primaryClient).add_unique_constraint(isA(AddUniqueConstraintRequest.class)); + handler.add_unique_constraint(addUniqueConstraintRequest); + verify(primaryClient, times(1)).add_unique_constraint(addUniqueConstraintRequest); + } + + @Test + public void add_not_null_constraint() throws TException { + AddNotNullConstraintRequest addNotNullConstraintRequest = new AddNotNullConstraintRequest(); + addNotNullConstraintRequest.setNotNullConstraintCols(Lists.newArrayList(new SQLNotNullConstraint())); + + when(primaryMapping.transformInboundAddNotNullConstraintRequest(addNotNullConstraintRequest)).thenReturn(addNotNullConstraintRequest); + doNothing().when(primaryClient).add_not_null_constraint(isA(AddNotNullConstraintRequest.class)); + handler.add_not_null_constraint(addNotNullConstraintRequest); + verify(primaryClient, times(1)).add_not_null_constraint(addNotNullConstraintRequest); + } + + @Test + public void add_default_constraint() throws TException { + AddDefaultConstraintRequest addDefaultConstraintRequest = new AddDefaultConstraintRequest(); + addDefaultConstraintRequest.setDefaultConstraintCols(Lists.newArrayList(new SQLDefaultConstraint())); + + 
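+    // as in the other constraint tests here, the mapping stub hands the request
+    // through unchanged before the handler delegates it to the primary metastore client
+   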
when(primaryMapping.transformInboundAddDefaultConstraintRequest(addDefaultConstraintRequest)). + thenReturn(addDefaultConstraintRequest); + + doNothing().when(primaryClient).add_default_constraint(isA(AddDefaultConstraintRequest.class)); + handler.add_default_constraint(addDefaultConstraintRequest); + verify(primaryClient, times(1)).add_default_constraint(addDefaultConstraintRequest); + } + + @Test + public void add_check_constraint() throws TException { + AddCheckConstraintRequest addCheckConstraintRequest = new AddCheckConstraintRequest(); + addCheckConstraintRequest.setCheckConstraintCols(Lists.newArrayList(new SQLCheckConstraint())); + + when(primaryMapping.transformInboundAddCheckConstraintRequest(addCheckConstraintRequest)).thenReturn(addCheckConstraintRequest); + doNothing().when(primaryClient).add_check_constraint(isA(AddCheckConstraintRequest.class)); + handler.add_check_constraint(addCheckConstraintRequest); + verify(primaryClient, times(1)).add_check_constraint(addCheckConstraintRequest); + } + + @Test + public void get_metastore_db_uuid() throws TException { + when(primaryClient.get_metastore_db_uuid()).thenReturn("uuid"); + String result = handler.get_metastore_db_uuid(); + assertThat(result, is("uuid")); + verify(primaryClient, times(1)).get_metastore_db_uuid(); + } + + @Test + public void create_resource_plan() throws TException { + WMCreateResourcePlanRequest wmCreateResourcePlanRequest = new WMCreateResourcePlanRequest(); + WMCreateResourcePlanResponse wmCreateResourcePlanResponse = new WMCreateResourcePlanResponse(); + + when(primaryClient.create_resource_plan(wmCreateResourcePlanRequest)).thenReturn(wmCreateResourcePlanResponse); + WMCreateResourcePlanResponse result = handler.create_resource_plan(wmCreateResourcePlanRequest); + assertThat(result, is(wmCreateResourcePlanResponse)); + verify(primaryClient, times(1)).create_resource_plan(wmCreateResourcePlanRequest); + } + + @Test + public void get_resource_plan() throws TException { + WMGetResourcePlanRequest wmGetResourcePlanRequest = new WMGetResourcePlanRequest(); + WMGetResourcePlanResponse wmGetResourcePlanResponse = new WMGetResourcePlanResponse(); + + when(primaryClient.get_resource_plan(wmGetResourcePlanRequest)).thenReturn(wmGetResourcePlanResponse); + WMGetResourcePlanResponse result = handler.get_resource_plan(wmGetResourcePlanRequest); + assertThat(result, is(wmGetResourcePlanResponse)); + verify(primaryClient, times(1)).get_resource_plan(wmGetResourcePlanRequest); + } + + @Test + public void get_active_resource_plan() throws TException { + WMGetActiveResourcePlanRequest wmGetActiveResourcePlanRequest = new WMGetActiveResourcePlanRequest(); + WMGetActiveResourcePlanResponse wmGetActiveResourcePlanResponse = new WMGetActiveResourcePlanResponse(); + + when(primaryClient.get_active_resource_plan(wmGetActiveResourcePlanRequest)).thenReturn(wmGetActiveResourcePlanResponse); + WMGetActiveResourcePlanResponse result = handler.get_active_resource_plan(wmGetActiveResourcePlanRequest); + assertThat(result, is(wmGetActiveResourcePlanResponse)); + verify(primaryClient, times(1)).get_active_resource_plan(wmGetActiveResourcePlanRequest); + } + + @Test + public void get_all_resource_plans() throws TException { + WMGetAllResourcePlanRequest wmGetAllResourcePlanRequest = new WMGetAllResourcePlanRequest(); + WMGetAllResourcePlanResponse wmGetAllResourcePlanResponse = new WMGetAllResourcePlanResponse(); + + when(primaryClient.get_all_resource_plans(wmGetAllResourcePlanRequest)).thenReturn(wmGetAllResourcePlanResponse); + 
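+    // no mapping transform is stubbed for resource-plan calls; the handler is
+    // expected to delegate the request to the primary client as-is
+   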
WMGetAllResourcePlanResponse result = handler.get_all_resource_plans(wmGetAllResourcePlanRequest); + assertThat(result, is(wmGetAllResourcePlanResponse)); + verify(primaryClient, times(1)).get_all_resource_plans(wmGetAllResourcePlanRequest); + } + + @Test + public void alter_resource_plan() throws TException { + WMAlterResourcePlanRequest wmAlterResourcePlanRequest = new WMAlterResourcePlanRequest(); + WMAlterResourcePlanResponse wmAlterResourcePlanResponse = new WMAlterResourcePlanResponse(); + + when(primaryClient.alter_resource_plan(wmAlterResourcePlanRequest)).thenReturn(wmAlterResourcePlanResponse); + WMAlterResourcePlanResponse result = handler.alter_resource_plan(wmAlterResourcePlanRequest); + assertThat(result, is(wmAlterResourcePlanResponse)); + verify(primaryClient, times(1)).alter_resource_plan(wmAlterResourcePlanRequest); + } + + @Test + public void validate_resource_plan() throws TException { + WMValidateResourcePlanRequest wmValidateResourcePlanRequest = new WMValidateResourcePlanRequest(); + WMValidateResourcePlanResponse wmValidateResourcePlanResponse = new WMValidateResourcePlanResponse(); + + when(primaryClient.validate_resource_plan(wmValidateResourcePlanRequest)).thenReturn(wmValidateResourcePlanResponse); + WMValidateResourcePlanResponse result = handler.validate_resource_plan(wmValidateResourcePlanRequest); + assertThat(result, is(wmValidateResourcePlanResponse)); + verify(primaryClient, times(1)).validate_resource_plan(wmValidateResourcePlanRequest); + } + + @Test + public void drop_resource_plan() throws TException { + WMDropResourcePlanRequest wmDropResourcePlanRequest = new WMDropResourcePlanRequest(); + WMDropResourcePlanResponse wmDropResourcePlanResponse = new WMDropResourcePlanResponse(); + + when(primaryClient.drop_resource_plan(wmDropResourcePlanRequest)).thenReturn(wmDropResourcePlanResponse); + WMDropResourcePlanResponse result = handler.drop_resource_plan(wmDropResourcePlanRequest); + assertThat(result, is(wmDropResourcePlanResponse)); + verify(primaryClient, times(1)).drop_resource_plan(wmDropResourcePlanRequest); + } + + @Test + public void create_wm_trigger() throws TException { + WMCreateTriggerRequest wmCreateTriggerRequest = new WMCreateTriggerRequest(); + WMCreateTriggerResponse wmCreateTriggerResponse = new WMCreateTriggerResponse(); + + when(primaryClient.create_wm_trigger(wmCreateTriggerRequest)).thenReturn(wmCreateTriggerResponse); + WMCreateTriggerResponse result = handler.create_wm_trigger(wmCreateTriggerRequest); + assertThat(result, is(wmCreateTriggerResponse)); + verify(primaryClient, times(1)).create_wm_trigger(wmCreateTriggerRequest); + } + + @Test + public void alter_wm_trigger() throws TException { + WMAlterTriggerRequest wmAlterTriggerRequest = new WMAlterTriggerRequest(); + WMAlterTriggerResponse wmAlterTriggerResponse = new WMAlterTriggerResponse(); + + when(primaryClient.alter_wm_trigger(wmAlterTriggerRequest)).thenReturn(wmAlterTriggerResponse); + WMAlterTriggerResponse result = handler.alter_wm_trigger(wmAlterTriggerRequest); + assertThat(result, is(wmAlterTriggerResponse)); + verify(primaryClient, times(1)).alter_wm_trigger(wmAlterTriggerRequest); + } + + @Test + public void drop_wm_trigger() throws TException { + WMDropTriggerRequest wmDropTriggerRequest = new WMDropTriggerRequest(); + WMDropTriggerResponse wmDropTriggerResponse = new WMDropTriggerResponse(); + + when(primaryClient.drop_wm_trigger(wmDropTriggerRequest)).thenReturn(wmDropTriggerResponse); + WMDropTriggerResponse result = 
handler.drop_wm_trigger(wmDropTriggerRequest); + assertThat(result, is(wmDropTriggerResponse)); + verify(primaryClient, times(1)).drop_wm_trigger(wmDropTriggerRequest); + } + + @Test + public void get_triggers_for_resourceplan() throws TException { + WMGetTriggersForResourePlanRequest wmGetTriggersForResourePlanRequest = new WMGetTriggersForResourePlanRequest(); + WMGetTriggersForResourePlanResponse wmGetTriggersForResourePlanResponse = new WMGetTriggersForResourePlanResponse(); + + when(primaryClient.get_triggers_for_resourceplan(wmGetTriggersForResourePlanRequest)).thenReturn(wmGetTriggersForResourePlanResponse); + WMGetTriggersForResourePlanResponse result = handler.get_triggers_for_resourceplan(wmGetTriggersForResourePlanRequest); + assertThat(result, is(wmGetTriggersForResourePlanResponse)); + verify(primaryClient, times(1)).get_triggers_for_resourceplan(wmGetTriggersForResourePlanRequest); + } + + @Test + public void create_wm_pool() throws TException { + WMCreatePoolRequest wmCreatePoolRequest = new WMCreatePoolRequest(); + WMCreatePoolResponse wmCreatePoolResponse = new WMCreatePoolResponse(); + + when(primaryClient.create_wm_pool(wmCreatePoolRequest)).thenReturn(wmCreatePoolResponse); + WMCreatePoolResponse result = handler.create_wm_pool(wmCreatePoolRequest); + assertThat(result, is(wmCreatePoolResponse)); + verify(primaryClient, times(1)).create_wm_pool(wmCreatePoolRequest); + } + + @Test + public void alter_wm_pool() throws TException { + WMAlterPoolRequest wmAlterPoolRequest = new WMAlterPoolRequest(); + WMAlterPoolResponse wmAlterPoolResponse = new WMAlterPoolResponse(); + + when(primaryClient.alter_wm_pool(wmAlterPoolRequest)).thenReturn(wmAlterPoolResponse); + WMAlterPoolResponse result = handler.alter_wm_pool(wmAlterPoolRequest); + assertThat(result, is(wmAlterPoolResponse)); + verify(primaryClient, times(1)).alter_wm_pool(wmAlterPoolRequest); + } + + @Test + public void drop_wm_pool() throws TException { + WMDropPoolRequest wmDropPoolRequest = new WMDropPoolRequest(); + WMDropPoolResponse wmDropPoolResponse = new WMDropPoolResponse(); + + when(primaryClient.drop_wm_pool(wmDropPoolRequest)).thenReturn(wmDropPoolResponse); + WMDropPoolResponse result = handler.drop_wm_pool(wmDropPoolRequest); + assertThat(result, is(wmDropPoolResponse)); + verify(primaryClient, times(1)).drop_wm_pool(wmDropPoolRequest); + } + + @Test + public void create_or_update_wm_mapping() throws TException { + WMCreateOrUpdateMappingRequest wmCreateOrUpdateMappingRequest = new WMCreateOrUpdateMappingRequest(); + WMCreateOrUpdateMappingResponse wmCreateOrUpdateMappingResponse = new WMCreateOrUpdateMappingResponse(); + + when(primaryClient.create_or_update_wm_mapping(wmCreateOrUpdateMappingRequest)).thenReturn(wmCreateOrUpdateMappingResponse); + WMCreateOrUpdateMappingResponse result = handler.create_or_update_wm_mapping(wmCreateOrUpdateMappingRequest); + assertThat(result, is(wmCreateOrUpdateMappingResponse)); + verify(primaryClient, times(1)).create_or_update_wm_mapping(wmCreateOrUpdateMappingRequest); + } + + @Test + public void drop_wm_mapping() throws TException { + WMDropMappingRequest wmDropMappingRequest = new WMDropMappingRequest(); + WMDropMappingResponse wmDropMappingResponse = new WMDropMappingResponse(); + + when(primaryClient.drop_wm_mapping(wmDropMappingRequest)).thenReturn(wmDropMappingResponse); + WMDropMappingResponse result = handler.drop_wm_mapping(wmDropMappingRequest); + assertThat(result, is(wmDropMappingResponse)); + verify(primaryClient, 
times(1)).drop_wm_mapping(wmDropMappingRequest);
+  }
+
+  @Test
+  public void create_or_drop_wm_trigger_to_pool_mapping() throws TException {
+    WMCreateOrDropTriggerToPoolMappingRequest wmCreateOrDropTriggerToPoolMappingRequest = new WMCreateOrDropTriggerToPoolMappingRequest();
+    WMCreateOrDropTriggerToPoolMappingResponse wmCreateOrDropTriggerToPoolMappingResponse = new WMCreateOrDropTriggerToPoolMappingResponse();
+
+    when(primaryClient.create_or_drop_wm_trigger_to_pool_mapping(wmCreateOrDropTriggerToPoolMappingRequest)).thenReturn(wmCreateOrDropTriggerToPoolMappingResponse);
+    WMCreateOrDropTriggerToPoolMappingResponse result = handler.create_or_drop_wm_trigger_to_pool_mapping(wmCreateOrDropTriggerToPoolMappingRequest);
+    assertThat(result, is(wmCreateOrDropTriggerToPoolMappingResponse));
+    verify(primaryClient, times(1)).create_or_drop_wm_trigger_to_pool_mapping(wmCreateOrDropTriggerToPoolMappingRequest);
+  }
+
+  @Test
+  public void add_schema_version() throws TException {
+    ISchemaName iSchemaName = new ISchemaName();
+    iSchemaName.setDbName(DB_P);
+    iSchemaName.setCatName(CAT_1);
+    iSchemaName.setSchemaName(SCH_1);
+
+    SchemaVersion schemaVersion = new SchemaVersion();
+    schemaVersion.setSchema(iSchemaName);
+
+    when(primaryMapping.transformInboundSchemaVersion(schemaVersion)).thenReturn(schemaVersion);
+    doNothing().when(primaryClient).add_schema_version(isA(SchemaVersion.class));
+    handler.add_schema_version(schemaVersion);
+    verify(primaryClient, times(1)).add_schema_version(schemaVersion);
+  }
+
+  @Test
+  public void get_schema_latest_version() throws TException {
+    ISchemaName iSchemaName = new ISchemaName();
+    iSchemaName.setDbName(DB_P);
+    iSchemaName.setCatName(CAT_1);
+    iSchemaName.setSchemaName(SCH_1);
+
+    SchemaVersion schemaVersion = new SchemaVersion();
+    schemaVersion.setSchema(iSchemaName);
+
+    SchemaVersion outboundSchemaVersion = new SchemaVersion();
+    outboundSchemaVersion.setSchema(iSchemaName);
+
+    when(primaryMapping.transformInboundISchemaName(iSchemaName)).thenReturn(iSchemaName);
+    when(primaryClient.get_schema_latest_version(iSchemaName)).thenReturn(schemaVersion);
+    when(primaryMapping.transformOutboundSchemaVersion(schemaVersion)).thenReturn(outboundSchemaVersion);
+
+    SchemaVersion result = handler.get_schema_latest_version(iSchemaName);
+    assertThat(result, is(outboundSchemaVersion));
+    verify(primaryClient, times(1)).get_schema_latest_version(iSchemaName);
+  }
+
+  @Test
+  public void get_schema_all_versions() throws TException {
+    ISchemaName iSchemaName = new ISchemaName();
+    iSchemaName.setDbName(DB_P);
+    iSchemaName.setCatName(CAT_1);
+    iSchemaName.setSchemaName(SCH_1);
+
+    SchemaVersion schemaVersion = new SchemaVersion();
+    schemaVersion.setSchema(iSchemaName);
+
+    SchemaVersion outboundSchemaVersion = new SchemaVersion();
+    outboundSchemaVersion.setSchema(iSchemaName);
+
+    when(primaryClient.get_schema_all_versions(iSchemaName)).thenReturn(Lists.newArrayList(schemaVersion));
+    when(primaryMapping.transformInboundISchemaName(iSchemaName)).thenReturn(iSchemaName);
+    when(primaryMapping.transformOutboundSchemaVersions(Lists.newArrayList(schemaVersion))).thenReturn(Lists.newArrayList(outboundSchemaVersion));
+    List<SchemaVersion> result = handler.get_schema_all_versions(iSchemaName);
+    assertThat(result, is(Lists.newArrayList(outboundSchemaVersion)));
+    verify(primaryClient, times(1)).get_schema_all_versions(iSchemaName);
+  }
+
+  @Test
+  public void drop_schema_version() throws TException {
+    SchemaVersionDescriptor schemaVersionDescriptor = new
SchemaVersionDescriptor(); + ISchemaName iSchemaName = new ISchemaName(); + iSchemaName.setDbName(DB_P); + iSchemaName.setCatName(CAT_1); + iSchemaName.setSchemaName(SCH_1); + schemaVersionDescriptor.setSchema(iSchemaName); + + when(primaryMapping.transformInboundSchemaVersionDescriptor(schemaVersionDescriptor)).thenReturn(schemaVersionDescriptor); + doNothing().when(primaryClient).drop_schema_version(isA(SchemaVersionDescriptor.class)); + handler.drop_schema_version(schemaVersionDescriptor); + verify(primaryClient, times(1)).drop_schema_version(schemaVersionDescriptor); + } + + @Test + public void get_schemas_by_cols() throws TException { + FindSchemasByColsRqst findSchemasByColsRqst = new FindSchemasByColsRqst(); + FindSchemasByColsResp findSchemasByColsResp = new FindSchemasByColsResp(); + + when(primaryMapping.transformOutboundFindSchemasByColsResp(findSchemasByColsResp)).thenReturn(findSchemasByColsResp); + when(primaryClient.get_schemas_by_cols(findSchemasByColsRqst)).thenReturn(findSchemasByColsResp); + FindSchemasByColsResp result = handler.get_schemas_by_cols(findSchemasByColsRqst); + assertThat(result, is(findSchemasByColsResp)); + verify(primaryClient, times(1)).get_schemas_by_cols(findSchemasByColsRqst); + } + + @Test + public void map_schema_version_to_serde() throws TException { + MapSchemaVersionToSerdeRequest mapSchemaVersionToSerdeRequest = new MapSchemaVersionToSerdeRequest(); + + when(primaryMapping.transformInboundMapSchemaVersionToSerdeRequest(mapSchemaVersionToSerdeRequest)). + thenReturn(new MapSchemaVersionToSerdeRequest()); + + doNothing().when(primaryClient).map_schema_version_to_serde(isA(MapSchemaVersionToSerdeRequest.class)); + handler.map_schema_version_to_serde(mapSchemaVersionToSerdeRequest); + verify(primaryClient, times(1)).map_schema_version_to_serde(mapSchemaVersionToSerdeRequest); + } + + @Test + public void set_schema_version_state() throws TException { + SetSchemaVersionStateRequest setSchemaVersionStateRequest = new SetSchemaVersionStateRequest(); + doNothing().when(primaryClient).set_schema_version_state(isA(SetSchemaVersionStateRequest.class)); + when(primaryMapping.transformInboundSetSchemaVersionStateRequest(setSchemaVersionStateRequest)). 
+        thenReturn(new SetSchemaVersionStateRequest());
+    handler.set_schema_version_state(setSchemaVersionStateRequest);
+    verify(primaryClient, times(1)).set_schema_version_state(setSchemaVersionStateRequest);
+  }
+
+  @Test
+  public void add_serde() throws TException {
+    SerDeInfo serDeInfo = new SerDeInfo();
+    doNothing().when(primaryClient).add_serde(isA(SerDeInfo.class));
+    handler.add_serde(serDeInfo);
+    verify(primaryClient, times(1)).add_serde(serDeInfo);
+  }
+
+  @Test
+  public void get_serde() throws TException {
+    GetSerdeRequest serdeRequest = new GetSerdeRequest();
+    serdeRequest.setSerdeName("serdeName");
+    serdeRequest.setFieldValue(GetSerdeRequest._Fields.SERDE_NAME, "serdeName");
+
+    SerDeInfo serDeInfo = new SerDeInfo();
+
+    when(primaryClient.get_serde(serdeRequest)).thenReturn(serDeInfo);
+    SerDeInfo result = handler.get_serde(serdeRequest);
+    assertThat(result, is(serDeInfo));
+  }
+
+  @Test
+  public void get_lock_materialization_rebuild() throws TException {
+    LockResponse lockResponse = new LockResponse();
+    lockResponse.setFieldValue(LockResponse._Fields.LOCKID, 1000L);
+
+    when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn(DB_P);
+    when(primaryClient.get_lock_materialization_rebuild(DB_P, CAT_1, 1000L)).thenReturn(lockResponse);
+    LockResponse result = handler.get_lock_materialization_rebuild(DB_P, CAT_1, 1000L);
+    assertThat(result, is(lockResponse));
+  }
+
+  @Test
+  public void heartbeat_lock_materialization_rebuild() throws TException {
+    when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn(DB_P);
+    when(primaryClient.heartbeat_lock_materialization_rebuild(DB_P, CAT_1, 1000L)).thenReturn(true);
+    boolean result = handler.heartbeat_lock_materialization_rebuild(DB_P, CAT_1, 1000L);
+    assertThat(result, is(true));
+  }
+
+  @Test
+  public void add_runtime_stats() throws TException {
+    RuntimeStat runtimeStat = new RuntimeStat();
+    runtimeStat.setFieldValue(RuntimeStat._Fields.PAYLOAD, ByteBuffer.allocate(10));
+
+    handler.add_runtime_stats(runtimeStat);
+    verify(primaryClient).add_runtime_stats(runtimeStat);
+  }
+
+  @Test
+  public void get_runtime_stats() throws TException {
+    GetRuntimeStatsRequest getRuntimeStatsRequest = new GetRuntimeStatsRequest();
+    getRuntimeStatsRequest.setFieldValue(GetRuntimeStatsRequest._Fields.MAX_WEIGHT, 1);
+
+    List<RuntimeStat> runtimeStatList = Lists.newArrayList(new RuntimeStat());
+
+    when(primaryClient.get_runtime_stats(getRuntimeStatsRequest)).thenReturn(runtimeStatList);
+    List<RuntimeStat> result = handler.get_runtime_stats(getRuntimeStatsRequest);
+    assertThat(result, is(runtimeStatList));
+  }
+
+  @Test
+  public void cm_recycle() throws TException {
+    CmRecycleRequest cmRecycleRequest = new CmRecycleRequest();
+    cmRecycleRequest.setFieldValue(CmRecycleRequest._Fields.DATA_PATH, "test");
+
+    CmRecycleResponse cmRecycleResponse = new CmRecycleResponse();
+
+    when(primaryClient.cm_recycle(cmRecycleRequest)).thenReturn(cmRecycleResponse);
+    CmRecycleResponse result = handler.cm_recycle(cmRecycleRequest);
+    assertThat(result, is(cmRecycleResponse));
+  }
+
+  @Test
+  public void get_notification_events_count() throws TException {
+    NotificationEventsCountRequest notificationEventsCountRequest = new NotificationEventsCountRequest();
+    notificationEventsCountRequest.setDbName(DB_P);
+    notificationEventsCountRequest.setCatName(CAT_1);
+
+    NotificationEventsCountResponse notificationEventsCountResponse = new NotificationEventsCountResponse();
+    notificationEventsCountResponse.setEventsCount(10);
+
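+    // both the count request and its response are stubbed to pass through the
+    // primary mapping unchanged before and after delegation to the primary client
+   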
when(primaryMapping.transformInboundNotificationEventsCountRequest(notificationEventsCountRequest)).thenReturn(notificationEventsCountRequest); + when(primaryClient.get_notification_events_count(notificationEventsCountRequest)).thenReturn(notificationEventsCountResponse); + NotificationEventsCountResponse result = handler.get_notification_events_count(notificationEventsCountRequest); + assertThat(result, is(notificationEventsCountResponse)); + } + + @Test + public void get_unique_constraints() throws TException { + UniqueConstraintsRequest uniqueConstraintsRequest = new UniqueConstraintsRequest(); + uniqueConstraintsRequest.setDb_name(DB_P); + uniqueConstraintsRequest.setTbl_name(TBL_1); + uniqueConstraintsRequest.setCatName(CAT_1); + + UniqueConstraintsResponse uniqueConstraintsResponse = new UniqueConstraintsResponse(); + uniqueConstraintsResponse.setUniqueConstraints(Lists.newArrayList(new SQLUniqueConstraint())); + + when(primaryMapping.transformInboundUniqueConstraintsRequest(uniqueConstraintsRequest)). + thenReturn(uniqueConstraintsRequest); + when(primaryMapping.transformOutboundUniqueConstraintsResponse(uniqueConstraintsResponse)). + thenReturn(uniqueConstraintsResponse); + + when(primaryClient.get_unique_constraints(uniqueConstraintsRequest)).thenReturn(uniqueConstraintsResponse); + UniqueConstraintsResponse result = handler.get_unique_constraints(uniqueConstraintsRequest); + assertThat(result, is(uniqueConstraintsResponse)); + } + + @Test + public void get_not_null_constraints() throws TException { + NotNullConstraintsRequest notNullConstraintsRequest = new NotNullConstraintsRequest(); + notNullConstraintsRequest.setDb_name(DB_P); + notNullConstraintsRequest.setTbl_name(TBL_1); + notNullConstraintsRequest.setCatName(CAT_1); + + NotNullConstraintsResponse notNullConstraintsResponse = new NotNullConstraintsResponse(); + notNullConstraintsResponse.setNotNullConstraints(Lists.newArrayList(new SQLNotNullConstraint())); + + when(primaryMapping.transformInboundNotNullConstraintsRequest(notNullConstraintsRequest)).thenReturn(notNullConstraintsRequest); + when(primaryMapping.transformOutboundNotNullConstraintsResponse(notNullConstraintsResponse)).thenReturn(notNullConstraintsResponse); + + when(primaryClient.get_not_null_constraints(notNullConstraintsRequest)).thenReturn(notNullConstraintsResponse); + NotNullConstraintsResponse result = handler.get_not_null_constraints(notNullConstraintsRequest); + assertThat(result, is(notNullConstraintsResponse)); + } + + @Test + public void get_default_constraints() throws TException { + DefaultConstraintsRequest defaultConstraintsRequest = new DefaultConstraintsRequest(); + defaultConstraintsRequest.setDb_name(DB_P); + defaultConstraintsRequest.setTbl_name(TBL_1); + defaultConstraintsRequest.setCatName(CAT_1); + + DefaultConstraintsResponse defaultConstraintsResponse = new DefaultConstraintsResponse(); + defaultConstraintsResponse.setDefaultConstraints(Lists.newArrayList(new SQLDefaultConstraint())); + + when(primaryMapping.transformInboundDefaultConstraintsRequest(defaultConstraintsRequest)). + thenReturn(defaultConstraintsRequest); + when(primaryMapping.transformOutboundDefaultConstraintsResponse(defaultConstraintsResponse)). 
+ thenReturn(defaultConstraintsResponse); + + when(primaryClient.get_default_constraints(defaultConstraintsRequest)).thenReturn(defaultConstraintsResponse); + DefaultConstraintsResponse result = handler.get_default_constraints(defaultConstraintsRequest); + assertThat(result, is(defaultConstraintsResponse)); + } + + @Test + public void get_check_constraints() throws TException { + CheckConstraintsRequest checkConstraintsRequest = new CheckConstraintsRequest(); + checkConstraintsRequest.setDb_name(DB_P); + checkConstraintsRequest.setTbl_name(TBL_1); + checkConstraintsRequest.setCatName(CAT_1); + + CheckConstraintsResponse checkConstraintsResponse = new CheckConstraintsResponse(); + checkConstraintsResponse.setCheckConstraints(Lists.newArrayList(new SQLCheckConstraint())); + + when(primaryMapping.transformInboundCheckConstraintsRequest(checkConstraintsRequest)).thenReturn(checkConstraintsRequest); + when(primaryMapping.transformOutboundCheckConstraintsResponse(checkConstraintsResponse)).thenReturn(checkConstraintsResponse); + + when(primaryClient.get_check_constraints(checkConstraintsRequest)).thenReturn(checkConstraintsResponse); + CheckConstraintsResponse result = handler.get_check_constraints(checkConstraintsRequest); + assertThat(result, is(checkConstraintsResponse)); + } + + @Test + public void get_materialized_views_for_rewriting() throws TException { + String dbName = DB_P; + List expected = Arrays.asList("view1", "view2"); + when(primaryClient.get_materialized_views_for_rewriting(dbName)).thenReturn(expected); + List result = handler.get_materialized_views_for_rewriting(dbName); + assertThat(result, is(expected)); + } + + @Test + public void get_materialization_invalidation_info() throws TException { + CreationMetadata request = new CreationMetadata(); + request.setDbName(DB_P); + when(primaryMapping.transformInboundCreationMetadata(request)).thenReturn(request); + handler.get_materialization_invalidation_info(request, "dummy"); + verify(primaryClient).get_materialization_invalidation_info(request, "dummy"); + } + + @Test + public void update_creation_metadata() throws TException { + CreationMetadata request = new CreationMetadata(); + handler.update_creation_metadata(CAT_1, DB_P, TBL_1, request); + verify(primaryClient).update_creation_metadata(CAT_1, DB_P, TBL_1, request); + } } diff --git a/waggle-dance-integration-tests/pom.xml b/waggle-dance-integration-tests/pom.xml index 5b20b2792..e56995c45 100644 --- a/waggle-dance-integration-tests/pom.xml +++ b/waggle-dance-integration-tests/pom.xml @@ -10,7 +10,7 @@ waggle-dance-integration-tests - 9.5.0 + 12.4 @@ -36,46 +36,14 @@ org.apache.hive hive-common - - - org.mortbay.jetty - * - - - org.eclipse.jetty - * - - - org.eclipse.jetty.aggregate - * - - - org.eclipse.jetty.orbit - * - - - org.codehaus.jettison - * - - - javax.servlet - * - - - log4j-slf4j-impl - org.apache.logging.log4j - - org.apache.hive - hive-metastore - - - log4j-web - org.apache.logging.log4j - - + hive-standalone-metastore + + + org.projectlombok + lombok @@ -108,21 +76,11 @@ org.springframework.boot spring-boot-starter-test test - - - spring-boot-starter-logging - org.springframework.boot - - - org.hamcrest - * - -
com.github.stefanbirkner system-rules - 1.18.0 + 1.19.0 test @@ -134,23 +92,12 @@ fm.last.commons lastcommons-test - 5.2.1 test com.hotels beeju test - - - org.slf4j - slf4j-log4j12 - - - org.eclipse.jetty.aggregate - jetty-all - - io.github.openfeign diff --git a/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/TestUtils.java b/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/TestUtils.java index 335000e11..6b45e451f 100644 --- a/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/TestUtils.java +++ b/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/TestUtils.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,9 +29,11 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.thrift.TException; -final class TestUtils { +import lombok.AccessLevel; +import lombok.NoArgsConstructor; - private TestUtils() {} +@NoArgsConstructor(access = AccessLevel.PRIVATE) +final class TestUtils { public static final List DATA_COLUMNS = Arrays.asList(new FieldSchema("id", "bigint", ""), new FieldSchema("name", "string", ""), new FieldSchema("city", "tinyint", "")); diff --git a/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/WaggleDanceRunner.java b/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/WaggleDanceRunner.java index 910059a64..fa0d21d39 100644 --- a/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/WaggleDanceRunner.java +++ b/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/WaggleDanceRunner.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ */ package com.hotels.bdp.waggledance; -import static org.apache.directory.api.util.Strings.isNotEmpty; +import static org.apache.commons.lang3.StringUtils.isNotEmpty; import static com.google.common.base.Preconditions.checkArgument; diff --git a/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/junit/ServerSocketRule.java b/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/junit/ServerSocketRule.java index 83748ad43..b406a349e 100644 --- a/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/junit/ServerSocketRule.java +++ b/waggle-dance-integration-tests/src/main/java/com/hotels/bdp/waggledance/junit/ServerSocketRule.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2019 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,8 +24,8 @@ import java.nio.channels.SocketChannel; import org.junit.rules.ExternalResource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import lombok.extern.log4j.Log4j2; import com.google.common.io.ByteStreams; @@ -35,8 +35,9 @@ * This class can be used to emulate a Graphite Carbon relay, for example. *

*/ + +@Log4j2 public class ServerSocketRule extends ExternalResource { - private static final Logger LOG = LoggerFactory.getLogger(ServerSocketRule.class); private final InetSocketAddress address; private final ByteArrayOutputStream output = new ByteArrayOutputStream(); @@ -59,7 +60,7 @@ public ServerSocketRule() { @Override protected void after() { - LOG.info("Socket closing, handled {} requests", requests); + log.info("Socket closing, handled {} requests", requests); try { serverSocketChannel.close(); } catch (IOException e) { diff --git a/waggle-dance-integration-tests/src/test/java/com/hotels/bdp/waggledance/WaggleDanceIntegrationTest.java b/waggle-dance-integration-tests/src/test/java/com/hotels/bdp/waggledance/WaggleDanceIntegrationTest.java index 3194598f3..d7ad2b5ae 100644 --- a/waggle-dance-integration-tests/src/test/java/com/hotels/bdp/waggledance/WaggleDanceIntegrationTest.java +++ b/waggle-dance-integration-tests/src/test/java/com/hotels/bdp/waggledance/WaggleDanceIntegrationTest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2016-2021 Expedia, Inc. + * Copyright (C) 2016-2023 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,6 +22,8 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; +import static com.google.common.collect.Lists.newArrayList; + import static com.hotels.bdp.waggledance.TestUtils.createPartitionedTable; import static com.hotels.bdp.waggledance.TestUtils.createUnpartitionedTable; import static com.hotels.bdp.waggledance.TestUtils.newPartition; @@ -60,13 +62,12 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.thrift.TException; +import org.hamcrest.Matchers; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.springframework.boot.web.client.RestTemplateBuilder; import org.springframework.web.client.RestTemplate; @@ -76,6 +77,7 @@ import feign.jaxrs.JAXRSContract; import fm.last.commons.test.file.ClassDataFolder; import fm.last.commons.test.file.DataFolder; +import lombok.extern.log4j.Log4j2; import com.google.common.collect.Lists; @@ -93,10 +95,9 @@ import com.hotels.beeju.ThriftHiveMetaStoreJUnitRule; import com.hotels.hcommon.hive.metastore.client.tunnelling.MetastoreTunnel; +@Log4j2 public class WaggleDanceIntegrationTest { - private static final Logger LOG = LoggerFactory.getLogger(WaggleDanceIntegrationTest.class); - private static final String LOCAL_DATABASE = "local_database"; private static final String LOCAL_TABLE = "local_table"; private static final String REMOTE_DATABASE = "remote_database"; @@ -125,10 +126,10 @@ public void init() throws Exception { remoteWarehouseUri = temporaryFolder.newFolder("remote-warehouse"); createLocalTable(new File(localWarehouseUri, LOCAL_DATABASE + "/" + LOCAL_TABLE), LOCAL_TABLE); - LOG.info(">>>> Table {} ", localServer.client().getTable(LOCAL_DATABASE, LOCAL_TABLE)); + log.info(">>>> Table {} ", localServer.client().getTable(LOCAL_DATABASE, LOCAL_TABLE)); createRemoteTable(new File(remoteWarehouseUri, REMOTE_DATABASE + "/" + REMOTE_TABLE), REMOTE_TABLE); - LOG.info(">>>> Table {} ", remoteServer.client().getTable(REMOTE_DATABASE, REMOTE_TABLE)); + log.info(">>>> Table {} ", remoteServer.client().getTable(REMOTE_DATABASE, REMOTE_TABLE)); executor = 
diff --git a/waggle-dance-integration-tests/src/test/java/com/hotels/bdp/waggledance/WaggleDanceIntegrationTest.java b/waggle-dance-integration-tests/src/test/java/com/hotels/bdp/waggledance/WaggleDanceIntegrationTest.java
index 3194598f3..d7ad2b5ae 100644
--- a/waggle-dance-integration-tests/src/test/java/com/hotels/bdp/waggledance/WaggleDanceIntegrationTest.java
+++ b/waggle-dance-integration-tests/src/test/java/com/hotels/bdp/waggledance/WaggleDanceIntegrationTest.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2016-2021 Expedia, Inc.
+ * Copyright (C) 2016-2023 Expedia, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,6 +22,8 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.fail;
 
+import static com.google.common.collect.Lists.newArrayList;
+
 import static com.hotels.bdp.waggledance.TestUtils.createPartitionedTable;
 import static com.hotels.bdp.waggledance.TestUtils.createUnpartitionedTable;
 import static com.hotels.bdp.waggledance.TestUtils.newPartition;
@@ -60,13 +62,12 @@
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.thrift.TException;
+import org.hamcrest.Matchers;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.springframework.boot.web.client.RestTemplateBuilder;
 import org.springframework.web.client.RestTemplate;
@@ -76,6 +77,7 @@
 import feign.jaxrs.JAXRSContract;
 import fm.last.commons.test.file.ClassDataFolder;
 import fm.last.commons.test.file.DataFolder;
+import lombok.extern.log4j.Log4j2;
 
 import com.google.common.collect.Lists;
@@ -93,10 +95,9 @@
 import com.hotels.beeju.ThriftHiveMetaStoreJUnitRule;
 import com.hotels.hcommon.hive.metastore.client.tunnelling.MetastoreTunnel;
 
+@Log4j2
 public class WaggleDanceIntegrationTest {
 
-  private static final Logger LOG = LoggerFactory.getLogger(WaggleDanceIntegrationTest.class);
-
   private static final String LOCAL_DATABASE = "local_database";
   private static final String LOCAL_TABLE = "local_table";
   private static final String REMOTE_DATABASE = "remote_database";
@@ -125,10 +126,10 @@ public void init() throws Exception {
     remoteWarehouseUri = temporaryFolder.newFolder("remote-warehouse");
 
     createLocalTable(new File(localWarehouseUri, LOCAL_DATABASE + "/" + LOCAL_TABLE), LOCAL_TABLE);
-    LOG.info(">>>> Table {} ", localServer.client().getTable(LOCAL_DATABASE, LOCAL_TABLE));
+    log.info(">>>> Table {} ", localServer.client().getTable(LOCAL_DATABASE, LOCAL_TABLE));
 
     createRemoteTable(new File(remoteWarehouseUri, REMOTE_DATABASE + "/" + REMOTE_TABLE), REMOTE_TABLE);
-    LOG.info(">>>> Table {} ", remoteServer.client().getTable(REMOTE_DATABASE, REMOTE_TABLE));
+    log.info(">>>> Table {} ", remoteServer.client().getTable(REMOTE_DATABASE, REMOTE_TABLE));
 
     executor = Executors.newSingleThreadExecutor();
   }
@@ -138,7 +139,9 @@ public void destroy() throws Exception {
     if (runner != null) {
       runner.stop();
     }
-    executor.shutdownNow();
+    if (executor != null) {
+      executor.shutdownNow();
+    }
   }
 
   private void createLocalTable(File tableUri, String table) throws Exception {
@@ -156,7 +159,7 @@
   private void createRemoteTable(File tableUri, String table) throws Exception {
     File partitionAsia = new File(tableUri, "continent=Asia");
     File partitionChina = new File(partitionAsia, "country=China");
-    LOG
+    log
         .info(">>>> Partitions added: {}",
             client
                 .add_partitions(Arrays
@@ -272,6 +275,43 @@ public void usePrefix() throws Exception {
     assertTypicalRemoteTable(proxy, waggledRemoteDbName);
   }
 
+  @Test
+  public void manyFederatedMetastores() throws Exception {
+    runner = WaggleDanceRunner
+        .builder(configLocation)
+        .databaseResolution(DatabaseResolution.PREFIXED)
+        .primary("primary", localServer.getThriftConnectionUri(), READ_ONLY)
+        .federate(SECONDARY_METASTORE_NAME, remoteServer.getThriftConnectionUri())
+        .federate("fed1", remoteServer.getThriftConnectionUri())
+        .federate("fed2", remoteServer.getThriftConnectionUri())
+        .federate("fed3", remoteServer.getThriftConnectionUri())
+        .federate("fed4", remoteServer.getThriftConnectionUri())
+        .federate("fed5", remoteServer.getThriftConnectionUri())
+        .federate("fed6", remoteServer.getThriftConnectionUri())
+        .federate("fed7", remoteServer.getThriftConnectionUri())
+        .federate("fed8", remoteServer.getThriftConnectionUri())
+        .federate("fed9", remoteServer.getThriftConnectionUri())
+        .federate("fed10", remoteServer.getThriftConnectionUri())
+        .federate("fed11", remoteServer.getThriftConnectionUri())
+        .federate("fed12", remoteServer.getThriftConnectionUri())
+        .federate("fed13", remoteServer.getThriftConnectionUri())
+        .build();
+
+    runWaggleDance(runner);
+    HiveMetaStoreClient proxy = getWaggleDanceClient();
+
+    List<String> dbs = proxy.getAllDatabases();
+    List<String> expected = newArrayList("default", "local_database", "waggle_remote_default",
+        "waggle_remote_remote_database", "fed1_default", "fed1_remote_database", "fed2_default", "fed2_remote_database",
+        "fed3_default", "fed3_remote_database", "fed4_default", "fed4_remote_database", "fed5_default",
+        "fed5_remote_database", "fed6_default", "fed6_remote_database", "fed7_default", "fed7_remote_database",
+        "fed8_default", "fed8_remote_database", "fed9_default", "fed9_remote_database", "fed10_default",
+        "fed10_remote_database", "fed11_default", "fed11_remote_database", "fed12_default", "fed12_remote_database",
+        "fed13_default", "fed13_remote_database");
+    assertThat(dbs.size(), is(expected.size()));
+    assertThat(dbs, Matchers.containsInAnyOrder(expected.toArray()));
+  }
+
   @Test
   public void usePrimaryPrefix() throws Exception {
     String primaryPrefix = "primary_";
"graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.primary.calls.count 1"); + "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.primary.calls;metricattribute=count 1"); assertMetric(metrics, - "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.primary.success.count 1"); + "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.primary.success;metricattribute=count 1"); assertMetric(metrics, - "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.remote.calls.count 1"); + "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.remote.calls;metricattribute=count 1"); assertMetric(metrics, - "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.remote.success.count 1"); + "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.remote.success;metricattribute=count 1"); } private void assertMetric(Set metrics, String partialMetric) { diff --git a/waggle-dance-rest/pom.xml b/waggle-dance-rest/pom.xml index d541715e1..f6992f179 100644 --- a/waggle-dance-rest/pom.xml +++ b/waggle-dance-rest/pom.xml @@ -38,42 +38,10 @@ org.apache.hive hive-common - - - log4j - log4j - - - org.slf4j - slf4j-log4j12 - - - javax.servlet - servlet-api - - org.apache.hive - hive-metastore - - - log4j-web - org.apache.logging.log4j - - - javax.servlet - javax.servlet-api - - - log4j - log4j - - - org.apache.hbase - hbase-client - - + hive-standalone-metastore @@ -81,12 +49,6 @@ org.springframework.boot spring-boot-starter-test test - - - org.mockito - * - -
diff --git a/waggle-dance-rest/pom.xml b/waggle-dance-rest/pom.xml
index d541715e1..f6992f179 100644
--- a/waggle-dance-rest/pom.xml
+++ b/waggle-dance-rest/pom.xml
@@ -38,42 +38,10 @@
     <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-common</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>servlet-api</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-metastore</artifactId>
-      <exclusions>
-        <exclusion>
-          <artifactId>log4j-web</artifactId>
-          <groupId>org.apache.logging.log4j</groupId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>javax.servlet-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hbase</groupId>
-          <artifactId>hbase-client</artifactId>
-        </exclusion>
-      </exclusions>
+      <artifactId>hive-standalone-metastore</artifactId>
     </dependency>
@@ -81,12 +49,6 @@
     <dependency>
       <groupId>org.springframework.boot</groupId>
       <artifactId>spring-boot-starter-test</artifactId>
       <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.mockito</groupId>
-          <artifactId>*</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
@@ -111,6 +73,11 @@
       <artifactId>hamcrest</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
diff --git a/waggle-dance-rpm/pom.xml b/waggle-dance-rpm/pom.xml
index fe41db8ed..d2a4d88d1 100644
--- a/waggle-dance-rpm/pom.xml
+++ b/waggle-dance-rpm/pom.xml
@@ -117,7 +117,7 @@
   <profiles>
     <profile>
-      <id>sonatype-oss-release</id>
+      <id>sonatype-oss-release-github-actions</id>
       <build>
         <plugins>
           <plugin>
@@ -127,11 +127,6 @@
             <executions>
              <execution>
                 <goals>
                   <goal>attach-rpm</goal>
                 </goals>
-                <configuration>
-                  <keyPassphrase>
-                    <passphrase>${gpg.passphrase}</passphrase>
-                  </keyPassphrase>
-                </configuration>
               </execution>