diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/hms/TestTrinoHive4CatalogWithHiveMetastore.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/hms/TestTrinoHive4CatalogWithHiveMetastore.java
new file mode 100644
index 000000000000..7dd0c423d7e2
--- /dev/null
+++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/hms/TestTrinoHive4CatalogWithHiveMetastore.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.iceberg.catalog.hms;
+
+import io.trino.plugin.base.util.AutoCloseableCloser;
+import io.trino.plugin.hive.containers.Hive4MinioDataLake;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+
+import java.net.URI;
+
+public class TestTrinoHive4CatalogWithHiveMetastore
+        extends TestTrinoHiveCatalogWithHiveMetastore
+{
+    private final AutoCloseableCloser closer = AutoCloseableCloser.create();
+    private Hive4MinioDataLake dataLake;
+
+    @BeforeAll
+    public void setUp()
+    {
+        dataLake = closer.register(new Hive4MinioDataLake(bucketName));
+        dataLake.start();
+    }
+
+    @AfterAll
+    public void tearDown()
+            throws Exception
+    {
+        dataLake = null;
+        closer.close();
+    }
+
+    @Override
+    protected URI hiveMetastoreEndpoint()
+    {
+        return dataLake.getHiveMetastore().getHiveMetastoreEndpoint();
+    }
+
+    @Override
+    protected String minioAddress()
+    {
+        return dataLake.getMinio().getMinioAddress();
+    }
+}
diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/hms/TestTrinoHiveCatalogWithHiveMetastore.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/hms/TestTrinoHiveCatalogWithHiveMetastore.java
index 587f63d2accf..537da441c198 100644
--- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/hms/TestTrinoHiveCatalogWithHiveMetastore.java
+++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/hms/TestTrinoHiveCatalogWithHiveMetastore.java
@@ -57,6 +57,7 @@
 import org.junit.jupiter.api.parallel.Execution;
 
 import java.io.IOException;
+import java.net.URI;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -86,17 +87,16 @@ public class TestTrinoHiveCatalogWithHiveMetastore
         extends BaseTrinoCatalogTest
 {
     private static final Logger LOG = Logger.get(TestTrinoHiveCatalogWithHiveMetastore.class);
+    protected static final String bucketName = "test-hive-catalog-with-hms-" + randomNameSuffix();
     private AutoCloseableCloser closer = AutoCloseableCloser.create();
     // Use MinIO for storage, since HDFS is hard to get working in a unit test
     private HiveMinioDataLake dataLake;
     private TrinoFileSystem fileSystem;
-    private String bucketName;
 
     @BeforeAll
     public void setUp()
     {
-        bucketName = "test-hive-catalog-with-hms-" + randomNameSuffix();
         dataLake = closer.register(new HiveMinioDataLake(bucketName, HIVE3_IMAGE));
         dataLake.start();
     }
@@ -117,7 +117,7 @@ protected TrinoCatalog createTrinoCatalog(boolean useUniqueTableLocations)
                 new HdfsConfigurationInitializer(
                         new HdfsConfig(),
                         Set.of(new TrinoS3ConfigurationInitializer(new HiveS3Config()
-                                .setS3Endpoint(dataLake.getMinio().getMinioAddress())
+                                .setS3Endpoint(minioAddress())
                                 .setS3SslEnabled(false)
                                 .setS3AwsAccessKey(MINIO_ACCESS_KEY)
                                 .setS3AwsSecretKey(MINIO_SECRET_KEY)
@@ -130,7 +130,7 @@ protected TrinoCatalog createTrinoCatalog(boolean useUniqueTableLocations)
                 .thriftMetastoreConfig(new ThriftMetastoreConfig()
                         // Read timed out sometimes happens with the default timeout
                         .setReadTimeout(new Duration(1, MINUTES)))
-                .metastoreClient(dataLake.getHiveHadoop().getHiveMetastoreEndpoint())
+                .metastoreClient(hiveMetastoreEndpoint())
                 .build(closer::register);
         CachingHiveMetastore metastore = createPerTransactionCache(new BridgingHiveMetastore(thriftMetastore), 1000);
         fileSystem = fileSystemFactory.create(SESSION);
@@ -229,6 +229,16 @@ public void testCreateMaterializedView()
         }
     }
 
+    protected URI hiveMetastoreEndpoint()
+    {
+        return dataLake.getHiveHadoop().getHiveMetastoreEndpoint();
+    }
+
+    protected String minioAddress()
+    {
+        return dataLake.getMinio().getMinioAddress();
+    }
+
     @Override
     protected Map defaultNamespaceProperties(String namespaceName)
     {