From f0318723cca52c4ad37abf76c53d175c0e9fb23d Mon Sep 17 00:00:00 2001
From: nk1506
Date: Mon, 8 Jan 2024 11:26:41 +0530
Subject: [PATCH] Hive: Unwrap RuntimeException for Hive InvalidOperationException with rename table.

---
 .../org/apache/iceberg/hive/HiveCatalog.java  | 12 +++--
 .../apache/iceberg/hive/MetastoreUtil.java    | 18 +++++--
 .../apache/iceberg/hive/TestHiveCatalog.java  | 53 -------------------
 3 files changed, 24 insertions(+), 59 deletions(-)

diff --git a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
index 516483e49a0c..da2f47cad6fd 100644
--- a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
+++ b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
@@ -251,9 +251,15 @@ public void renameTable(TableIdentifier from, TableIdentifier originalTo) {
     } catch (NoSuchObjectException e) {
       throw new NoSuchTableException("Table does not exist: %s", from);
 
-    } catch (AlreadyExistsException e) {
-      throw new org.apache.iceberg.exceptions.AlreadyExistsException(
-          "Table already exists: %s", to);
+    } catch (RuntimeException e) {
+      if (e.getCause() instanceof InvalidOperationException
+          && e.getMessage() != null
+          && e.getMessage().contains(String.format("new table %s already exists", to))) {
+        throw new org.apache.iceberg.exceptions.AlreadyExistsException(
+            "Table already exists: %s", to);
+      } else {
+        throw e;
+      }
 
     } catch (TException e) {
       throw new RuntimeException("Failed to rename " + from + " to " + to, e);
diff --git a/hive-metastore/src/main/java/org/apache/iceberg/hive/MetastoreUtil.java b/hive-metastore/src/main/java/org/apache/iceberg/hive/MetastoreUtil.java
index 83ff2b60d078..8bd5477382ad 100644
--- a/hive-metastore/src/main/java/org/apache/iceberg/hive/MetastoreUtil.java
+++ b/hive-metastore/src/main/java/org/apache/iceberg/hive/MetastoreUtil.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.iceberg.common.DynMethods;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
@@ -54,7 +55,8 @@ private MetastoreUtil() {}
    * context will be set in a way that turns off stats updates to avoid recursive file listing.
   */
  public static void alterTable(
-      IMetaStoreClient client, String databaseName, String tblName, Table table) {
+      IMetaStoreClient client, String databaseName, String tblName, Table table)
+      throws MetaException {
     alterTable(client, databaseName, tblName, table, ImmutableMap.of());
   }
 
@@ -67,11 +69,21 @@ public static void alterTable(
       String databaseName,
       String tblName,
       Table table,
-      Map<String, String> extraEnv) {
+      Map<String, String> extraEnv)
+      throws MetaException {
     Map<String, String> env = Maps.newHashMapWithExpectedSize(extraEnv.size() + 1);
     env.putAll(extraEnv);
     env.put(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
 
-    ALTER_TABLE.invoke(client, databaseName, tblName, table, new EnvironmentContext(env));
+    try {
+      ALTER_TABLE.invoke(client, databaseName, tblName, table, new EnvironmentContext(env));
+    } catch (RuntimeException e) {
+      // MetaException is wrapped in a RuntimeException by the reflective ALTER_TABLE call
+      if (e.getCause() instanceof MetaException) {
+        throw (MetaException) e.getCause();
+      } else {
+        throw e;
+      }
+    }
   }
 }
diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
index 904e74939eb2..369ad46c8e49 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
@@ -52,7 +52,6 @@
 import org.apache.iceberg.DataFile;
 import org.apache.iceberg.DataFiles;
 import org.apache.iceberg.FileFormat;
-import org.apache.iceberg.HasTableOperations;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.PartitionSpecParser;
 import org.apache.iceberg.Schema;
@@ -82,7 +81,6 @@
 import org.apache.iceberg.types.Types;
 import org.apache.iceberg.util.JsonUtil;
 import org.apache.thrift.TException;
-import org.assertj.core.api.Assertions;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -1181,55 +1179,4 @@ public void testDatabaseLocationWithSlashInWarehouseDir() {
 
     assertThat(database.getLocationUri()).isEqualTo("s3://bucket/database.db");
   }
-
-  // TODO: This test should be removed after fix of https://github.com/apache/iceberg/issues/9289.
-  @Test
-  @Override
-  public void testRenameTableDestinationTableAlreadyExists() {
-    Namespace ns = Namespace.of("newdb");
-    TableIdentifier renamedTable = TableIdentifier.of(ns, "table_renamed");
-
-    if (requiresNamespaceCreate()) {
-      catalog.createNamespace(ns);
-    }
-
-    Assertions.assertThat(catalog.tableExists(TABLE))
-        .as("Source table should not exist before create")
-        .isFalse();
-
-    catalog.buildTable(TABLE, SCHEMA).create();
-    Assertions.assertThat(catalog.tableExists(TABLE))
-        .as("Source table should exist after create")
-        .isTrue();
-
-    Assertions.assertThat(catalog.tableExists(renamedTable))
-        .as("Destination table should not exist before create")
-        .isFalse();
-
-    catalog.buildTable(renamedTable, SCHEMA).create();
-    Assertions.assertThat(catalog.tableExists(renamedTable))
-        .as("Destination table should exist after create")
-        .isTrue();
-
-    // With fix of issues#9289,it should match with CatalogTests and expect
-    // AlreadyExistsException.class
-    // and message should contain as "Table already exists"
-    Assertions.assertThatThrownBy(() -> catalog.renameTable(TABLE, renamedTable))
-        .isInstanceOf(RuntimeException.class)
-        .hasMessageContaining("new table newdb.table_renamed already exists");
-    Assertions.assertThat(catalog.tableExists(TABLE))
-        .as("Source table should still exist after failed rename")
-        .isTrue();
-    Assertions.assertThat(catalog.tableExists(renamedTable))
-        .as("Destination table should still exist after failed rename")
-        .isTrue();
-
-    String sourceTableUUID =
-        ((HasTableOperations) catalog.loadTable(TABLE)).operations().current().uuid();
-    String destinationTableUUID =
-        ((HasTableOperations) catalog.loadTable(renamedTable)).operations().current().uuid();
-    Assertions.assertThat(sourceTableUUID)
-        .as("Source and destination table should remain distinct after failed rename")
-        .isNotEqualTo(destinationTableUUID);
-  }
 }
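
Note on the removed override (illustrative, not part of the patch): with the rename failure now
unwrapped, the expectation described in the deleted comments is the one the shared CatalogTests
suite asserts. A minimal sketch of that assertion, assuming the same `catalog`, `TABLE`, and
`renamedTable` fixtures used by the removed test:

    // Renaming onto an existing table should surface Iceberg's AlreadyExistsException
    // rather than a RuntimeException wrapping Hive's InvalidOperationException.
    Assertions.assertThatThrownBy(() -> catalog.renameTable(TABLE, renamedTable))
        .isInstanceOf(org.apache.iceberg.exceptions.AlreadyExistsException.class)
        .hasMessageContaining("Table already exists");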