From d75bf62cb4f43e1d4b3efb226f7ff8c4580fe393 Mon Sep 17 00:00:00 2001
From: manuzhang
Date: Thu, 19 Dec 2024 16:43:08 +0800
Subject: [PATCH] Spark: Don't skip tests in TestSelect for SparkSessionCatalog

---
 .../org/apache/iceberg/spark/sql/TestSelect.java | 13 -------------
 .../org/apache/iceberg/spark/sql/TestSelect.java | 13 -------------
 .../org/apache/iceberg/spark/sql/TestSelect.java | 13 -------------
 3 files changed, 39 deletions(-)

diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java
index f5857973996f..b59368010acb 100644
--- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java
+++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java
@@ -38,7 +38,6 @@
 import org.apache.spark.sql.Row;
 import org.junit.After;
 import org.junit.Assert;
-import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -135,10 +134,6 @@ public void testExpressionPushdown() {
 
   @Test
   public void testMetadataTables() {
-    Assume.assumeFalse(
-        "Spark session catalog does not support metadata tables",
-        "spark_catalog".equals(catalogName));
-
     assertEquals(
         "Snapshot metadata table",
         ImmutableList.of(row(ANY, ANY, null, "append", ANY, ANY)),
@@ -147,10 +142,6 @@ public void testMetadataTables() {
 
   @Test
   public void testSnapshotInTableName() {
-    Assume.assumeFalse(
-        "Spark session catalog does not support extended table names",
-        "spark_catalog".equals(catalogName));
-
     // get the snapshot ID of the last write and get the current row set as expected
     long snapshotId = validationCatalog.loadTable(tableIdent).currentSnapshot().snapshotId();
     List<Object[]> expected = sql("SELECT * FROM %s", tableName);
@@ -176,10 +167,6 @@ public void testSnapshotInTableName() {
 
   @Test
   public void testTimestampInTableName() {
-    Assume.assumeFalse(
-        "Spark session catalog does not support extended table names",
-        "spark_catalog".equals(catalogName));
-
     // get a timestamp just after the last write and get the current row set as expected
     long snapshotTs = validationCatalog.loadTable(tableIdent).currentSnapshot().timestampMillis();
     long timestamp = waitUntilAfter(snapshotTs + 2);
diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java
index a9b2f6395dff..376a89f56261 100644
--- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java
+++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java
@@ -38,7 +38,6 @@
 import org.apache.spark.sql.Row;
 import org.junit.After;
 import org.junit.Assert;
-import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -135,10 +134,6 @@ public void testExpressionPushdown() {
 
   @Test
   public void testMetadataTables() {
-    Assume.assumeFalse(
-        "Spark session catalog does not support metadata tables",
-        "spark_catalog".equals(catalogName));
-
     assertEquals(
         "Snapshot metadata table",
         ImmutableList.of(row(ANY, ANY, null, "append", ANY, ANY)),
@@ -147,10 +142,6 @@ public void testMetadataTables() {
 
   @Test
   public void testSnapshotInTableName() {
-    Assume.assumeFalse(
-        "Spark session catalog does not support extended table names",
-        "spark_catalog".equals(catalogName));
-
     // get the snapshot ID of the last write and get the current row set as expected
     long snapshotId = validationCatalog.loadTable(tableIdent).currentSnapshot().snapshotId();
     List<Object[]> expected = sql("SELECT * FROM %s", tableName);
@@ -176,10 +167,6 @@ public void testSnapshotInTableName() {
 
   @Test
   public void testTimestampInTableName() {
-    Assume.assumeFalse(
-        "Spark session catalog does not support extended table names",
-        "spark_catalog".equals(catalogName));
-
     // get a timestamp just after the last write and get the current row set as expected
     long snapshotTs = validationCatalog.loadTable(tableIdent).currentSnapshot().timestampMillis();
     long timestamp = waitUntilAfter(snapshotTs + 2);
diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java
index 3ecfc60b49b4..bead44c1f2c1 100644
--- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java
+++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestSelect.java
@@ -20,7 +20,6 @@
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static org.assertj.core.api.Assumptions.assumeThat;
 
 import java.text.SimpleDateFormat;
 import java.util.Arrays;
@@ -158,10 +157,6 @@ public void testExpressionPushdown() {
 
   @TestTemplate
   public void testMetadataTables() {
-    assumeThat(catalogName)
-        .as("Spark session catalog does not support metadata tables")
-        .isNotEqualTo("spark_catalog");
-
     assertEquals(
         "Snapshot metadata table",
         ImmutableList.of(row(ANY, ANY, null, "append", ANY, ANY)),
@@ -170,10 +165,6 @@ public void testMetadataTables() {
 
   @TestTemplate
   public void testSnapshotInTableName() {
-    assumeThat(catalogName)
-        .as("Spark session catalog does not support extended table names")
-        .isNotEqualTo("spark_catalog");
-
     // get the snapshot ID of the last write and get the current row set as expected
     long snapshotId = validationCatalog.loadTable(tableIdent).currentSnapshot().snapshotId();
     List<Object[]> expected = sql("SELECT * FROM %s", tableName);
@@ -199,10 +190,6 @@ public void testSnapshotInTableName() {
 
   @TestTemplate
   public void testTimestampInTableName() {
-    assumeThat(catalogName)
-        .as("Spark session catalog does not support extended table names")
-        .isNotEqualTo("spark_catalog");
-
     // get a timestamp just after the last write and get the current row set as expected
     long snapshotTs = validationCatalog.loadTable(tableIdent).currentSnapshot().timestampMillis();
     long timestamp = waitUntilAfter(snapshotTs + 2);