Spark 3.5: Remove constructor from parameterized base class (apache#9368)
nastra authored and geruh committed Jan 25, 2024
1 parent e3cb8f2 commit d360e03
Showing 9 changed files with 82 additions and 78 deletions.
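
The pattern across these files: the parameterized Spark test base classes no longer take their catalog parameters through constructors. Instead, ParameterizedTestExtension injects them into @Parameter(index = ...) fields, subclasses that need a non-default catalog override the static parameters() method, setup that reads the parameters moves into a @BeforeEach method, and test methods switch from @Test to @TestTemplate so they run once per parameter row. Below is a minimal sketch of a subclass written against the new base class; the class name and test body are illustrative and not part of this commit.

import org.apache.iceberg.Parameters;
import org.junit.jupiter.api.TestTemplate;

// Hypothetical subclass of TestBaseWithCatalog after this change: no constructor,
// parameters arrive through the inherited @Parameter fields.
public class TestWidgetSql extends TestBaseWithCatalog {

  // Optional: narrow the parameter matrix. Without this override, the base class
  // default (the Hadoop catalog row) is used.
  @Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}")
  protected static Object[][] parameters() {
    return new Object[][] {
      {
        SparkCatalogConfig.HIVE.catalogName(),
        SparkCatalogConfig.HIVE.implementation(),
        SparkCatalogConfig.HIVE.properties()
      },
    };
  }

  @TestTemplate
  public void testRunsOncePerCatalog() {
    // catalogName, implementation and catalogConfig are inherited fields that the
    // extension has already populated by the time this method runs.
    sql("CREATE TABLE IF NOT EXISTS %s (id bigint) USING iceberg", tableName);
    sql("DROP TABLE IF EXISTS %s", tableName);
  }
}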
CatalogTestBase.java
@@ -19,7 +19,6 @@
package org.apache.iceberg.spark;

import java.nio.file.Path;
import java.util.Map;
import org.apache.iceberg.ParameterizedTestExtension;
import org.apache.iceberg.Parameters;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -30,7 +29,7 @@ public abstract class CatalogTestBase extends TestBaseWithCatalog {

// these parameters are broken out to avoid changes that need to modify lots of test suites
@Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}")
public static Object[][] parameters() {
protected static Object[][] parameters() {
return new Object[][] {
{
SparkCatalogConfig.HIVE.catalogName(),
@@ -51,12 +50,4 @@ public static Object[][] parameters() {
}

@TempDir protected Path temp;

public CatalogTestBase(SparkCatalogConfig config) {
super(config);
}

public CatalogTestBase(String catalogName, String implementation, Map<String, String> config) {
super(catalogName, implementation, config);
}
}
TestBaseWithCatalog.java
@@ -24,6 +24,9 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.Parameter;
import org.apache.iceberg.ParameterizedTestExtension;
import org.apache.iceberg.Parameters;
import org.apache.iceberg.PlanningMode;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.catalog.Catalog;
@@ -35,11 +38,25 @@
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;

@ExtendWith(ParameterizedTestExtension.class)
public abstract class TestBaseWithCatalog extends TestBase {
protected static File warehouse = null;

@Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}")
protected static Object[][] parameters() {
return new Object[][] {
{
SparkCatalogConfig.HADOOP.catalogName(),
SparkCatalogConfig.HADOOP.implementation(),
SparkCatalogConfig.HADOOP.properties()
},
};
}

@BeforeAll
public static void createWarehouse() throws IOException {
TestBaseWithCatalog.warehouse = File.createTempFile("warehouse", null);
@@ -59,36 +76,33 @@ public static void dropWarehouse() throws IOException {

@TempDir protected File temp;

protected final String catalogName;
protected final Map<String, String> catalogConfig;
protected final Catalog validationCatalog;
protected final SupportsNamespaces validationNamespaceCatalog;
protected final TableIdentifier tableIdent = TableIdentifier.of(Namespace.of("default"), "table");
protected final String tableName;
@Parameter(index = 0)
protected String catalogName;

public TestBaseWithCatalog() {
this(SparkCatalogConfig.HADOOP);
}
@Parameter(index = 1)
protected String implementation;

public TestBaseWithCatalog(SparkCatalogConfig config) {
this(config.catalogName(), config.implementation(), config.properties());
}
@Parameter(index = 2)
protected Map<String, String> catalogConfig;

protected Catalog validationCatalog;
protected SupportsNamespaces validationNamespaceCatalog;
protected TableIdentifier tableIdent = TableIdentifier.of(Namespace.of("default"), "table");
protected String tableName;

public TestBaseWithCatalog(
String catalogName, String implementation, Map<String, String> config) {
this.catalogName = catalogName;
this.catalogConfig = config;
@BeforeEach
public void before() {
this.validationCatalog =
catalogName.equals("testhadoop")
? new HadoopCatalog(spark.sessionState().newHadoopConf(), "file:" + warehouse)
: catalog;
this.validationNamespaceCatalog = (SupportsNamespaces) validationCatalog;

spark.conf().set("spark.sql.catalog." + catalogName, implementation);
config.forEach(
catalogConfig.forEach(
(key, value) -> spark.conf().set("spark.sql.catalog." + catalogName + "." + key, value));

if (config.get("type").equalsIgnoreCase("hadoop")) {
if (catalogConfig.get("type").equalsIgnoreCase("hadoop")) {
spark.conf().set("spark.sql.catalog." + catalogName + ".warehouse", "file:" + warehouse);
}

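
One consequence visible in TestBaseWithCatalog above: the catalog wiring had to move from the constructor into a @BeforeEach method, because the extension can only populate the @Parameter fields after the test instance has been constructed. A short sketch of the timing, using an illustrative subclass name:

import org.junit.jupiter.api.BeforeEach;

// Hypothetical subclass showing why setup belongs in @BeforeEach rather than a constructor.
public class TestTimingExample extends TestBaseWithCatalog {

  public TestTimingExample() {
    // At construction time the @Parameter fields have not been injected yet,
    // so catalogName, implementation and catalogConfig are still null here.
  }

  @BeforeEach
  public void checkInjection() {
    // By the time @BeforeEach runs for a @TestTemplate invocation, the fields
    // are populated, which is why the base class does its spark.conf() setup here.
  }
}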
TestDataFrameWriterV2.java
@@ -37,7 +37,7 @@
import org.apache.spark.sql.internal.SQLConf;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestTemplate;

public class TestDataFrameWriterV2 extends TestBaseWithCatalog {
@BeforeEach
@@ -50,7 +50,7 @@ public void removeTables() {
sql("DROP TABLE IF EXISTS %s", tableName);
}

@Test
@TestTemplate
public void testMergeSchemaFailsWithoutWriterOption() throws Exception {
sql(
"ALTER TABLE %s SET TBLPROPERTIES ('%s'='true')",
@@ -82,7 +82,7 @@ public void testMergeSchemaFailsWithoutWriterOption() throws Exception {
.hasMessage("Field new_col not found in source schema");
}

@Test
@TestTemplate
public void testMergeSchemaWithoutAcceptAnySchema() throws Exception {
Dataset<Row> twoColDF =
jsonToDF(
@@ -109,7 +109,7 @@ public void testMergeSchemaWithoutAcceptAnySchema() throws Exception {
"Cannot write to `testhadoop`.`default`.`table`, the reason is too many data columns");
}

@Test
@TestTemplate
public void testMergeSchemaSparkProperty() throws Exception {
sql(
"ALTER TABLE %s SET TBLPROPERTIES ('%s'='true')",
@@ -143,7 +143,7 @@ public void testMergeSchemaSparkProperty() throws Exception {
sql("select * from %s order by id", tableName));
}

@Test
@TestTemplate
public void testMergeSchemaIcebergProperty() throws Exception {
sql(
"ALTER TABLE %s SET TBLPROPERTIES ('%s'='true')",
@@ -177,7 +177,7 @@ public void testMergeSchemaIcebergProperty() throws Exception {
sql("select * from %s order by id", tableName));
}

@Test
@TestTemplate
public void testWriteWithCaseSensitiveOption() throws NoSuchTableException, ParseException {
SparkSession sparkSession = spark.cloneSession();
sparkSession
TestDataSourceOptions.java
@@ -59,7 +59,7 @@
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestTemplate;
import org.junit.jupiter.api.io.TempDir;

public class TestDataSourceOptions extends TestBaseWithCatalog {
@@ -84,7 +84,7 @@ public static void stopSpark() {
currentSpark.stop();
}

@Test
@TestTemplate
public void testWriteFormatOptionOverridesTableProperties() throws IOException {
String tableLocation = temp.resolve("iceberg-table").toFile().toString();

@@ -114,7 +114,7 @@ public void testWriteFormatOptionOverridesTableProperties() throws IOException {
}
}

@Test
@TestTemplate
public void testNoWriteFormatOption() throws IOException {
String tableLocation = temp.resolve("iceberg-table").toFile().toString();

@@ -139,7 +139,7 @@ public void testNoWriteFormatOption() throws IOException {
}
}

@Test
@TestTemplate
public void testHadoopOptions() throws IOException {
String tableLocation = temp.resolve("iceberg-table").toFile().toString();
Configuration sparkHadoopConf = spark.sessionState().newHadoopConf();
@@ -181,7 +181,7 @@ public void testHadoopOptions() throws IOException {
}
}

@Test
@TestTemplate
public void testSplitOptionsOverridesTableProperties() throws IOException {
String tableLocation = temp.resolve("iceberg-table").toFile().toString();

@@ -224,7 +224,7 @@ public void testSplitOptionsOverridesTableProperties() throws IOException {
.isEqualTo(2);
}

@Test
@TestTemplate
public void testIncrementalScanOptions() throws IOException {
String tableLocation = temp.resolve("iceberg-table").toFile().toString();

@@ -315,7 +315,7 @@ public void testIncrementalScanOptions() throws IOException {
assertThat(resultDf.count()).as("Unprocessed count should match record count").isEqualTo(1);
}

@Test
@TestTemplate
public void testMetadataSplitSizeOptionOverrideTableProperties() throws IOException {
String tableLocation = temp.resolve("iceberg-table").toFile().toString();

@@ -355,7 +355,7 @@ public void testMetadataSplitSizeOptionOverrideTableProperties() throws IOException {
assertThat(entriesDf.javaRDD().getNumPartitions()).as("Num partitions must match").isEqualTo(1);
}

@Test
@TestTemplate
public void testDefaultMetadataSplitSize() throws IOException {
String tableLocation = temp.resolve("iceberg-table").toFile().toString();

@@ -389,7 +389,7 @@ public void testDefaultMetadataSplitSize() throws IOException {
assertThat(partitionNum).as("Spark partitions should match").isEqualTo(expectedSplits);
}

@Test
@TestTemplate
public void testExtraSnapshotMetadata() throws IOException {
String tableLocation = temp.resolve("iceberg-table").toFile().toString();
HadoopTables tables = new HadoopTables(CONF);
@@ -414,7 +414,7 @@ public void testExtraSnapshotMetadata() throws IOException {
.containsEntry("another-key", "anotherValue");
}

@Test
@TestTemplate
public void testExtraSnapshotMetadataWithSQL() throws InterruptedException, IOException {
String tableLocation = temp.resolve("iceberg-table").toFile().toString();
HadoopTables tables = new HadoopTables(CONF);
@@ -459,7 +459,7 @@ public void testExtraSnapshotMetadataWithSQL() throws InterruptedException, IOException {
.containsEntry("another-key", "anotherValue");
}

@Test
@TestTemplate
public void testExtraSnapshotMetadataWithDelete()
throws InterruptedException, NoSuchTableException {
spark.sessionState().conf().setConfString("spark.sql.shuffle.partitions", "1");
TestMetadataTableReadableMetrics.java
@@ -30,6 +30,7 @@
import java.util.Map;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Files;
import org.apache.iceberg.Parameters;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
@@ -48,7 +49,7 @@
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestTemplate;
import org.junit.jupiter.api.io.TempDir;

public class TestMetadataTableReadableMetrics extends TestBaseWithCatalog {
@@ -78,9 +79,16 @@ public class TestMetadataTableReadableMetrics extends TestBaseWithCatalog {
optional(8, "fixedCol", Types.FixedType.ofLength(3)),
optional(9, "binaryCol", Types.BinaryType.get()));

public TestMetadataTableReadableMetrics() {
// only SparkCatalog supports metadata table sql queries
super(SparkCatalogConfig.HIVE);
@Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}")
protected static Object[][] parameters() {
return new Object[][] {
{
// only SparkCatalog supports metadata table sql queries
SparkCatalogConfig.HIVE.catalogName(),
SparkCatalogConfig.HIVE.implementation(),
SparkCatalogConfig.HIVE.properties()
},
};
}

protected String tableName() {
Expand Down Expand Up @@ -190,7 +198,7 @@ private GenericRecord createNestedRecord(Long longCol, Double doubleCol) {
return record;
}

@Test
@TestTemplate
public void testPrimitiveColumns() throws Exception {
Table table = createPrimitiveTable();
DataFile dataFile = table.currentSnapshot().addedDataFiles(table.io()).iterator().next();
@@ -289,7 +297,7 @@ public void testPrimitiveColumns() throws Exception {
assertEquals("Row should match for entries table", expected, entriesReadableMetrics);
}

@Test
@TestTemplate
public void testSelectPrimitiveValues() throws Exception {
createPrimitiveTable();

@@ -328,7 +336,7 @@ public void testSelectPrimitiveValues() throws Exception {
sql("SELECT readable_metrics.longCol.value_count, status FROM %s.entries", tableName));
}

@Test
@TestTemplate
public void testSelectNestedValues() throws Exception {
createNestedTable();

@@ -349,7 +357,7 @@ public void testSelectNestedValues() throws Exception {
entriesReadableMetrics);
}

@Test
@TestTemplate
public void testNestedValues() throws Exception {
Pair<Table, DataFile> table = createNestedTable();
int longColId =
(Diff for the remaining changed files not shown here.)