From 287f90a2839ffafd7d5e785e0f56b255e5517015 Mon Sep 17 00:00:00 2001
From: Kirill Saied <38224153+PickBas@users.noreply.github.com>
Date: Fri, 13 Oct 2023 14:52:39 +0200
Subject: [PATCH] Spark: Replace .size() > 0 with isEmpty() (#8814)

---
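Note: the text from here to the diffstat sits in the conventional
format-patch notes area (between the "---" separator and the stat), so it
is commentary only and is not part of the applied diff. Every hunk below
is the same mechanical rewrite; a minimal before/after sketch, lifted from
the ScanTaskSetManager hunks (tasks is the task list being staged):

    // before: compares an eagerly computed size against zero
    Preconditions.checkArgument(
        tasks != null && tasks.size() > 0, "Cannot stage null or empty tasks");

    // after: states non-emptiness directly via java.util.Collection#isEmpty()
    Preconditions.checkArgument(
        tasks != null && !tasks.isEmpty(), "Cannot stage null or empty tasks");

Because size() > 0 and !isEmpty() are equivalent for any java.util.Collection,
the rewrite is behavior-preserving and only states the intent more directly.
The JUnit assertions change shape the same way, e.g.
Assert.assertTrue(results.size() > 0) becomes
Assert.assertFalse(results.isEmpty()).
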
 .../iceberg/spark/FileScanTaskSetManager.java |  2 +-
 .../actions/RewriteDataFilesSparkAction.java  |  2 +-
 .../iceberg/spark/source/SparkBatch.java      |  2 +-
 .../iceberg/TestScanTaskSerialization.java    |  4 ++--
 .../spark/actions/TestCreateActions.java      | 22 ++++++++---------
 .../iceberg/spark/ScanTaskSetManager.java     |  2 +-
 .../SparkBinPackPositionDeletesRewriter.java  |  2 +-
 .../actions/SparkZOrderDataRewriter.java      |  2 +-
 .../iceberg/spark/source/SparkBatch.java      |  2 +-
 .../SparkPositionDeletesRewriteBuilder.java   |  2 +-
 .../iceberg/TestScanTaskSerialization.java    |  4 ++--
 .../spark/actions/TestCreateActions.java      | 22 ++++++++---------
 .../TestRewritePositionDeleteFilesAction.java |  2 +-
 .../iceberg/spark/ScanTaskSetManager.java     |  2 +-
 .../SparkBinPackPositionDeletesRewriter.java  |  2 +-
 .../actions/SparkZOrderDataRewriter.java      |  2 +-
 .../SparkPositionDeletesRewriteBuilder.java   |  2 +-
 .../iceberg/TestScanTaskSerialization.java    |  4 ++--
 .../spark/actions/TestCreateActions.java      | 22 ++++++++---------
 .../TestRewritePositionDeleteFilesAction.java |  2 +-
 .../iceberg/spark/ScanTaskSetManager.java     |  2 +-
 .../SparkBinPackPositionDeletesRewriter.java  |  2 +-
 .../actions/SparkZOrderDataRewriter.java      |  2 +-
 .../SparkPositionDeletesRewriteBuilder.java   |  2 +-
 .../iceberg/TestScanTaskSerialization.java    |  4 ++--
 .../spark/actions/TestCreateActions.java      | 24 +++++++++----------
 .../TestRewritePositionDeleteFilesAction.java |  2 +-
 27 files changed, 72 insertions(+), 72 deletions(-)

diff --git a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/FileScanTaskSetManager.java b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/FileScanTaskSetManager.java
index 4b6da39905c1..c9215a2d8dc7 100644
--- a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/FileScanTaskSetManager.java
+++ b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/FileScanTaskSetManager.java
@@ -44,7 +44,7 @@ public static FileScanTaskSetManager get() {
 
   public void stageTasks(Table table, String setID, List tasks) {
     Preconditions.checkArgument(
-        tasks != null && tasks.size() > 0, "Cannot stage null or empty tasks");
+        tasks != null && !tasks.isEmpty(), "Cannot stage null or empty tasks");
     Pair id = toID(table, setID);
     tasksMap.put(id, tasks);
   }
diff --git a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java
index 9ceb1766e950..0cac051d43b9 100644
--- a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java
+++ b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java
@@ -229,7 +229,7 @@ Map<StructLike, List<List<FileScanTask>>> planFileGroups(long startingSnapshotId
           Iterable<FileScanTask> filtered = strategy.selectFilesToRewrite(tasks);
           Iterable<List<FileScanTask>> groupedTasks = strategy.planFileGroups(filtered);
           List<List<FileScanTask>> fileGroups = ImmutableList.copyOf(groupedTasks);
-          if (fileGroups.size() > 0) {
+          if (!fileGroups.isEmpty()) {
             fileGroupsByPartition.put(partition, fileGroups);
           }
         });
diff --git a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatch.java b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatch.java
index bcfa70bcf234..d76ef7be930c 100644
--- a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatch.java
+++ b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatch.java
@@ -112,7 +112,7 @@ private boolean parquetOnly() {
   private boolean parquetBatchReadsEnabled() {
     return readConf.parquetVectorizationEnabled()
         && // vectorization enabled
-        expectedSchema.columns().size() > 0
+        !expectedSchema.columns().isEmpty()
         && // at least one column is projected
         expectedSchema.columns().stream()
             .allMatch(c -> c.type().isPrimitiveType()); // only primitives
diff --git a/spark/v3.2/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java b/spark/v3.2/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
index 5e5d657eab56..281eb2543aee 100644
--- a/spark/v3.2/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
+++ b/spark/v3.2/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
@@ -117,7 +117,7 @@ public void testBaseCombinedScanTaskJavaSerialization() throws Exception {
   public void testBaseScanTaskGroupKryoSerialization() throws Exception {
     BaseScanTaskGroup taskGroup = prepareBaseScanTaskGroupForSerDeTest();
-    Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+    Assert.assertFalse("Task group can't be empty", taskGroup.tasks().isEmpty());
 
     File data = temp.newFile();
     Assert.assertTrue(data.delete());
@@ -141,7 +141,7 @@ public void testBaseScanTaskGroupKryoSerialization() throws Exception {
   public void testBaseScanTaskGroupJavaSerialization() throws Exception {
     BaseScanTaskGroup taskGroup = prepareBaseScanTaskGroupForSerDeTest();
-    Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+    Assert.assertFalse("Task group can't be empty", taskGroup.tasks().isEmpty());
 
     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
     try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
diff --git a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
index 3dccd7952704..c96ed2909fe7 100644
--- a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
+++ b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
@@ -269,7 +269,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results1 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results1.size() > 0);
+    Assert.assertFalse(results1.isEmpty());
     assertEquals("Output must match", results1, expected1);
 
     String newCol2 = "newCol2";
@@ -279,7 +279,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results2 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results2.size() > 0);
+    Assert.assertFalse(results2.isEmpty());
     assertEquals("Output must match", results2, expected2);
   }
 
@@ -313,7 +313,7 @@ public void testAddColumnOnMigratedTableAtMiddle() throws Exception {
 
     // reads should succeed
     List results = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", results, expected);
   }
 
@@ -351,7 +351,7 @@ public void removeColumnsAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results1 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results1.size() > 0);
+    Assert.assertFalse(results1.isEmpty());
     assertEquals("Output must match", expected1, results1);
 
     sql("ALTER TABLE %s DROP COLUMN %s", dest, colName2);
@@ -360,7 +360,7 @@ public void removeColumnsAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results2 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results2.size() > 0);
+    Assert.assertFalse(results2.isEmpty());
     assertEquals("Output must match", expected2, results2);
   }
 
@@ -392,7 +392,7 @@ public void removeColumnFromMiddle() throws Exception {
 
     // reads should return same output as that of non-iceberg table
     List results = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -806,7 +806,7 @@ public boolean accept(File dir, String name) {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -828,7 +828,7 @@ private void threeLevelList(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -853,7 +853,7 @@ private void threeLevelListWithNestedStruct(boolean useLegacyMode) throws Except
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -880,7 +880,7 @@ private void threeLevelLists(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -904,7 +904,7 @@ private void structOfThreeLevelLists(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java
index 84dab88fbad5..e8cd7decce51 100644
--- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java
+++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java
@@ -45,7 +45,7 @@ public static ScanTaskSetManager get() {
 
   public void stageTasks(Table table, String setId, List tasks) {
     Preconditions.checkArgument(
-        tasks != null && tasks.size() > 0, "Cannot stage null or empty tasks");
+        tasks != null && !tasks.isEmpty(), "Cannot stage null or empty tasks");
     Pair id = toId(table, setId);
     tasksMap.put(id, tasks);
   }
diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java
index 1b36441c3c4d..5afd724aad88 100644
--- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java
+++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java
@@ -91,7 +91,7 @@ public Set rewrite(List group) {
 
   protected void doRewrite(String groupId, List group) {
     // all position deletes are of the same partition, because they are in same file group
-    Preconditions.checkArgument(group.size() > 0, "Empty group");
+    Preconditions.checkArgument(!group.isEmpty(), "Empty group");
 
     Types.StructType partitionType = group.get(0).spec().partitionType();
     StructLike partition = group.get(0).partition();
diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java
index 68db76d37fcb..138e126f0ad2 100644
--- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java
+++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java
@@ -176,7 +176,7 @@ private List validZOrderColNames(
     }
 
     Preconditions.checkArgument(
-        validZOrderColNames.size() > 0,
+        !validZOrderColNames.isEmpty(),
         "Cannot ZOrder, all columns provided were identity partition columns and cannot be used");
 
     return validZOrderColNames;
diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatch.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatch.java
index 63aef25ba9b1..1fe86816d834 100644
--- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatch.java
+++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatch.java
@@ -119,7 +119,7 @@ public PartitionReaderFactory createReaderFactory() {
   // - all tasks are of FileScanTask type and read only Parquet files
   private boolean useParquetBatchReads() {
     return readConf.parquetVectorizationEnabled()
-        && expectedSchema.columns().size() > 0
+        && !expectedSchema.columns().isEmpty()
         && expectedSchema.columns().stream().allMatch(c -> c.type().isPrimitiveType())
         && taskGroups.stream().allMatch(this::supportsParquetBatchReads);
   }
diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java
index cc5c987fc4cd..3e6b72cdc39c 100644
--- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java
+++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java
@@ -81,7 +81,7 @@ public Write build() {
     ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
     List tasks = taskSetManager.fetchTasks(table, fileSetId);
     Preconditions.checkArgument(
-        tasks != null && tasks.size() > 0, "No scan tasks found for %s", fileSetId);
+        tasks != null && !tasks.isEmpty(), "No scan tasks found for %s", fileSetId);
 
     int specId = specId(fileSetId, tasks);
     StructLike partition = partition(fileSetId, tasks);
diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
index 5e5d657eab56..14e9e7f605b5 100644
--- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
+++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
@@ -117,7 +117,7 @@ public void testBaseCombinedScanTaskJavaSerialization() throws Exception {
   public void testBaseScanTaskGroupKryoSerialization() throws Exception {
     BaseScanTaskGroup taskGroup = prepareBaseScanTaskGroupForSerDeTest();
-    Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+    Assert.assertTrue("Task group can't be empty", !taskGroup.tasks().isEmpty());
 
     File data = temp.newFile();
     Assert.assertTrue(data.delete());
@@ -141,7 +141,7 @@ public void testBaseScanTaskGroupKryoSerialization() throws Exception {
   public void testBaseScanTaskGroupJavaSerialization() throws Exception {
     BaseScanTaskGroup taskGroup = prepareBaseScanTaskGroupForSerDeTest();
-    Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+    Assert.assertTrue("Task group can't be empty", !taskGroup.tasks().isEmpty());
 
     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
     try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
index 3dccd7952704..c96ed2909fe7 100644
--- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
+++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
@@ -269,7 +269,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results1 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results1.size() > 0);
+    Assert.assertFalse(results1.isEmpty());
     assertEquals("Output must match", results1, expected1);
 
     String newCol2 = "newCol2";
@@ -279,7 +279,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results2 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results2.size() > 0);
+    Assert.assertFalse(results2.isEmpty());
     assertEquals("Output must match", results2, expected2);
   }
 
@@ -313,7 +313,7 @@ public void testAddColumnOnMigratedTableAtMiddle() throws Exception {
 
     // reads should succeed
     List results = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", results, expected);
   }
 
@@ -351,7 +351,7 @@ public void removeColumnsAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results1 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results1.size() > 0);
+    Assert.assertFalse(results1.isEmpty());
     assertEquals("Output must match", expected1, results1);
 
     sql("ALTER TABLE %s DROP COLUMN %s", dest, colName2);
@@ -360,7 +360,7 @@ public void removeColumnsAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results2 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results2.size() > 0);
+    Assert.assertFalse(results2.isEmpty());
     assertEquals("Output must match", expected2, results2);
   }
 
@@ -392,7 +392,7 @@ public void removeColumnFromMiddle() throws Exception {
 
     // reads should return same output as that of non-iceberg table
     List results = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -806,7 +806,7 @@ public boolean accept(File dir, String name) {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -828,7 +828,7 @@ private void threeLevelList(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -853,7 +853,7 @@ private void threeLevelListWithNestedStruct(boolean useLegacyMode) throws Except
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -880,7 +880,7 @@ private void threeLevelLists(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -904,7 +904,7 @@ private void structOfThreeLevelLists(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java
index db0c1cb27c3d..77800e2ea007 100644
--- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java
+++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java
@@ -807,7 +807,7 @@ private void assertLocallySorted(List deleteFiles) {
             spark.read().format("iceberg").load("default." + TABLE_NAME + ".position_deletes");
     deletes.filter(deletes.col("delete_file_path").equalTo(deleteFile.path().toString()));
     List rows = deletes.collectAsList();
-    Assert.assertTrue("Empty delete file found", rows.size() > 0);
+    Assert.assertFalse("Empty delete file found", rows.isEmpty());
     int lastPos = 0;
     String lastPath = "";
     for (Row row : rows) {
diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java
index 84dab88fbad5..e8cd7decce51 100644
--- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java
+++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java
@@ -45,7 +45,7 @@ public static ScanTaskSetManager get() {
 
   public void stageTasks(Table table, String setId, List tasks) {
     Preconditions.checkArgument(
-        tasks != null && tasks.size() > 0, "Cannot stage null or empty tasks");
+        tasks != null && !tasks.isEmpty(), "Cannot stage null or empty tasks");
     Pair id = toId(table, setId);
     tasksMap.put(id, tasks);
   }
diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java
index 1b36441c3c4d..5afd724aad88 100644
--- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java
+++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java
@@ -91,7 +91,7 @@ public Set rewrite(List group) {
 
   protected void doRewrite(String groupId, List group) {
     // all position deletes are of the same partition, because they are in same file group
-    Preconditions.checkArgument(group.size() > 0, "Empty group");
+    Preconditions.checkArgument(!group.isEmpty(), "Empty group");
 
     Types.StructType partitionType = group.get(0).spec().partitionType();
     StructLike partition = group.get(0).partition();
diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java
index 91eaa91f6889..9a618661fe40 100644
--- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java
+++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java
@@ -181,7 +181,7 @@ private List validZOrderColNames(
     }
 
     Preconditions.checkArgument(
-        validZOrderColNames.size() > 0,
+        !validZOrderColNames.isEmpty(),
         "Cannot ZOrder, all columns provided were identity partition columns and cannot be used");
 
     return validZOrderColNames;
diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java
index 498ac1efc811..9fccc05ea25c 100644
--- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java
+++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java
@@ -76,7 +76,7 @@ public Write build() {
     ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
     List tasks = taskSetManager.fetchTasks(table, fileSetId);
     Preconditions.checkArgument(
-        tasks != null && tasks.size() > 0, "No scan tasks found for %s", fileSetId);
+        tasks != null && !tasks.isEmpty(), "No scan tasks found for %s", fileSetId);
 
     int specId = specId(fileSetId, tasks);
     StructLike partition = partition(fileSetId, tasks);
diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
index 5e5d657eab56..281eb2543aee 100644
--- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
+++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
@@ -117,7 +117,7 @@ public void testBaseCombinedScanTaskJavaSerialization() throws Exception {
   public void testBaseScanTaskGroupKryoSerialization() throws Exception {
     BaseScanTaskGroup taskGroup = prepareBaseScanTaskGroupForSerDeTest();
-    Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+    Assert.assertFalse("Task group can't be empty", taskGroup.tasks().isEmpty());
 
     File data = temp.newFile();
     Assert.assertTrue(data.delete());
@@ -141,7 +141,7 @@ public void testBaseScanTaskGroupKryoSerialization() throws Exception {
   public void testBaseScanTaskGroupJavaSerialization() throws Exception {
     BaseScanTaskGroup taskGroup = prepareBaseScanTaskGroupForSerDeTest();
-    Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+    Assert.assertFalse("Task group can't be empty", taskGroup.tasks().isEmpty());
 
     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
     try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
index 3dccd7952704..c96ed2909fe7 100644
--- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
+++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
@@ -269,7 +269,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results1 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results1.size() > 0);
+    Assert.assertFalse(results1.isEmpty());
     assertEquals("Output must match", results1, expected1);
 
     String newCol2 = "newCol2";
@@ -279,7 +279,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results2 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results2.size() > 0);
+    Assert.assertFalse(results2.isEmpty());
     assertEquals("Output must match", results2, expected2);
   }
 
@@ -313,7 +313,7 @@ public void testAddColumnOnMigratedTableAtMiddle() throws Exception {
 
     // reads should succeed
     List results = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", results, expected);
   }
 
@@ -351,7 +351,7 @@ public void removeColumnsAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results1 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results1.size() > 0);
+    Assert.assertFalse(results1.isEmpty());
     assertEquals("Output must match", expected1, results1);
 
     sql("ALTER TABLE %s DROP COLUMN %s", dest, colName2);
@@ -360,7 +360,7 @@ public void removeColumnsAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results2 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results2.size() > 0);
+    Assert.assertFalse(results2.isEmpty());
     assertEquals("Output must match", expected2, results2);
   }
 
@@ -392,7 +392,7 @@ public void removeColumnFromMiddle() throws Exception {
 
     // reads should return same output as that of non-iceberg table
     List results = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -806,7 +806,7 @@ public boolean accept(File dir, String name) {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -828,7 +828,7 @@ private void threeLevelList(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -853,7 +853,7 @@ private void threeLevelListWithNestedStruct(boolean useLegacyMode) throws Except
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -880,7 +880,7 @@ private void threeLevelLists(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -904,7 +904,7 @@ private void structOfThreeLevelLists(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java
index 59c5d44bda77..9149bb7652dc 100644
--- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java
+++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java
@@ -806,7 +806,7 @@ private void assertLocallySorted(List deleteFiles) {
             spark.read().format("iceberg").load("default." + TABLE_NAME + ".position_deletes");
     deletes.filter(deletes.col("delete_file_path").equalTo(deleteFile.path().toString()));
     List rows = deletes.collectAsList();
-    Assert.assertTrue("Empty delete file found", rows.size() > 0);
+    Assert.assertFalse("Empty delete file found", rows.isEmpty());
     int lastPos = 0;
     String lastPath = "";
     for (Row row : rows) {
diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java
index 84dab88fbad5..e8cd7decce51 100644
--- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java
+++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/ScanTaskSetManager.java
@@ -45,7 +45,7 @@ public static ScanTaskSetManager get() {
 
   public void stageTasks(Table table, String setId, List tasks) {
     Preconditions.checkArgument(
-        tasks != null && tasks.size() > 0, "Cannot stage null or empty tasks");
+        tasks != null && !tasks.isEmpty(), "Cannot stage null or empty tasks");
     Pair id = toId(table, setId);
     tasksMap.put(id, tasks);
   }
diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java
index 1b36441c3c4d..5afd724aad88 100644
--- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java
+++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/SparkBinPackPositionDeletesRewriter.java
@@ -91,7 +91,7 @@ public Set rewrite(List group) {
 
   protected void doRewrite(String groupId, List group) {
     // all position deletes are of the same partition, because they are in same file group
-    Preconditions.checkArgument(group.size() > 0, "Empty group");
+    Preconditions.checkArgument(!group.isEmpty(), "Empty group");
 
     Types.StructType partitionType = group.get(0).spec().partitionType();
     StructLike partition = group.get(0).partition();
diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java
index 91eaa91f6889..9a618661fe40 100644
--- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java
+++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/SparkZOrderDataRewriter.java
@@ -181,7 +181,7 @@ private List validZOrderColNames(
     }
 
     Preconditions.checkArgument(
-        validZOrderColNames.size() > 0,
+        !validZOrderColNames.isEmpty(),
         "Cannot ZOrder, all columns provided were identity partition columns and cannot be used");
 
     return validZOrderColNames;
diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java
index 498ac1efc811..9fccc05ea25c 100644
--- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java
+++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeletesRewriteBuilder.java
@@ -76,7 +76,7 @@ public Write build() {
     ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
     List tasks = taskSetManager.fetchTasks(table, fileSetId);
     Preconditions.checkArgument(
-        tasks != null && tasks.size() > 0, "No scan tasks found for %s", fileSetId);
+        tasks != null && !tasks.isEmpty(), "No scan tasks found for %s", fileSetId);
 
     int specId = specId(fileSetId, tasks);
     StructLike partition = partition(fileSetId, tasks);
diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
index 5e5d657eab56..14e9e7f605b5 100644
--- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
+++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/TestScanTaskSerialization.java
@@ -117,7 +117,7 @@ public void testBaseCombinedScanTaskJavaSerialization() throws Exception {
   public void testBaseScanTaskGroupKryoSerialization() throws Exception {
     BaseScanTaskGroup taskGroup = prepareBaseScanTaskGroupForSerDeTest();
-    Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+    Assert.assertTrue("Task group can't be empty", !taskGroup.tasks().isEmpty());
 
     File data = temp.newFile();
     Assert.assertTrue(data.delete());
@@ -141,7 +141,7 @@ public void testBaseScanTaskGroupKryoSerialization() throws Exception {
   public void testBaseScanTaskGroupJavaSerialization() throws Exception {
     BaseScanTaskGroup taskGroup = prepareBaseScanTaskGroupForSerDeTest();
-    Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+    Assert.assertTrue("Task group can't be empty", !taskGroup.tasks().isEmpty());
 
     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
     try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
index 1844385b27f0..1edf57e70d33 100644
--- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
+++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java
@@ -269,7 +269,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results1 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results1.size() > 0);
+    Assert.assertFalse(results1.isEmpty());
     assertEquals("Output must match", results1, expected1);
 
     String newCol2 = "newCol2";
@@ -279,7 +279,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results2 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results2.size() > 0);
+    Assert.assertFalse(results2.isEmpty());
     assertEquals("Output must match", results2, expected2);
   }
 
@@ -313,7 +313,7 @@ public void testAddColumnOnMigratedTableAtMiddle() throws Exception {
 
     // reads should succeed
     List results = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", results, expected);
   }
 
@@ -351,7 +351,7 @@ public void removeColumnsAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results1 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results1.size() > 0);
+    Assert.assertFalse(results1.isEmpty());
     assertEquals("Output must match", expected1, results1);
 
     sql("ALTER TABLE %s DROP COLUMN %s", dest, colName2);
@@ -360,7 +360,7 @@ public void removeColumnsAtEnd() throws Exception {
 
     // reads should succeed without any exceptions
     List results2 = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results2.size() > 0);
+    Assert.assertFalse(results2.isEmpty());
     assertEquals("Output must match", expected2, results2);
   }
 
@@ -392,7 +392,7 @@ public void removeColumnFromMiddle() throws Exception {
 
     // reads should return same output as that of non-iceberg table
     List results = sql("select * from %s order by id", dest);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -806,7 +806,7 @@ public boolean accept(File dir, String name) {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -828,7 +828,7 @@ private void threeLevelList(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -853,7 +853,7 @@ private void threeLevelListWithNestedStruct(boolean useLegacyMode) throws Except
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -880,7 +880,7 @@ private void threeLevelLists(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -904,7 +904,7 @@ private void structOfThreeLevelLists(boolean useLegacyMode) throws Exception {
 
     // check migrated table is returning expected result
     List results = sql("SELECT * FROM %s", tableName);
-    Assert.assertTrue(results.size() > 0);
+    Assert.assertFalse(results.isEmpty());
     assertEquals("Output must match", expected, results);
   }
 
@@ -982,7 +982,7 @@ private long expectedFilesCount(String source)
       throws NoSuchDatabaseException, NoSuchTableException, ParseException {
     CatalogTable sourceTable = loadSessionTable(source);
     List uris;
-    if (sourceTable.partitionColumnNames().size() == 0) {
+    if (sourceTable.partitionColumnNames().isEmpty()) {
       uris = Lists.newArrayList();
       uris.add(sourceTable.location());
     } else {
diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java
index 59c5d44bda77..9149bb7652dc 100644
--- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java
+++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewritePositionDeleteFilesAction.java
@@ -806,7 +806,7 @@ private void assertLocallySorted(List deleteFiles) {
             spark.read().format("iceberg").load("default." + TABLE_NAME + ".position_deletes");
     deletes.filter(deletes.col("delete_file_path").equalTo(deleteFile.path().toString()));
     List rows = deletes.collectAsList();
-    Assert.assertTrue("Empty delete file found", rows.size() > 0);
+    Assert.assertFalse("Empty delete file found", rows.isEmpty());
     int lastPos = 0;
     String lastPath = "";
     for (Row row : rows) {