Spark: Replace .size() > 0 with isEmpty() (#8814)
PickBas authored Oct 13, 2023
1 parent 2268bd8 commit 287f90a
Showing 27 changed files with 72 additions and 72 deletions.
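The change is the same mechanical substitution in every file: emptiness checks written as collection.size() > 0 become !collection.isEmpty(), and test assertions written as assertTrue(collection.size() > 0) become assertFalse(collection.isEmpty()). For standard java.util collections the two forms are equivalent; isEmpty() states the intent directly and, for some collection implementations, avoids counting elements. Below is a minimal sketch of the before/after shape, assuming Guava and JUnit 4 on the classpath (both already used in the touched files) and using hypothetical names (StageExample, stage, verifyNotEmpty) rather than any class from this commit:

import com.google.common.base.Preconditions;
import java.util.List;
import org.junit.Assert;

class StageExample {
  // Production-style precondition: reject null or empty input up front.
  // Before: tasks != null && tasks.size() > 0
  static void stage(List<String> tasks) {
    Preconditions.checkArgument(
        tasks != null && !tasks.isEmpty(), "Cannot stage null or empty tasks");
  }

  // Test-style assertion: assertFalse(isEmpty()) replaces assertTrue(size() > 0).
  static void verifyNotEmpty(List<Object[]> results) {
    Assert.assertFalse("Results can't be empty", results.isEmpty());
  }
}

The hunks below apply exactly this substitution; removed lines are marked with - and their replacements with +.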
@@ -44,7 +44,7 @@ public static FileScanTaskSetManager get() {

public void stageTasks(Table table, String setID, List<FileScanTask> tasks) {
Preconditions.checkArgument(
- tasks != null && tasks.size() > 0, "Cannot stage null or empty tasks");
+ tasks != null && !tasks.isEmpty(), "Cannot stage null or empty tasks");
Pair<String, String> id = toID(table, setID);
tasksMap.put(id, tasks);
}
@@ -229,7 +229,7 @@ Map<StructLike, List<List<FileScanTask>>> planFileGroups(long startingSnapshotId
Iterable<FileScanTask> filtered = strategy.selectFilesToRewrite(tasks);
Iterable<List<FileScanTask>> groupedTasks = strategy.planFileGroups(filtered);
List<List<FileScanTask>> fileGroups = ImmutableList.copyOf(groupedTasks);
- if (fileGroups.size() > 0) {
+ if (!fileGroups.isEmpty()) {
fileGroupsByPartition.put(partition, fileGroups);
}
});
@@ -112,7 +112,7 @@ private boolean parquetOnly() {
private boolean parquetBatchReadsEnabled() {
return readConf.parquetVectorizationEnabled()
&& // vectorization enabled
- expectedSchema.columns().size() > 0
+ !expectedSchema.columns().isEmpty()
&& // at least one column is projected
expectedSchema.columns().stream()
.allMatch(c -> c.type().isPrimitiveType()); // only primitives
@@ -117,7 +117,7 @@ public void testBaseCombinedScanTaskJavaSerialization() throws Exception {
public void testBaseScanTaskGroupKryoSerialization() throws Exception {
BaseScanTaskGroup<FileScanTask> taskGroup = prepareBaseScanTaskGroupForSerDeTest();

- Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+ Assert.assertFalse("Task group can't be empty", taskGroup.tasks().isEmpty());

File data = temp.newFile();
Assert.assertTrue(data.delete());
@@ -141,7 +141,7 @@ public void testBaseScanTaskGroupKryoSerialization() throws Exception {
public void testBaseScanTaskGroupJavaSerialization() throws Exception {
BaseScanTaskGroup<FileScanTask> taskGroup = prepareBaseScanTaskGroupForSerDeTest();

- Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+ Assert.assertFalse("Task group can't be empty", taskGroup.tasks().isEmpty());

ByteArrayOutputStream bytes = new ByteArrayOutputStream();
try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
@@ -269,7 +269,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {

// reads should succeed without any exceptions
List<Object[]> results1 = sql("select * from %s order by id", dest);
- Assert.assertTrue(results1.size() > 0);
+ Assert.assertFalse(results1.isEmpty());
assertEquals("Output must match", results1, expected1);

String newCol2 = "newCol2";
@@ -279,7 +279,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {

// reads should succeed without any exceptions
List<Object[]> results2 = sql("select * from %s order by id", dest);
- Assert.assertTrue(results2.size() > 0);
+ Assert.assertFalse(results2.isEmpty());
assertEquals("Output must match", results2, expected2);
}

@@ -313,7 +313,7 @@ public void testAddColumnOnMigratedTableAtMiddle() throws Exception {

// reads should succeed
List<Object[]> results = sql("select * from %s order by id", dest);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", results, expected);
}

@@ -351,7 +351,7 @@ public void removeColumnsAtEnd() throws Exception {

// reads should succeed without any exceptions
List<Object[]> results1 = sql("select * from %s order by id", dest);
- Assert.assertTrue(results1.size() > 0);
+ Assert.assertFalse(results1.isEmpty());
assertEquals("Output must match", expected1, results1);

sql("ALTER TABLE %s DROP COLUMN %s", dest, colName2);
@@ -360,7 +360,7 @@ public void removeColumnsAtEnd() throws Exception {

// reads should succeed without any exceptions
List<Object[]> results2 = sql("select * from %s order by id", dest);
- Assert.assertTrue(results2.size() > 0);
+ Assert.assertFalse(results2.isEmpty());
assertEquals("Output must match", expected2, results2);
}

@@ -392,7 +392,7 @@ public void removeColumnFromMiddle() throws Exception {

// reads should return same output as that of non-iceberg table
List<Object[]> results = sql("select * from %s order by id", dest);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -806,7 +806,7 @@ public boolean accept(File dir, String name) {

// check migrated table is returning expected result
List<Object[]> results = sql("SELECT * FROM %s", tableName);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -828,7 +828,7 @@ private void threeLevelList(boolean useLegacyMode) throws Exception {

// check migrated table is returning expected result
List<Object[]> results = sql("SELECT * FROM %s", tableName);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -853,7 +853,7 @@ private void threeLevelListWithNestedStruct(boolean useLegacyMode) throws Except

// check migrated table is returning expected result
List<Object[]> results = sql("SELECT * FROM %s", tableName);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -880,7 +880,7 @@ private void threeLevelLists(boolean useLegacyMode) throws Exception {

// check migrated table is returning expected result
List<Object[]> results = sql("SELECT * FROM %s", tableName);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -904,7 +904,7 @@ private void structOfThreeLevelLists(boolean useLegacyMode) throws Exception {

// check migrated table is returning expected result
List<Object[]> results = sql("SELECT * FROM %s", tableName);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -45,7 +45,7 @@ public static ScanTaskSetManager get() {

public <T extends ScanTask> void stageTasks(Table table, String setId, List<T> tasks) {
Preconditions.checkArgument(
- tasks != null && tasks.size() > 0, "Cannot stage null or empty tasks");
+ tasks != null && !tasks.isEmpty(), "Cannot stage null or empty tasks");
Pair<String, String> id = toId(table, setId);
tasksMap.put(id, tasks);
}
@@ -91,7 +91,7 @@ public Set<DeleteFile> rewrite(List<PositionDeletesScanTask> group) {

protected void doRewrite(String groupId, List<PositionDeletesScanTask> group) {
// all position deletes are of the same partition, because they are in same file group
- Preconditions.checkArgument(group.size() > 0, "Empty group");
+ Preconditions.checkArgument(!group.isEmpty(), "Empty group");
Types.StructType partitionType = group.get(0).spec().partitionType();
StructLike partition = group.get(0).partition();

@@ -176,7 +176,7 @@ private List<String> validZOrderColNames(
}

Preconditions.checkArgument(
- validZOrderColNames.size() > 0,
+ !validZOrderColNames.isEmpty(),
"Cannot ZOrder, all columns provided were identity partition columns and cannot be used");

return validZOrderColNames;
@@ -119,7 +119,7 @@ public PartitionReaderFactory createReaderFactory() {
// - all tasks are of FileScanTask type and read only Parquet files
private boolean useParquetBatchReads() {
return readConf.parquetVectorizationEnabled()
- && expectedSchema.columns().size() > 0
+ && !expectedSchema.columns().isEmpty()
&& expectedSchema.columns().stream().allMatch(c -> c.type().isPrimitiveType())
&& taskGroups.stream().allMatch(this::supportsParquetBatchReads);
}
@@ -81,7 +81,7 @@ public Write build() {
ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
List<PositionDeletesScanTask> tasks = taskSetManager.fetchTasks(table, fileSetId);
Preconditions.checkArgument(
- tasks != null && tasks.size() > 0, "No scan tasks found for %s", fileSetId);
+ tasks != null && !tasks.isEmpty(), "No scan tasks found for %s", fileSetId);

int specId = specId(fileSetId, tasks);
StructLike partition = partition(fileSetId, tasks);
@@ -117,7 +117,7 @@ public void testBaseCombinedScanTaskJavaSerialization() throws Exception {
public void testBaseScanTaskGroupKryoSerialization() throws Exception {
BaseScanTaskGroup<FileScanTask> taskGroup = prepareBaseScanTaskGroupForSerDeTest();

- Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+ Assert.assertTrue("Task group can't be empty", !taskGroup.tasks().isEmpty());

File data = temp.newFile();
Assert.assertTrue(data.delete());
@@ -141,7 +141,7 @@ public void testBaseScanTaskGroupKryoSerialization() throws Exception {
public void testBaseScanTaskGroupJavaSerialization() throws Exception {
BaseScanTaskGroup<FileScanTask> taskGroup = prepareBaseScanTaskGroupForSerDeTest();

- Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+ Assert.assertTrue("Task group can't be empty", !taskGroup.tasks().isEmpty());

ByteArrayOutputStream bytes = new ByteArrayOutputStream();
try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
@@ -269,7 +269,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {

// reads should succeed without any exceptions
List<Object[]> results1 = sql("select * from %s order by id", dest);
- Assert.assertTrue(results1.size() > 0);
+ Assert.assertFalse(results1.isEmpty());
assertEquals("Output must match", results1, expected1);

String newCol2 = "newCol2";
@@ -279,7 +279,7 @@ public void testAddColumnOnMigratedTableAtEnd() throws Exception {

// reads should succeed without any exceptions
List<Object[]> results2 = sql("select * from %s order by id", dest);
- Assert.assertTrue(results2.size() > 0);
+ Assert.assertFalse(results2.isEmpty());
assertEquals("Output must match", results2, expected2);
}

@@ -313,7 +313,7 @@ public void testAddColumnOnMigratedTableAtMiddle() throws Exception {

// reads should succeed
List<Object[]> results = sql("select * from %s order by id", dest);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", results, expected);
}

@@ -351,7 +351,7 @@ public void removeColumnsAtEnd() throws Exception {

// reads should succeed without any exceptions
List<Object[]> results1 = sql("select * from %s order by id", dest);
- Assert.assertTrue(results1.size() > 0);
+ Assert.assertFalse(results1.isEmpty());
assertEquals("Output must match", expected1, results1);

sql("ALTER TABLE %s DROP COLUMN %s", dest, colName2);
@@ -360,7 +360,7 @@ public void removeColumnsAtEnd() throws Exception {

// reads should succeed without any exceptions
List<Object[]> results2 = sql("select * from %s order by id", dest);
- Assert.assertTrue(results2.size() > 0);
+ Assert.assertFalse(results2.isEmpty());
assertEquals("Output must match", expected2, results2);
}

@@ -392,7 +392,7 @@ public void removeColumnFromMiddle() throws Exception {

// reads should return same output as that of non-iceberg table
List<Object[]> results = sql("select * from %s order by id", dest);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -806,7 +806,7 @@ public boolean accept(File dir, String name) {

// check migrated table is returning expected result
List<Object[]> results = sql("SELECT * FROM %s", tableName);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -828,7 +828,7 @@ private void threeLevelList(boolean useLegacyMode) throws Exception {

// check migrated table is returning expected result
List<Object[]> results = sql("SELECT * FROM %s", tableName);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -853,7 +853,7 @@ private void threeLevelListWithNestedStruct(boolean useLegacyMode) throws Except

// check migrated table is returning expected result
List<Object[]> results = sql("SELECT * FROM %s", tableName);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -880,7 +880,7 @@ private void threeLevelLists(boolean useLegacyMode) throws Exception {

// check migrated table is returning expected result
List<Object[]> results = sql("SELECT * FROM %s", tableName);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -904,7 +904,7 @@ private void structOfThreeLevelLists(boolean useLegacyMode) throws Exception {

// check migrated table is returning expected result
List<Object[]> results = sql("SELECT * FROM %s", tableName);
- Assert.assertTrue(results.size() > 0);
+ Assert.assertFalse(results.isEmpty());
assertEquals("Output must match", expected, results);
}

@@ -807,7 +807,7 @@ private void assertLocallySorted(List<DeleteFile> deleteFiles) {
spark.read().format("iceberg").load("default." + TABLE_NAME + ".position_deletes");
deletes.filter(deletes.col("delete_file_path").equalTo(deleteFile.path().toString()));
List<Row> rows = deletes.collectAsList();
- Assert.assertTrue("Empty delete file found", rows.size() > 0);
+ Assert.assertFalse("Empty delete file found", rows.isEmpty());
int lastPos = 0;
String lastPath = "";
for (Row row : rows) {
@@ -45,7 +45,7 @@ public static ScanTaskSetManager get() {

public <T extends ScanTask> void stageTasks(Table table, String setId, List<T> tasks) {
Preconditions.checkArgument(
- tasks != null && tasks.size() > 0, "Cannot stage null or empty tasks");
+ tasks != null && !tasks.isEmpty(), "Cannot stage null or empty tasks");
Pair<String, String> id = toId(table, setId);
tasksMap.put(id, tasks);
}
@@ -91,7 +91,7 @@ public Set<DeleteFile> rewrite(List<PositionDeletesScanTask> group) {

protected void doRewrite(String groupId, List<PositionDeletesScanTask> group) {
// all position deletes are of the same partition, because they are in same file group
- Preconditions.checkArgument(group.size() > 0, "Empty group");
+ Preconditions.checkArgument(!group.isEmpty(), "Empty group");
Types.StructType partitionType = group.get(0).spec().partitionType();
StructLike partition = group.get(0).partition();

@@ -181,7 +181,7 @@ private List<String> validZOrderColNames(
}

Preconditions.checkArgument(
- validZOrderColNames.size() > 0,
+ !validZOrderColNames.isEmpty(),
"Cannot ZOrder, all columns provided were identity partition columns and cannot be used");

return validZOrderColNames;
@@ -76,7 +76,7 @@ public Write build() {
ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
List<PositionDeletesScanTask> tasks = taskSetManager.fetchTasks(table, fileSetId);
Preconditions.checkArgument(
- tasks != null && tasks.size() > 0, "No scan tasks found for %s", fileSetId);
+ tasks != null && !tasks.isEmpty(), "No scan tasks found for %s", fileSetId);

int specId = specId(fileSetId, tasks);
StructLike partition = partition(fileSetId, tasks);
@@ -117,7 +117,7 @@ public void testBaseCombinedScanTaskJavaSerialization() throws Exception {
public void testBaseScanTaskGroupKryoSerialization() throws Exception {
BaseScanTaskGroup<FileScanTask> taskGroup = prepareBaseScanTaskGroupForSerDeTest();

- Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+ Assert.assertFalse("Task group can't be empty", taskGroup.tasks().isEmpty());

File data = temp.newFile();
Assert.assertTrue(data.delete());
@@ -141,7 +141,7 @@ public void testBaseScanTaskGroupKryoSerialization() throws Exception {
public void testBaseScanTaskGroupJavaSerialization() throws Exception {
BaseScanTaskGroup<FileScanTask> taskGroup = prepareBaseScanTaskGroupForSerDeTest();

- Assert.assertTrue("Task group can't be empty", taskGroup.tasks().size() > 0);
+ Assert.assertFalse("Task group can't be empty", taskGroup.tasks().isEmpty());

ByteArrayOutputStream bytes = new ByteArrayOutputStream();
try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {