Skip to content

Commit

Permalink
Core: Remove deprecated operations method from BaseMetadataTable (apa…
Browse files Browse the repository at this point in the history
  • Loading branch information
ajantha-bhat authored and devangjhabakh committed Apr 22, 2024
1 parent b20fd1a commit 2a0d9f0
Show file tree
Hide file tree
Showing 12 changed files with 126 additions and 67 deletions.
71 changes: 71 additions & 0 deletions .palantir/revapi.yml
Original file line number Diff line number Diff line change
Expand Up @@ -874,6 +874,74 @@ acceptedBreaks:
justification: "Static utility class - should not have public constructor"
"1.4.0":
org.apache.iceberg:iceberg-core:
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.AllDataFilesTable"
new: "class org.apache.iceberg.AllDataFilesTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.AllDeleteFilesTable"
new: "class org.apache.iceberg.AllDeleteFilesTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.AllEntriesTable"
new: "class org.apache.iceberg.AllEntriesTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.AllFilesTable"
new: "class org.apache.iceberg.AllFilesTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.AllManifestsTable"
new: "class org.apache.iceberg.AllManifestsTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.BaseMetadataTable"
new: "class org.apache.iceberg.BaseMetadataTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.DataFilesTable"
new: "class org.apache.iceberg.DataFilesTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.DeleteFilesTable"
new: "class org.apache.iceberg.DeleteFilesTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.FilesTable"
new: "class org.apache.iceberg.FilesTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.HistoryTable"
new: "class org.apache.iceberg.HistoryTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.ManifestEntriesTable"
new: "class org.apache.iceberg.ManifestEntriesTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.ManifestsTable"
new: "class org.apache.iceberg.ManifestsTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.MetadataLogEntriesTable"
new: "class org.apache.iceberg.MetadataLogEntriesTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.PartitionsTable"
new: "class org.apache.iceberg.PartitionsTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.PositionDeletesTable"
new: "class org.apache.iceberg.PositionDeletesTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.RefsTable"
new: "class org.apache.iceberg.RefsTable"
justification: "Removing deprecated code"
- code: "java.class.noLongerImplementsInterface"
old: "class org.apache.iceberg.SnapshotsTable"
new: "class org.apache.iceberg.SnapshotsTable"
justification: "Removing deprecated code"
- code: "java.class.defaultSerializationChanged"
old: "class org.apache.iceberg.mapping.NameMapping"
new: "class org.apache.iceberg.mapping.NameMapping"
Expand All @@ -890,6 +958,9 @@ acceptedBreaks:
- code: "java.field.serialVersionUIDChanged"
new: "field org.apache.iceberg.util.SerializableMap<K, V>.serialVersionUID"
justification: "Serialization is not being used"
- code: "java.method.removed"
old: "method org.apache.iceberg.TableOperations org.apache.iceberg.BaseMetadataTable::operations()"
justification: "Removing deprecated code"
apache-iceberg-0.14.0:
org.apache.iceberg:iceberg-api:
- code: "java.class.defaultSerializationChanged"
Expand Down
12 changes: 2 additions & 10 deletions core/src/main/java/org/apache/iceberg/BaseMetadataTable.java
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,7 @@
* the metadata table using a {@link StaticTableOperations}. This way no Catalog related calls are
* needed when reading the table data after deserialization.
*/
public abstract class BaseMetadataTable extends BaseReadOnlyTable
implements HasTableOperations, Serializable {
public abstract class BaseMetadataTable extends BaseReadOnlyTable implements Serializable {
private final PartitionSpec spec = PartitionSpec.unpartitioned();
private final SortOrder sortOrder = SortOrder.unsorted();
private final BaseTable table;
Expand Down Expand Up @@ -101,17 +100,10 @@ static Map<Integer, PartitionSpec> transformSpecs(

abstract MetadataTableType metadataTableType();

protected BaseTable table() {
public BaseTable table() {
return table;
}

/** @deprecated will be removed in 1.4.0; do not use metadata table TableOperations */
@Override
@Deprecated
public TableOperations operations() {
return table.operations();
}

@Override
public String name() {
return name;
Expand Down
2 changes: 2 additions & 0 deletions core/src/main/java/org/apache/iceberg/SerializableTable.java
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,8 @@ private String metadataFileLocation(Table table) {
if (table instanceof HasTableOperations) {
TableOperations ops = ((HasTableOperations) table).operations();
return ops.current().metadataFileLocation();
} else if (table instanceof BaseMetadataTable) {
return ((BaseMetadataTable) table).table().operations().current().metadataFileLocation();
} else {
return null;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,7 @@
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.ContentFile;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.util.Pair;
Expand Down Expand Up @@ -72,18 +70,12 @@ public void clearRewrite(Table table, String fileSetId) {

public Set<String> fetchSetIds(Table table) {
return resultMap.keySet().stream()
.filter(e -> e.first().equals(tableUUID(table)))
.filter(e -> e.first().equals(Spark3Util.baseTableUUID(table)))
.map(Pair::second)
.collect(Collectors.toSet());
}

private Pair<String, String> toId(Table table, String setId) {
String tableUUID = tableUUID(table);
return Pair.of(tableUUID, setId);
}

private String tableUUID(Table table) {
TableOperations ops = ((HasTableOperations) table).operations();
return ops.current().uuid();
return Pair.of(Spark3Util.baseTableUUID(table), setId);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,8 @@
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.ScanTask;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.util.Pair;
Expand Down Expand Up @@ -64,17 +62,12 @@ public <T extends ScanTask> List<T> removeTasks(Table table, String setId) {

public Set<String> fetchSetIds(Table table) {
return tasksMap.keySet().stream()
.filter(e -> e.first().equals(tableUUID(table)))
.filter(e -> e.first().equals(Spark3Util.baseTableUUID(table)))
.map(Pair::second)
.collect(Collectors.toSet());
}

private String tableUUID(Table table) {
TableOperations ops = ((HasTableOperations) table).operations();
return ops.current().uuid();
}

private Pair<String, String> toId(Table table, String setId) {
return Pair.of(tableUUID(table), setId);
return Pair.of(Spark3Util.baseTableUUID(table), setId);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@
import java.util.stream.Stream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.BaseMetadataTable;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.NullOrder;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.PartitionSpec;
Expand Down Expand Up @@ -945,6 +947,17 @@ public static org.apache.spark.sql.catalyst.TableIdentifier toV1TableIdentifier(
return org.apache.spark.sql.catalyst.TableIdentifier.apply(table, database);
}

/**
 * Returns the UUID of the table's current metadata, resolving metadata tables to their
 * backing base table.
 *
 * <p>Metadata tables no longer implement {@code HasTableOperations} (see the
 * {@code BaseMetadataTable} change in this commit), so they are handled by a separate
 * branch that reaches the operations through the wrapped base table.
 *
 * @param table an Iceberg table; either a regular table exposing {@code HasTableOperations}
 *     or a {@code BaseMetadataTable}
 * @return the UUID string from the current table metadata
 * @throws UnsupportedOperationException if the table exposes neither access path
 */
static String baseTableUUID(org.apache.iceberg.Table table) {
if (table instanceof HasTableOperations) {
TableOperations ops = ((HasTableOperations) table).operations();
return ops.current().uuid();
} else if (table instanceof BaseMetadataTable) {
// Metadata tables: go through the backing BaseTable to reach TableOperations.
return ((BaseMetadataTable) table).table().operations().current().uuid();
} else {
throw new UnsupportedOperationException("Cannot retrieve UUID for table " + table.name());
}
}

private static class DescribeSortOrderVisitor implements SortOrderVisitor<String> {
private static final DescribeSortOrderVisitor INSTANCE = new DescribeSortOrderVisitor();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,7 @@
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.ContentFile;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.util.Pair;
Expand Down Expand Up @@ -72,18 +70,12 @@ public void clearRewrite(Table table, String fileSetId) {

public Set<String> fetchSetIds(Table table) {
return resultMap.keySet().stream()
.filter(e -> e.first().equals(tableUUID(table)))
.filter(e -> e.first().equals(Spark3Util.baseTableUUID(table)))
.map(Pair::second)
.collect(Collectors.toSet());
}

private Pair<String, String> toId(Table table, String setId) {
String tableUUID = tableUUID(table);
return Pair.of(tableUUID, setId);
}

private String tableUUID(Table table) {
TableOperations ops = ((HasTableOperations) table).operations();
return ops.current().uuid();
return Pair.of(Spark3Util.baseTableUUID(table), setId);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,8 @@
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.ScanTask;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.util.Pair;
Expand Down Expand Up @@ -64,17 +62,12 @@ public <T extends ScanTask> List<T> removeTasks(Table table, String setId) {

public Set<String> fetchSetIds(Table table) {
return tasksMap.keySet().stream()
.filter(e -> e.first().equals(tableUUID(table)))
.filter(e -> e.first().equals(Spark3Util.baseTableUUID(table)))
.map(Pair::second)
.collect(Collectors.toSet());
}

private String tableUUID(Table table) {
TableOperations ops = ((HasTableOperations) table).operations();
return ops.current().uuid();
}

private Pair<String, String> toId(Table table, String setId) {
return Pair.of(tableUUID(table), setId);
return Pair.of(Spark3Util.baseTableUUID(table), setId);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@
import java.util.stream.Stream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.BaseMetadataTable;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.NullOrder;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.PartitionSpec;
Expand Down Expand Up @@ -948,6 +950,17 @@ public static org.apache.spark.sql.catalyst.TableIdentifier toV1TableIdentifier(
return org.apache.spark.sql.catalyst.TableIdentifier.apply(table, database);
}

/**
 * Returns the UUID of the table's current metadata, resolving metadata tables to their
 * backing base table.
 *
 * <p>Metadata tables no longer implement {@code HasTableOperations} (see the
 * {@code BaseMetadataTable} change in this commit), so they are handled by a separate
 * branch that reaches the operations through the wrapped base table.
 *
 * @param table an Iceberg table; either a regular table exposing {@code HasTableOperations}
 *     or a {@code BaseMetadataTable}
 * @return the UUID string from the current table metadata
 * @throws UnsupportedOperationException if the table exposes neither access path
 */
static String baseTableUUID(org.apache.iceberg.Table table) {
if (table instanceof HasTableOperations) {
TableOperations ops = ((HasTableOperations) table).operations();
return ops.current().uuid();
} else if (table instanceof BaseMetadataTable) {
// Metadata tables: go through the backing BaseTable to reach TableOperations.
return ((BaseMetadataTable) table).table().operations().current().uuid();
} else {
throw new UnsupportedOperationException("Cannot retrieve UUID for table " + table.name());
}
}

private static class DescribeSortOrderVisitor implements SortOrderVisitor<String> {
private static final DescribeSortOrderVisitor INSTANCE = new DescribeSortOrderVisitor();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,7 @@
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.ContentFile;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.util.Pair;
Expand Down Expand Up @@ -72,18 +70,12 @@ public void clearRewrite(Table table, String fileSetId) {

public Set<String> fetchSetIds(Table table) {
return resultMap.keySet().stream()
.filter(e -> e.first().equals(tableUUID(table)))
.filter(e -> e.first().equals(Spark3Util.baseTableUUID(table)))
.map(Pair::second)
.collect(Collectors.toSet());
}

private Pair<String, String> toId(Table table, String setId) {
String tableUUID = tableUUID(table);
return Pair.of(tableUUID, setId);
}

private String tableUUID(Table table) {
TableOperations ops = ((HasTableOperations) table).operations();
return ops.current().uuid();
return Pair.of(Spark3Util.baseTableUUID(table), setId);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,8 @@
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.ScanTask;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.util.Pair;
Expand Down Expand Up @@ -64,17 +62,12 @@ public <T extends ScanTask> List<T> removeTasks(Table table, String setId) {

public Set<String> fetchSetIds(Table table) {
return tasksMap.keySet().stream()
.filter(e -> e.first().equals(tableUUID(table)))
.filter(e -> e.first().equals(Spark3Util.baseTableUUID(table)))
.map(Pair::second)
.collect(Collectors.toSet());
}

private String tableUUID(Table table) {
TableOperations ops = ((HasTableOperations) table).operations();
return ops.current().uuid();
}

private Pair<String, String> toId(Table table, String setId) {
return Pair.of(tableUUID(table), setId);
return Pair.of(Spark3Util.baseTableUUID(table), setId);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.BaseMetadataTable;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.NullOrder;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.PartitionSpec;
Expand Down Expand Up @@ -948,6 +950,17 @@ public static org.apache.spark.sql.catalyst.TableIdentifier toV1TableIdentifier(
return org.apache.spark.sql.catalyst.TableIdentifier.apply(table, database);
}

/**
 * Returns the UUID of the table's current metadata, resolving metadata tables to their
 * backing base table.
 *
 * <p>Metadata tables no longer implement {@code HasTableOperations} (see the
 * {@code BaseMetadataTable} change in this commit), so they are handled by a separate
 * branch that reaches the operations through the wrapped base table.
 *
 * @param table an Iceberg table; either a regular table exposing {@code HasTableOperations}
 *     or a {@code BaseMetadataTable}
 * @return the UUID string from the current table metadata
 * @throws UnsupportedOperationException if the table exposes neither access path
 */
static String baseTableUUID(org.apache.iceberg.Table table) {
if (table instanceof HasTableOperations) {
TableOperations ops = ((HasTableOperations) table).operations();
return ops.current().uuid();
} else if (table instanceof BaseMetadataTable) {
// Metadata tables: go through the backing BaseTable to reach TableOperations.
return ((BaseMetadataTable) table).table().operations().current().uuid();
} else {
throw new UnsupportedOperationException("Cannot retrieve UUID for table " + table.name());
}
}

private static class DescribeSortOrderVisitor implements SortOrderVisitor<String> {
private static final DescribeSortOrderVisitor INSTANCE = new DescribeSortOrderVisitor();

Expand Down

0 comments on commit 2a0d9f0

Please sign in to comment.