diff --git a/tests/integration/iceberg/docker-compose/Dockerfile b/tests/integration/iceberg/docker-compose/Dockerfile
index 4756104f0b..6894899499 100644
--- a/tests/integration/iceberg/docker-compose/Dockerfile
+++ b/tests/integration/iceberg/docker-compose/Dockerfile
@@ -38,9 +38,9 @@ WORKDIR ${SPARK_HOME}
 
 ENV SPARK_VERSION=3.4.2
 ENV ICEBERG_SPARK_RUNTIME_VERSION=3.4_2.12
-ENV ICEBERG_VERSION=1.4.0
+ENV ICEBERG_VERSION=1.4.3
 ENV AWS_SDK_VERSION=2.20.18
-ENV PYICEBERG_VERSION=0.4.0
+ENV PYICEBERG_VERSION=0.5.1
 
 RUN curl --retry 3 -s -C - https://daft-public-data.s3.us-west-2.amazonaws.com/distribution/spark-${SPARK_VERSION}-bin-hadoop3.tgz -o spark-${SPARK_VERSION}-bin-hadoop3.tgz \
     && tar xzf spark-${SPARK_VERSION}-bin-hadoop3.tgz --directory /opt/spark --strip-components 1 \
diff --git a/tests/integration/iceberg/docker-compose/provision.py b/tests/integration/iceberg/docker-compose/provision.py
index e1a7a6c0de..fb9ee2e82c 100644
--- a/tests/integration/iceberg/docker-compose/provision.py
+++ b/tests/integration/iceberg/docker-compose/provision.py
@@ -322,3 +322,19 @@
     ('123')
 """
 )
+
+spark.sql(
+    """
+  CREATE OR REPLACE TABLE default.add_new_column
+  USING iceberg
+  AS SELECT
+    1 AS idx
+  UNION ALL SELECT
+    2 AS idx
+  UNION ALL SELECT
+    3 AS idx
+"""
+)
+
+spark.sql("ALTER TABLE default.add_new_column ADD COLUMN name STRING")
+spark.sql("INSERT INTO default.add_new_column VALUES (3, 'abc'), (4, 'def')")
\ No newline at end of file
diff --git a/tests/integration/iceberg/test_table_load.py b/tests/integration/iceberg/test_table_load.py
index 81c8d7c8cb..3b03c76647 100644
--- a/tests/integration/iceberg/test_table_load.py
+++ b/tests/integration/iceberg/test_table_load.py
@@ -37,6 +37,7 @@ def test_daft_iceberg_table_open(local_iceberg_tables):
     # "test_table_sanitized_character",  # Bug in scan().to_arrow().to_arrow()
     "test_table_version",  # we have bugs when loading no files
     "test_uuid_and_fixed_unpartitioned",
+    "add_new_column",
 ]
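
For context on what the new `default.add_new_column` fixture exercises: the table gains a `name` column after its first snapshot, so the three original rows should read back with `name` as null alongside the later ('abc', 'def') inserts. Below is a minimal sketch of reading this table with Daft; the catalog URI, MinIO endpoint, and credentials are assumptions about the local docker-compose environment, not values taken from this diff.

```python
import daft
from pyiceberg.catalog import load_catalog

# Assumed connection properties for the locally provisioned REST catalog
# and MinIO object store; adjust to match the docker-compose setup.
catalog = load_catalog(
    "default",
    **{
        "uri": "http://localhost:8181",
        "s3.endpoint": "http://localhost:9000",
        "s3.access-key-id": "admin",
        "s3.secret-access-key": "password",
    },
)

# Load the schema-evolved fixture table created by provision.py.
tbl = catalog.load_table("default.add_new_column")

# Read with Daft: rows written before the ALTER TABLE should surface
# the added `name` column as nulls.
df = daft.read_iceberg(tbl)
print(df.collect().to_pydict())
```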