diff --git a/CHANGELOG.md b/CHANGELOG.md
index 339f7f5c4..32a0df4d6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,13 @@
+## dbt-spark 0.20.1 (June 22, 2021)
+
+### Features
+
+### Fixes
+- Fix the `dbt seed` command to match the expected behavior of the dbt global project: [truncate the table](https://spark.apache.org/docs/3.0.0-preview/sql-ref-syntax-ddl-truncate-table.html) to remove all rows from an existing seed table and replace its values. As explained in [issue 112](https://github.com/fishtown-analytics/dbt-spark/issues/112), the seed command in dbt-spark previously appended to existing seed tables instead of overwriting them.
+
+### Contributors
+- [@mv1742](https://github.com/mv1742) ([#181](https://github.com/fishtown-analytics/dbt-spark/pull/181))
+
 ## dbt-spark 0.20.0 (Release TBD)
 
 ### Fixes
diff --git a/dbt/include/spark/macros/materializations/seed.sql b/dbt/include/spark/macros/materializations/seed.sql
index 536e6447b..c407dcc10 100644
--- a/dbt/include/spark/macros/materializations/seed.sql
+++ b/dbt/include/spark/macros/materializations/seed.sql
@@ -1,7 +1,7 @@
 {% macro spark__load_csv_rows(model, agate_table) %}
     {% set batch_size = 1000 %}
     {% set column_override = model['config'].get('column_types', {}) %}
-    
+
     {% set statements = [] %}
 
     {% for chunk in agate_table.rows | batch(batch_size) %}
@@ -37,7 +37,9 @@
 
 {% macro spark__reset_csv_table(model, full_refresh, old_relation, agate_table) %}
     {% if old_relation %}
-        {{ adapter.drop_relation(old_relation) }}
+        {{ adapter.truncate_relation(old_relation) }}
+        {% set sql = "truncate table " ~ old_relation %}
+        {{ return(sql) }}
     {% endif %}
     {% set sql = create_csv_table(model, agate_table) %}
     {{ return(sql) }}
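
Note on the behavior change (illustration only, not part of the patch): when the seed table already exists, `spark__reset_csv_table` now returns a `truncate table` statement instead of dropping the relation, and `spark__load_csv_rows` re-inserts the CSV rows in batches. A minimal sketch of the resulting Spark SQL, using a hypothetical `analytics.country_codes` seed:

```sql
-- Sketch of the statements dbt-spark would issue for an existing seed table.
-- Schema, table, and values here are hypothetical; the actual insert
-- statements are generated in batches of 1000 rows by spark__load_csv_rows.
truncate table analytics.country_codes;

insert into analytics.country_codes values
    ('US', 'United States'),
    ('CA', 'Canada');
```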