From 7b492cedb499dd246ea1aa621c0d6f4b2fd663ab Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Mon, 25 Sep 2023 18:27:59 +0100 Subject: [PATCH 01/27] Add test for cte quoting --- .../tests/unit/model_b_references_a.sql | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/integration-tests/tests/unit/model_b_references_a.sql b/integration-tests/tests/unit/model_b_references_a.sql index 8de515b..b1ab7f8 100644 --- a/integration-tests/tests/unit/model_b_references_a.sql +++ b/integration-tests/tests/unit/model_b_references_a.sql @@ -14,3 +14,15 @@ select 1 as a, 'b' as b {% endcall %} {% endcall %} + +UNION ALL + +{% call test_condition_on_model_query('model_b_references_a', "should ", {}, + assert_should_contain, dbt_unit_testing.quote_identifier("model_a")) %} + {% call dbt_unit_testing.mock_ref ('model_a') %} + select 1 as id + {% endcall %} + {% call dbt_unit_testing.expect() %} + select 1 as id + {% endcall %} +{% endcall %} From 94a246af03006e546a2db88cd0b773729d0d2ab4 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Mon, 25 Sep 2023 18:28:48 +0100 Subject: [PATCH 02/27] Fix including missing columns on columns with reserved names --- .../tests/unit/reserved_column_names_mocking.sql | 13 ++++++++++++- macros/mock_builders.sql | 2 +- macros/utils.sql | 2 +- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/integration-tests/tests/unit/reserved_column_names_mocking.sql b/integration-tests/tests/unit/reserved_column_names_mocking.sql index 31ee8b6..720b90f 100644 --- a/integration-tests/tests/unit/reserved_column_names_mocking.sql +++ b/integration-tests/tests/unit/reserved_column_names_mocking.sql @@ -5,11 +5,22 @@ }} {% call dbt_unit_testing.test('model_references_model_with_reserved_column_name', 'sample test') %} - {% call dbt_unit_testing.mock_ref ('model_with_reserved_column_name') %} + {% call dbt_unit_testing.mock_ref ('model_with_reserved_column_name', options={"include_missing_columns": true}) %} select 1 as a, 'b' as b, 1 as "END" {% endcall %} {% call dbt_unit_testing.expect() %} select 1 as a, 'b' as b, 1 as "END" {% endcall %} {% endcall %} + + UNION ALL + + {% call dbt_unit_testing.test('model_references_model_with_reserved_column_name', 'sample test') %} + {% call dbt_unit_testing.mock_ref ('model_with_reserved_column_name', options={"include_missing_columns": true}) %} + select 1 as a, 'b' as b + {% endcall %} + {% call dbt_unit_testing.expect() %} + select 1 as a, 'b' as b + {% endcall %} +{% endcall %} \ No newline at end of file diff --git a/macros/mock_builders.sql b/macros/mock_builders.sql index 883752d..63b6232 100644 --- a/macros/mock_builders.sql +++ b/macros/mock_builders.sql @@ -83,7 +83,7 @@ {% set input_values_sql %} {% set node_sql = dbt_unit_testing.build_node_sql(model_node, options.use_database_models) %} select * from ({{ input_values_sql }}) as m1 - left join (select {{ missing_columns | join (",")}} + left join (select {{ dbt_unit_testing.quote_and_join_columns(missing_columns)}} from ({{ node_sql }}) as m2) as m3 on false {%- endset -%} {%- endif -%} diff --git a/macros/utils.sql b/macros/utils.sql index 744cf05..b16a83c 100644 --- a/macros/utils.sql +++ b/macros/utils.sql @@ -19,7 +19,7 @@ {% endmacro %} {% macro extract_columns_difference(cl1, cl2) %} - {% set columns = cl1 | map('lower') | list | reject('in', cl2 | map('lower') | list) | list %} + {% set columns = cl1 | reject('in', cl2) | list %} {{ return(columns) }} {% endmacro %} From 88a3c7b1994c045e28dc15c58b46e70715834319 Mon Sep 17 00:00:00 2001 From: Pedro Sousa 
Date: Mon, 25 Sep 2023 18:59:00 +0100 Subject: [PATCH 03/27] Code cleanup --- integration-tests/tests/unit/model_b_references_a.sql | 2 +- macros/tests.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/integration-tests/tests/unit/model_b_references_a.sql b/integration-tests/tests/unit/model_b_references_a.sql index b1ab7f8..963b3e5 100644 --- a/integration-tests/tests/unit/model_b_references_a.sql +++ b/integration-tests/tests/unit/model_b_references_a.sql @@ -17,7 +17,7 @@ UNION ALL -{% call test_condition_on_model_query('model_b_references_a', "should ", {}, +{% call test_condition_on_model_query('model_b_references_a', "should quote CTE", {}, assert_should_contain, dbt_unit_testing.quote_identifier("model_a")) %} {% call dbt_unit_testing.mock_ref ('model_a') %} select 1 as id diff --git a/macros/tests.sql b/macros/tests.sql index ac56896..557118b 100644 --- a/macros/tests.sql +++ b/macros/tests.sql @@ -20,7 +20,7 @@ {% macro build_configuration_and_test_queries(model_node, test_description, options, mocks_and_expectations_json_str) %} {{ dbt_unit_testing.set_test_context("model_being_tested", dbt_unit_testing.ref_cte_name(model_node)) }} {% set test_configuration = { - "model_name": model_node.model_name, + "model_name": model_node.name, "description": test_description, "model_node": model_node, "options": dbt_unit_testing.merge_configs([options])} From 58e6161c784231c88e3ce06a4df19b6d0c46ac37 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Sat, 14 Oct 2023 09:33:21 +0100 Subject: [PATCH 04/27] Update README --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index 1ac288d..4e27fb1 100644 --- a/README.md +++ b/README.md @@ -109,6 +109,20 @@ The first line is boilerplate we can't avoid: We leverage the command dbt test to run the unit tests; then, we need a way to isolate the unit tests. The rest of the lines are the test itself, the mocks (test setup) and expectations. +## Running tests + +The framework leverages the dbt test command to run the tests. 
You can run all the tests in your project with the following command: + +```bash +dbt test +``` + +If you want to run just the unit tests, you can use the following command: + +```bash +dbt test --select tag:unit-test +``` + ## Available Macros | macro name | description | From c7fe019424202c57ef07cd0e4c5b28fe9147d164 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Fri, 20 Oct 2023 08:59:14 +0100 Subject: [PATCH 05/27] Fix README --- README.md | 1 + integration-tests/models/model_ephemeral.sql | 3 +++ .../models/model_references_ephemeral.sql | 1 + .../tests/unit/model_references_ephemeral.sql | 16 ++++++++++++++++ macros/utils.sql | 1 + 5 files changed, 22 insertions(+) create mode 100644 integration-tests/models/model_ephemeral.sql create mode 100644 integration-tests/models/model_references_ephemeral.sql create mode 100644 integration-tests/tests/unit/model_references_ephemeral.sql diff --git a/README.md b/README.md index 4e27fb1..42402df 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,7 @@ You can test models independently by mocking their dependencies (models, sources - [Main Features](#main-features) - [Documentation](#documentation) - [Anatomy of a test](#anatomy-of-a-test) + - [Running Tests](#running-tests) - [Available Macros](#available-macros) - [Test Examples](#test-examples) - [Different ways to build mock values](#different-ways-to-build-mock-values) diff --git a/integration-tests/models/model_ephemeral.sql b/integration-tests/models/model_ephemeral.sql new file mode 100644 index 0000000..ee2dbfd --- /dev/null +++ b/integration-tests/models/model_ephemeral.sql @@ -0,0 +1,3 @@ +{{ config (materialized = 'incremental' ) }} + +select 1 as a, 'b' as b \ No newline at end of file diff --git a/integration-tests/models/model_references_ephemeral.sql b/integration-tests/models/model_references_ephemeral.sql new file mode 100644 index 0000000..94f3f79 --- /dev/null +++ b/integration-tests/models/model_references_ephemeral.sql @@ -0,0 +1 @@ +select * from {{ dbt_unit_testing.ref('model_ephemeral') }} where a >= 1 diff --git a/integration-tests/tests/unit/model_references_ephemeral.sql b/integration-tests/tests/unit/model_references_ephemeral.sql new file mode 100644 index 0000000..88d92a0 --- /dev/null +++ b/integration-tests/tests/unit/model_references_ephemeral.sql @@ -0,0 +1,16 @@ +{{ + config( + tags=['unit-test', 'bigquery', 'snowflake', 'postgres'] + ) +}} + +{% call dbt_unit_testing.test('model_ephemeral', 'sample test') %} + {% call dbt_unit_testing.mock_ref ('model_a') %} + select 0 as a, 'a' as b + UNION ALL + select 1 as a, 'b' as b + {% endcall %} + {% call dbt_unit_testing.expect() %} + select 1 as a, 'b' as b + {% endcall %} +{% endcall %} \ No newline at end of file diff --git a/macros/utils.sql b/macros/utils.sql index b16a83c..9629273 100644 --- a/macros/utils.sql +++ b/macros/utils.sql @@ -19,6 +19,7 @@ {% endmacro %} {% macro extract_columns_difference(cl1, cl2) %} + !! WRONG !! 
{% set columns = cl1 | reject('in', cl2) | list %} {{ return(columns) }} {% endmacro %} From 81e5e9ad5d7b82185ca51a9f792432f5ceae6a4d Mon Sep 17 00:00:00 2001 From: Amarnath Goud <78682995+Amar-AIcloud@users.noreply.github.com> Date: Sat, 28 Oct 2023 17:56:49 +0000 Subject: [PATCH 06/27] removed the extra endcall in readme --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 42402df..02ce739 100644 --- a/README.md +++ b/README.md @@ -292,7 +292,6 @@ With the above configuration, you could write your tests like this: {% endcall %} {% endcall %} -{% endcall %} ``` ## Mocking From 49f92aae96fa0fe4961b96a586ff8dbb992ff9d2 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Wed, 22 Nov 2023 19:01:45 +0000 Subject: [PATCH 07/27] Add CTE prefix --- integration-tests/tests/unit/model_b_references_a.sql | 2 +- macros/sql_builders.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/integration-tests/tests/unit/model_b_references_a.sql b/integration-tests/tests/unit/model_b_references_a.sql index 963b3e5..6eba84e 100644 --- a/integration-tests/tests/unit/model_b_references_a.sql +++ b/integration-tests/tests/unit/model_b_references_a.sql @@ -18,7 +18,7 @@ UNION ALL {% call test_condition_on_model_query('model_b_references_a', "should quote CTE", {}, - assert_should_contain, dbt_unit_testing.quote_identifier("model_a")) %} + assert_should_contain, dbt_unit_testing.quote_identifier("DBT_CTE__model_a")) %} {% call dbt_unit_testing.mock_ref ('model_a') %} select 1 as id {% endcall %} diff --git a/macros/sql_builders.sql b/macros/sql_builders.sql index 90f261d..56321ab 100644 --- a/macros/sql_builders.sql +++ b/macros/sql_builders.sql @@ -38,7 +38,7 @@ {% macro ref_cte_name(node) %} {% set node = dbt_unit_testing.model_node(node) %} - {% set parts = [node.name] %} + {% set parts = ["DBT_CTE", node.name] %} {% if node.package_name != model.package_name %} {% set parts = [node.package_name] + parts %} {% endif %} From 68b4b30fe596d698f34003f7ecdd6f69a83fde88 Mon Sep 17 00:00:00 2001 From: Nicolas Guary Date: Wed, 6 Dec 2023 18:07:51 +0100 Subject: [PATCH 08/27] doc: update readme with multiple tests example --- README.md | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/README.md b/README.md index 02ce739..58a146d 100644 --- a/README.md +++ b/README.md @@ -110,6 +110,46 @@ The first line is boilerplate we can't avoid: We leverage the command dbt test to run the unit tests; then, we need a way to isolate the unit tests. The rest of the lines are the test itself, the mocks (test setup) and expectations. +### Creating multiple tests in the same file + +When creating multiple tests in the same test file, you need to make sure they are all separated by an `UNION ALL` statement: + +``` +{{ config(tags=['unit-test']) }} + +{% call dbt_unit_testing.test ('[Model to Test]','[Test Name]') %} + {% call dbt_unit_testing.mock_ref ('[model name]') %} + select ... + {% endcall %} + + {% call dbt_unit_testing.mock_source('[source name]') %} + select ... + {% endcall %} + + {% call dbt_unit_testing.expect() %} + select ... + {% endcall %} + +{% endcall %} + +UNION ALL + +{% call dbt_unit_testing.test ('[Model to Test]','[Another Test]') %} + {% call dbt_unit_testing.mock_ref ('[model name]') %} + select ... + {% endcall %} + + {% call dbt_unit_testing.mock_source('[source name]') %} + select ... + {% endcall %} + + {% call dbt_unit_testing.expect() %} + select ... 
+ {% endcall %} + +{% endcall %} +``` + ## Running tests The framework leverages the dbt test command to run the tests. You can run all the tests in your project with the following command: From c8b3594e24ee461566fc37a0deb0a77da021a291 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Fri, 5 Jan 2024 12:26:50 +0000 Subject: [PATCH 09/27] Run tests only on test command --- macros/tests.sql | 2 +- run-test-on-dbt-version.sh | 36 ------------------------------------ run-tests-on-dbt-version.sh | 6 ++++-- 3 files changed, 5 insertions(+), 39 deletions(-) delete mode 100755 run-test-on-dbt-version.sh diff --git a/macros/tests.sql b/macros/tests.sql index 557118b..67b1a65 100644 --- a/macros/tests.sql +++ b/macros/tests.sql @@ -1,7 +1,7 @@ {% macro test(model_name, test_description='(no description)', options={}) %} {{ dbt_unit_testing.ref_tested_model(model_name) }} - {% if execute %} + {% if execute and flags.WHICH == 'test' %} {% set mocks_and_expectations_json_str = caller() %} {% set model_version = kwargs["version"] | default(kwargs["v"]) | default(none) %} {% set model_node = {"package_name": model.package_name, "name": model_name, "version": model_version} %} diff --git a/run-test-on-dbt-version.sh b/run-test-on-dbt-version.sh deleted file mode 100755 index bd57a0a..0000000 --- a/run-test-on-dbt-version.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -set -e - -if [ -z "$1" ]; then - echo 'Please provide test script to run' - exit 1 -fi - -if [ -z "$2" ]; then - echo 'Please provide profile name' - exit 1 -fi - -if [ -z "$3" ]; then - echo 'Please provide dbt version' - exit 1 -fi - -TEST_SCRIPT="$1" -PROFILE="$2" -DBT_VERSION=$3 - -SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) -VENV_NAME="venv-$PROFILE" -VENV_FOLDER="$SCRIPT_DIR/$VENV_NAME" - -rm -rf "$VENV_FOLDER" -python3 -m venv "$VENV_FOLDER" - -pip install --upgrade pip setuptools -pip install "dbt-$PROFILE==$DBT_VERSION" - -source "$VENV_FOLDER/bin/activate" - -"$SCRIPT_DIR/$TEST_SCRIPT.sh" "$PROFILE" diff --git a/run-tests-on-dbt-version.sh b/run-tests-on-dbt-version.sh index cc828d0..41b77da 100755 --- a/run-tests-on-dbt-version.sh +++ b/run-tests-on-dbt-version.sh @@ -21,6 +21,8 @@ TEST_SCRIPT="$1" PROFILE="$2" DBT_VERSION=$3 +echo "Running $TEST_SCRIPT on dbt $DBT_VERSION with profile $PROFILE" + SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) VENV_NAME="venv-$PROFILE" VENV_FOLDER="$SCRIPT_DIR/$VENV_NAME" @@ -28,9 +30,9 @@ VENV_FOLDER="$SCRIPT_DIR/$VENV_NAME" rm -rf "$VENV_FOLDER" python3 -m venv "$VENV_FOLDER" +source "$VENV_FOLDER/bin/activate" + pip install --upgrade pip setuptools pip install "dbt-$PROFILE==$DBT_VERSION" -source "$VENV_FOLDER/bin/activate" - "$SCRIPT_DIR/$TEST_SCRIPT.sh" "$PROFILE" "$DBT_VERSION" From feef4735f5dcada036a38ef273facde46ae122ad Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Mon, 8 Jan 2024 13:19:34 +0000 Subject: [PATCH 10/27] README updates --- README.md | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 58a146d..88a44ee 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ You can test models independently by mocking their dependencies (models, sources - [Different ways to build mock values](#different-ways-to-build-mock-values) - [Mocking](#mocking) - [Database dependencies in detail](#database-dependencies-in-detail) - - [Requirement](#requirement) + - [Important Requirement](#important-requirement) - [Incremental Models](#incremental-models) - [Available 
Options](#available-options) - [Test Feedback](#test-feedback) @@ -96,7 +96,7 @@ The test is composed of a test setup (mocks) and expectations: {% endcall %} {% call dbt_unit_testing.expect() %} - select ... + select ... {% endcall %} {% endcall %} @@ -114,7 +114,7 @@ We leverage the command dbt test to run the unit tests; then, we need a way to i When creating multiple tests in the same test file, you need to make sure they are all separated by an `UNION ALL` statement: -``` +```Jinja {{ config(tags=['unit-test']) }} {% call dbt_unit_testing.test ('[Model to Test]','[Test Name]') %} @@ -169,8 +169,8 @@ dbt test --select tag:unit-test | macro name | description | |------------------------------|-------------------------------------------------| | dbt_unit_testing.test | Defines a Test | -| dbt_unit_testing.mock-ref | Mocks a **model** / **snapshot** / **seed** | -| dbt_unit_testing.mock-source | Mocks a **source** | +| dbt_unit_testing.mock_ref | Mocks a **model** / **snapshot** / **seed** | +| dbt_unit_testing.mock_source | Mocks a **source** | | dbt_unit_testing.expect | Defines Test expectations | ## Test Examples @@ -315,13 +315,13 @@ With the above configuration, you could write your tests like this: {% endcall %} {% call dbt_unit_testing.mock_ref ('stg_orders', {"input_format": "csv"}) %} - order_id | customer_id | order_date + order_id | customer_id | order_date::date 1 | 1 | null 2 | 1 | null {% endcall %} {% call dbt_unit_testing.mock_ref ('stg_payments', {"input_format": "csv"}) %} - order_id | amount + order_id | amount::int 1 | 10 2 | 10 {% endcall %} @@ -334,6 +334,10 @@ With the above configuration, you could write your tests like this: ``` +Notice that you can specify the type of a column by adding the type name after the column name, separated by "::". + +The name of the type is the same as the name of the type in the database (e.g. `int`, `float`, `date`, `timestamp`, etc). + ## Mocking Mocks can be completely independent of the dev/test environment if you set up all the required dependencies (it's explained here [How](#how). @@ -396,9 +400,9 @@ This SQL can be a pretty complex query; sometimes, it's non-performant or even a You can use the option **'use-database-models'** to avoid the recursive inspection and use the model defined in the database. Be aware that this makes a new dependency on the underlying model definition, and it needs to be updated each time you run a test. -### Requirement +### Important Requirement -To be able to mock the models and sources in tests, in your dbt models you **must** use the macros **dbt_unit_testing.ref** and **dbt_unit_testing.source**, for example: +To be able to mock the models and sources in tests, in your dbt models you can use the macros **dbt_unit_testing.ref** and **dbt_unit_testing.source**, for example: ```sql @@ -406,7 +410,7 @@ To be able to mock the models and sources in tests, in your dbt models you **mus ``` -Alternatively, if you prefer to keep using the standard `ref` macro in the models, you can add these macros to your project: +Alternatively, if you prefer to keep using the standard `ref` and `source` macros in the models, you can override them by adding these lines to your project: ```jinja {% macro ref() %} @@ -551,6 +555,8 @@ To test the `is_incremental` section of your model, you must include the option Note that in this case, we are also mocking the model being tested (`incremental_model`) to ensure the incremental logic functions correctly. 
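As a sketch — assuming an `incremental_model` whose `is_incremental()` branch filters out keys already present in the target table; the model name, column, and values below are illustrative only, not taken from the repo — such a test combines the documented `run_as_incremental` option with a mock of the model under test:

```jinja
{% call dbt_unit_testing.test('incremental_model', 'keeps only new rows on incremental runs',
                              options={"run_as_incremental": true}) %}
  {% call dbt_unit_testing.mock_ref('upstream_model') %}
    select 10 as c1
    UNION ALL
    select 20 as c1
  {% endcall %}
  {# mocking the tested model itself simulates the rows already present in the target table #}
  {% call dbt_unit_testing.mock_ref('incremental_model') %}
    select 10 as c1
  {% endcall %}
  {% call dbt_unit_testing.expect() %}
    select 20 as c1
  {% endcall %}
{% endcall %}
```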
It is necessary to mock the model itself when writing a test for the `is_incremental` part of the model. +*Note: As previously mentioned, these type of tests are meant to test the `is_incremental` part of the model. Testing different increments strategies (such as `merge`, `delete+insert` or `insert_overwrite`) is not supported.* + ## Available Options | option | description | default | scope* | From 225f31c953d7b645309b3cb8b5a24fe881d7b3f0 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Mon, 8 Jan 2024 20:04:22 +0000 Subject: [PATCH 11/27] Fix bug when using include_missing_columns with incremental models --- integration-tests/package-lock.yml | 6 ++++++ integration-tests/tests/unit/incremental_model_1.sql | 2 +- macros/overrides.sql | 4 ++-- macros/tests.sql | 3 ++- run-tests-helper.sh | 2 +- 5 files changed, 12 insertions(+), 5 deletions(-) create mode 100644 integration-tests/package-lock.yml diff --git a/integration-tests/package-lock.yml b/integration-tests/package-lock.yml new file mode 100644 index 0000000..0edbf90 --- /dev/null +++ b/integration-tests/package-lock.yml @@ -0,0 +1,6 @@ +packages: +- local: ../ +- local: ../integration-tests-sub-package +- package: dbt-labs/dbt_utils + version: 1.1.1 +sha1_hash: 255fde144a549202e6bc6893da2f03a3ffb86fa0 diff --git a/integration-tests/tests/unit/incremental_model_1.sql b/integration-tests/tests/unit/incremental_model_1.sql index 3452b19..2a5d7ae 100644 --- a/integration-tests/tests/unit/incremental_model_1.sql +++ b/integration-tests/tests/unit/incremental_model_1.sql @@ -47,7 +47,7 @@ UNION ALL UNION ALL select 30 as c1 {% endcall %} - {% call dbt_unit_testing.mock_ref ('incremental_model_1') %} + {% call dbt_unit_testing.mock_ref ('incremental_model_1', options = {"include_missing_columns": true}) %} select 10 as c1 {% endcall %} {% call dbt_unit_testing.expect() %} diff --git a/macros/overrides.sql b/macros/overrides.sql index 81f6c98..86e67f4 100644 --- a/macros/overrides.sql +++ b/macros/overrides.sql @@ -20,9 +20,9 @@ {% macro is_incremental() %} {% if dbt_unit_testing.running_unit_test() %} {% set options = dbt_unit_testing.get_test_context("options", {}) %} - {% set model_being_tested = dbt_unit_testing.get_test_context("model_being_tested", "") %} + {% set is_incremental_should_be_true_for_this_model = dbt_unit_testing.get_test_context("is_incremental_should_be_true_for_this_model", "") %} {% set model_being_rendered = dbt_unit_testing.get_test_context("model_being_rendered", "") %} - {{ return (options.get("run_as_incremental", False) and model_being_rendered == model_being_tested and model_being_rendered != "") }} + {{ return (options.get("run_as_incremental", False) and model_being_rendered == is_incremental_should_be_true_for_this_model and model_being_rendered != "") }} {% else %} {{ return (dbt.is_incremental())}} {% endif %} diff --git a/macros/tests.sql b/macros/tests.sql index 67b1a65..92607a0 100644 --- a/macros/tests.sql +++ b/macros/tests.sql @@ -2,6 +2,7 @@ {{ dbt_unit_testing.ref_tested_model(model_name) }} {% if execute and flags.WHICH == 'test' %} + {{ dbt_unit_testing.set_test_context("is_incremental_should_be_true_for_this_model", "") }} {% set mocks_and_expectations_json_str = caller() %} {% set model_version = kwargs["version"] | default(kwargs["v"]) | default(none) %} {% set model_node = {"package_name": model.package_name, "name": model_name, "version": model_version} %} @@ -18,7 +19,6 @@ {% endmacro %} {% macro build_configuration_and_test_queries(model_node, test_description, options, 
mocks_and_expectations_json_str) %} - {{ dbt_unit_testing.set_test_context("model_being_tested", dbt_unit_testing.ref_cte_name(model_node)) }} {% set test_configuration = { "model_name": model_node.name, "description": test_description, @@ -30,6 +30,7 @@ {{ dbt_unit_testing.verbose("CONFIG: " ~ test_configuration) }} {% do test_configuration.update (dbt_unit_testing.build_mocks_and_expectations(test_configuration, mocks_and_expectations_json_str)) %} + {{ dbt_unit_testing.set_test_context("is_incremental_should_be_true_for_this_model", dbt_unit_testing.ref_cte_name(model_node)) }} {% set test_queries = dbt_unit_testing.build_test_queries(test_configuration) %} {{ return ((test_configuration, test_queries)) }} diff --git a/run-tests-helper.sh b/run-tests-helper.sh index 4fa8dab..14367b8 100644 --- a/run-tests-helper.sh +++ b/run-tests-helper.sh @@ -5,7 +5,7 @@ function run_tests() { local PROFILE=$2 if [ "$PROFILE" == "postgres" ]; then - VERSIONS="1.3.4 1.4.6 1.5.4" + VERSIONS="1.3.4 1.4.6 1.5.4 1.7.4" elif [ "$PROFILE" == "bigquery" ]; then VERSIONS="1.3.2" elif [ "$PROFILE" == "snowflake" ]; then From e4a1ee4316c81bda02012582396e158803600f83 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Wed, 10 Jan 2024 15:15:39 +0000 Subject: [PATCH 12/27] Add column transformations --- README.md | 130 ++++++++++++++++++ integration-tests/dbt_project.yml | 5 + .../model_references_model_with_structs.sql | 1 + .../models/biquery/model_with_structs.sql | 1 + .../model_for_column_transformations.sql | 1 + .../model_references_model_with_structs.sql | 20 +++ .../unit/model_for_column_transformations.sql | 19 +++ macros/input_parsing.sql | 3 +- macros/tests.sql | 20 ++- macros/utils.sql | 58 +++++++- 10 files changed, 243 insertions(+), 15 deletions(-) create mode 100644 integration-tests/models/biquery/model_references_model_with_structs.sql create mode 100644 integration-tests/models/biquery/model_with_structs.sql create mode 100644 integration-tests/models/model_for_column_transformations.sql create mode 100644 integration-tests/tests/unit/bigquery/model_references_model_with_structs.sql create mode 100644 integration-tests/tests/unit/model_for_column_transformations.sql diff --git a/README.md b/README.md index 88a44ee..c7624a7 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,10 @@ You can test models independently by mocking their dependencies (models, sources - [Database dependencies in detail](#database-dependencies-in-detail) - [Important Requirement](#important-requirement) - [Incremental Models](#incremental-models) + - [Column Transformations](#column-transformations) + - [How to use](#how-to-use) + - [Use Case 1: Rounding Column Values](#use-case-1-rounding-column-values) + - [Use Case 2: Converting Structs to JSON Strings in BigQuery](#use-case-2-converting-structs-to-json-strings-in-bigquery) - [Available Options](#available-options) - [Test Feedback](#test-feedback) - [Example](#example) @@ -557,6 +561,130 @@ Note that in this case, we are also mocking the model being tested (`incremental *Note: As previously mentioned, these type of tests are meant to test the `is_incremental` part of the model. Testing different increments strategies (such as `merge`, `delete+insert` or `insert_overwrite`) is not supported.* +## Column Transformations + +This functionality allows for the application of transformations to columns before the execution of unit tests. 
+ +Column transformations enable the alteration of column data, catering to the need for data standardization, conversion, or formatting prior to testing. This addition aims to support more sophisticated testing requirements and ensure data integrity within the testing process. + +### How to use + +To leverage the column transformations feature in `dbt-unit-testing`, you need to define a JSON structure specifying the desired transformations for each column. This structure is integrated into the unit test options. Below is an example of how to format this JSON structure: + +```Json +{ + "col_name_1": "round(col_name_1, 4)", + "col_name_2": "to_json_string(col_name_2)" +} +``` + +You can assign this JSON into a variable and pass it into the unit test options. Below is an example of how to do this: + +```Jinja +{% set column_transformations = { + "col_name_1": "round(col_name_1, 4)", + "col_name_2": "to_json_string(col_name_2)" +} %} + +{% call dbt_unit_testing.test('some_model_name', options={"column_transformations": column_transformations}) %} + +... +``` + +In this example, `col_name_1` is rounded to four decimal places, and `col_name_2` is converted to a JSON string. These transformations are applied before the execution of the unit test. + +In addition to specifying column transformations at the individual test level, `dbt-unit-testing` also allows for a more generalized approach. You can define column transformations globally in the dbt_project.yml file. This approach allows for the application of column transformations to all unit tests in the project. + +Here is an example of how to set up column transformations in the dbt_project.yml file: + +```yaml +vars: + unit_tests_config: + column_transformations: + some_model_name: + col_name_1: round(col_name_1, 4) + # ... additional transformations for 'some_model_name' + another_model_name: + # ... transformations for 'another_model_name' +``` + +In this configuration, transformations such as `round(col_name_1, 4)` are applied to `col_name_1` in the context of `some_model_name`. + +You can also use the special token `##column##` in your column transformations. This token will be replaced by the column name in the test query, properly quoted for the current database adapter: + + +```yaml +vars: + unit_tests_config: + column_transformations: + some_model_name: + col_name_1: "round(##column##, 2)" + # ... additional transformations +``` + +In this example, `round(##column##)` will be evaluated such that `##column##` is replaced with the properly quoted name of `col_name_1`. This ensures that the column name is correctly formatted for the database adapter in use. + +### Use Case 1: Rounding Column Values + +In many scenarios, especially when dealing with floating-point numbers, precision issues can arise, leading to inconsistencies in data testing. For example, calculations or aggregations may result in floating-point numbers with excessive decimal places, making direct comparisons challenging. To address this, `dbt-unit-testing` can round these values to a specified number of decimal places, ensuring consistency and precision in tests. + +Consider a model, `financial_model`, which contains a column `avg_revenue` representing the average of the revenue values from another table. Due to calculations, `avg_revenue` might have an extensive number of decimal places. For testing purposes, you might want to round these values to a fixed number of decimal places. 
+ +```SQL +SELECT + id, + AVG(revenue) as avg_revenue +FROM + raw_financial_data +``` + +You can ensure that revenue is rounded to two decimal places when testing `financial_model`, and your expectations are also rounded to five decimal places. This ensures that the test is precise and consistent. + +```Jinja +{% set column_transformations = { + "revenue": "round(##column##, 5)" +} %} + +{% call dbt_unit_testing.test('financial_model', options={"column_transformations": column_transformations}) %} + {% call dbt_unit_testing.mock_ref ('raw_financial_data') %} + select 5.0 as revenue + UNION ALL + select 2.0 as revenue + UNION ALL + select 3.0 as revenue + {% endcall %} + {% call dbt_unit_testing.expect() %} + select 3.33333 as revenue + {% endcall %} +{% endcall %} +``` + +### Use Case 2: Converting Structs to JSON Strings in BigQuery + +In BigQuery, certain data types like structs and arrays pose a challenge for `dbt-unit-testing`, because it needs to use the EXCEPT clause. BigQuery does not support these operations directly on structs or arrays. A practical solution is to convert these complex data types into JSON strings, allowing for standard SQL operations to be applied in tests. This can be achieved using column transformations. + +Let's consider a model, `user_activity_model`, which includes a struct column `activity_details` in BigQuery. To facilitate testing involving grouping or comparison, we transform `activity_details` into a JSON string. + +```SQL +SELECT + user_id, + activity_details -- struct column +FROM + raw_user_activity +``` + +```Jinja +{% set column_transformations = { + "activity_details": "to_json_string(##column##)" +} %} + +{% call dbt_unit_testing.test('user_activity_model', options={"column_transformations": column_transformations}) %} + -- Test cases and assertions here +{% endcall %} +``` + +In this example, the `activity_details` column, which is a struct, is transformed into a JSON string using `to_json_string(##column##)` before the execution of the unit test. This transformation facilitates operations like grouping and EXCEPT in BigQuery by converting the struct into a more manageable string format. + ## Available Options | option | description | default | scope* | @@ -572,6 +700,8 @@ Note that in this case, we are also mocking the model being tested (`incremental | **diff_column** | The name of the `diff` column in the test report | diff| project/test | | **count_column** | The name of the `count` column in the test report | count| project/test | | **run_as_incremental** | Runs the model in `incremental` mode (it has no effect if the model is not incremental) | false| project/test | +| **column_transformations** | A JSON structure specifying the desired transformations for each column. See [Column Transformations](#column-transformations) for more details. 
| {}| project/test | + Notes: - **scope** is the place where the option can be defined: diff --git a/integration-tests/dbt_project.yml b/integration-tests/dbt_project.yml index 3ae3dbf..5b8e53b 100644 --- a/integration-tests/dbt_project.yml +++ b/integration-tests/dbt_project.yml @@ -22,6 +22,11 @@ clean-targets: # directories to be removed by `dbt clean` vars: unit_tests_config: + column_transformations: + model_for_column_transformations: + b: upper(b) + model_references_model_with_structs: + a: to_json_string(##column##) verbose: false use_qualified_sources: true use_database_models: false diff --git a/integration-tests/models/biquery/model_references_model_with_structs.sql b/integration-tests/models/biquery/model_references_model_with_structs.sql new file mode 100644 index 0000000..1bb3eea --- /dev/null +++ b/integration-tests/models/biquery/model_references_model_with_structs.sql @@ -0,0 +1 @@ +select a, b, c, d from {{ dbt_unit_testing.ref('model_with_structs')}} diff --git a/integration-tests/models/biquery/model_with_structs.sql b/integration-tests/models/biquery/model_with_structs.sql new file mode 100644 index 0000000..643afde --- /dev/null +++ b/integration-tests/models/biquery/model_with_structs.sql @@ -0,0 +1 @@ +select [1, 2, 3] as a, struct(1 as f1, "f2" as f2) as b, 3 as c, "value" as d diff --git a/integration-tests/models/model_for_column_transformations.sql b/integration-tests/models/model_for_column_transformations.sql new file mode 100644 index 0000000..4c1ad93 --- /dev/null +++ b/integration-tests/models/model_for_column_transformations.sql @@ -0,0 +1 @@ +select a / 3 as a, b from {{ dbt_unit_testing.ref ('model_a') }} diff --git a/integration-tests/tests/unit/bigquery/model_references_model_with_structs.sql b/integration-tests/tests/unit/bigquery/model_references_model_with_structs.sql new file mode 100644 index 0000000..26656d0 --- /dev/null +++ b/integration-tests/tests/unit/bigquery/model_references_model_with_structs.sql @@ -0,0 +1,20 @@ +{{ + config( + tags=['unit-test', 'bigquery'] + ) +}} + +{% set column_transformations = { + "b": "to_json_string(b)" + } +%} + +{% call dbt_unit_testing.test('model_references_model_with_structs', options={"column_transformations": column_transformations}) %} + {% call dbt_unit_testing.mock_ref ('model_with_structs') %} + select [1, 2, 3] as a, struct(1 as f1, "f2" as f2) as b, 3 as c, "value" as d + {% endcall %} + {% call dbt_unit_testing.expect() %} + select [1, 2, 3] as a, struct(1 as f1, "f2" as f2) as b, 3 as c, "value" as d + {% endcall %} +{% endcall %} + \ No newline at end of file diff --git a/integration-tests/tests/unit/model_for_column_transformations.sql b/integration-tests/tests/unit/model_for_column_transformations.sql new file mode 100644 index 0000000..8249d23 --- /dev/null +++ b/integration-tests/tests/unit/model_for_column_transformations.sql @@ -0,0 +1,19 @@ +{{ + config( + tags=['unit-test', 'bigquery', 'snowflake', 'postgres'] + ) +}} + +{% set column_transformations = { + "a": "round(##column##, 4)" + } +%} + +{% call dbt_unit_testing.test('model_for_column_transformations', options={"column_transformations": column_transformations}) %} + {% call dbt_unit_testing.mock_ref ('model_a') %} + select 10.0 as a, 'lower' as b + {% endcall %} + {% call dbt_unit_testing.expect() %} + select 3.3333 as a, 'LOWER' as b + {% endcall %} +{% endcall %} diff --git a/macros/input_parsing.sql b/macros/input_parsing.sql index 7e45362..59483a8 100644 --- a/macros/input_parsing.sql +++ b/macros/input_parsing.sql @@ -50,5 
+50,4 @@ {{ return (sql) }} - {% endmacro %} - +{% endmacro %} diff --git a/macros/tests.sql b/macros/tests.sql index 92607a0..a109985 100644 --- a/macros/tests.sql +++ b/macros/tests.sql @@ -19,6 +19,8 @@ {% endmacro %} {% macro build_configuration_and_test_queries(model_node, test_description, options, mocks_and_expectations_json_str) %} + {% set model_name = model_node.name %} + {% do options.update({"column_transformations": {model_name: options.get("column_transformations", {})}}) %} {% set test_configuration = { "model_name": model_node.name, "description": test_description, @@ -79,17 +81,21 @@ {% set expectations = test_configuration.expectations %} {% set model_node = dbt_unit_testing.model_node(test_configuration.model_node) %} {%- set model_complete_sql = dbt_unit_testing.build_model_complete_sql(model_node, test_configuration.mocks, test_configuration.options) -%} - {% set columns = dbt_unit_testing.quote_and_join_columns(dbt_unit_testing.extract_columns_list(expectations.input_values)) %} + {% set column_transformations = test_configuration.options.column_transformations[test_configuration.model_name] | default({}) %} + {% set columns_list = dbt_unit_testing.extract_columns_list(expectations.input_values) %} + {% set columns_list_str = dbt_unit_testing.quote_and_join_columns(columns_list) %} + {% set transformed_columns_list_str = dbt_unit_testing.apply_transformations_to_columns(columns_list, column_transformations, use_alias=true) | join(", ") %} + {% set transformed_columns_list_for_grouping_str = dbt_unit_testing.apply_transformations_to_columns(columns_list, column_transformations, use_alias=false) | join(", ") %} {% set diff_column = test_configuration.options.diff_column | default("diff") %} {% set count_column = test_configuration.options.count_column | default("count") %} {%- set actual_query -%} - select count(1) as {{ count_column }}, {{columns}} from ( {{ model_complete_sql }} ) as s group by {{ columns }} + select count(1) as {{ count_column }}, {{ transformed_columns_list_str }} from ( {{ model_complete_sql }} ) as s group by {{ transformed_columns_list_for_grouping_str }} {% endset %} {%- set expectations_query -%} - select count(1) as {{ count_column }}, {{columns}} from ({{ expectations.input_values }}) as s group by {{ columns }} + select count(1) as {{ count_column }}, {{ transformed_columns_list_str }} from ({{ expectations.input_values }}) as s group by {{ transformed_columns_list_for_grouping_str }} {% endset %} {%- set test_query -%} @@ -101,14 +107,14 @@ ), extra_entries as ( - select '+' as {{ diff_column }}, {{ count_column }}, {{columns}} from actual + select '+' as {{ diff_column }}, {{ count_column }}, {{ columns_list_str }} from actual {{ except() }} - select '+' as {{ diff_column }}, {{ count_column }}, {{columns}} from expectations), + select '+' as {{ diff_column }}, {{ count_column }}, {{ columns_list_str }} from expectations), missing_entries as ( - select '-' as {{ diff_column }}, {{ count_column }}, {{columns}} from expectations + select '-' as {{ diff_column }}, {{ count_column }}, {{ columns_list_str }} from expectations {{ except() }} - select '-' as {{ diff_column }}, {{ count_column }}, {{columns}} from actual) + select '-' as {{ diff_column }}, {{ count_column }}, {{ columns_list_str }} from actual) select * from extra_entries UNION ALL diff --git a/macros/utils.sql b/macros/utils.sql index 9629273..f0ca550 100644 --- a/macros/utils.sql +++ b/macros/utils.sql @@ -25,10 +25,33 @@ {% endmacro %} {% macro quote_and_join_columns(columns) 
%} - {% set columns = dbt_unit_testing.map(columns, dbt_unit_testing.quote_identifier) | join(",") %} + {% set columns = dbt_unit_testing.map(columns, dbt_unit_testing.quote_identifier) | join(", ") %} {{ return (columns) }} {% endmacro %} +{% macro apply_transformations_to_columns(columns, column_transformations, use_alias=true) %} + {% set transformed_columns = [] %} + {% for column in columns %} + {% set transformed_column = dbt_unit_testing.apply_transformations_to_column(column, column_transformations, use_alias) %} + {% do transformed_columns.append(transformed_column) %} + {% endfor %} + {{ return (transformed_columns) }} +{% endmacro %} + +{% macro apply_transformations_to_column(column, column_transformations, use_alias) %} + {% set quoted_column = dbt_unit_testing.quote_identifier(column) %} + {% set transformation = column_transformations.get(column, "") %} + {% if transformation != "" %} + {% set transformation = transformation | replace("##column##", quoted_column) %} + {% if use_alias %} + {% set transformation = transformation ~ " as " ~ quoted_column %} + {% endif %} + {% else %} + {% set transformation = quoted_column %} + {% endif %} + {{ return (transformation) }} +{% endmacro %} + {% macro sql_encode(s) %} {{ return (s.replace('"', '####_quote_####').replace('\n', '####_cr_####').replace('\t', '####_tab_####')) }} {% endmacro %} @@ -107,12 +130,35 @@ {% endif %} {% endmacro %} -{% macro merge_jsons(jsons) %} +{% macro deep_merge_2_jsons(json1, json2) %} + {% set json = dbt_unit_testing.merge_json_left(json1, json2) %} + {% do json.update(dbt_unit_testing.merge_json_left(json2, json1)) %} + {{ return (json) }} +{% endmacro %} + +{% macro merge_json_left(json1, json2) %} + {% set json = {} %} + {% for k,v in json1.items() %} + {% if v is mapping %} + {% set other = json2.get(k, {}) %} + {% if other is mapping %} + {% set v = dbt_unit_testing.deep_merge_2_jsons(v, other) %} + {% endif %} + {% elif v is sequence and v is not string %} + {% set other = json2.get(k, []) %} + {% if other is sequence and other is not string %} + {% set v = dbt_unit_testing.map((v + other) | map("tojson") | unique, fromjson) %} + {% endif %} + {% endif %} + {% do json.update({k: v}) %} + {% endfor %} + {{ return (json) }} +{% endmacro %} + +{% macro deep_merge_jsons(jsons) %} {% set json = {} %} {% for j in jsons %} - {% for k,v in j.items() %} - {% do json.update({k: v}) %} - {% endfor %} + {% do json.update(dbt_unit_testing.deep_merge_2_jsons(json, j)) %} {% endfor %} {{ return (json) }} {% endmacro %} @@ -130,7 +176,7 @@ {% macro merge_configs(configs) %} {% set unit_tests_config = var("unit_tests_config", {}) %} {% set unit_tests_config = {} if unit_tests_config is none else unit_tests_config %} - {{ return (dbt_unit_testing.merge_jsons([unit_tests_config] + configs)) }} + {{ return (dbt_unit_testing.deep_merge_jsons([unit_tests_config] + configs)) }} {% endmacro %} {% macro quote_identifier(identifier) %} From f94f500f3335acac9b7877669f5c1520aea477e8 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Wed, 10 Jan 2024 19:23:49 +0000 Subject: [PATCH 13/27] Fix issue when running tests from a different package --- .../models/model_references_parent_model.sql | 1 + .../tests/model_references_parent_model.sql | 15 +++++++++++++++ macros/overrides.sql | 8 ++++++-- macros/utils.sql | 2 +- 4 files changed, 23 insertions(+), 3 deletions(-) create mode 100644 integration-tests-sub-package/models/model_references_parent_model.sql create mode 100644 
integration-tests-sub-package/tests/model_references_parent_model.sql diff --git a/integration-tests-sub-package/models/model_references_parent_model.sql b/integration-tests-sub-package/models/model_references_parent_model.sql new file mode 100644 index 0000000..ee6a140 --- /dev/null +++ b/integration-tests-sub-package/models/model_references_parent_model.sql @@ -0,0 +1 @@ +select * from {{ ref('model_a') }} \ No newline at end of file diff --git a/integration-tests-sub-package/tests/model_references_parent_model.sql b/integration-tests-sub-package/tests/model_references_parent_model.sql new file mode 100644 index 0000000..df11f48 --- /dev/null +++ b/integration-tests-sub-package/tests/model_references_parent_model.sql @@ -0,0 +1,15 @@ +{{ + config( + tags=['unit-test', 'bigquery', 'snowflake', 'postgres', 'subpack'] + ) +}} + +{% call dbt_unit_testing.test('model_references_parent_model', 'sample test') %} + {% call dbt_unit_testing.mock_ref ('model_a') %} + select 1 as a, 'b' as b + {% endcall %} + {% call dbt_unit_testing.expect() %} + select 1 as a, 'b' as b + {% endcall %} +{% endcall %} + \ No newline at end of file diff --git a/macros/overrides.sql b/macros/overrides.sql index 86e67f4..0bc27bb 100644 --- a/macros/overrides.sql +++ b/macros/overrides.sql @@ -1,11 +1,15 @@ {% macro ref(project_or_package, model_name) %} - {% set project_or_package, model_name = dbt_unit_testing.setup_project_and_model_name(project_or_package, model_name) %} {% if dbt_unit_testing.running_unit_test() %} + {% set project_or_package, model_name = dbt_unit_testing.setup_project_and_model_name(project_or_package, model_name) %} {% set node_version = kwargs["version"] | default (kwargs["v"]) %} {% set node = {"package_name": project_or_package, "name": model_name, "version": node_version} %} {{ return (dbt_unit_testing.ref_cte_name(node)) }} {% else %} - {{ return (builtins.ref(project_or_package, model_name, **kwargs)) }} + {% if model_name is undefined %} + {{ return (builtins.ref(project_or_package, **kwargs)) }} + {% else %} + {{ return (builtins.ref(project_or_package, model_name, **kwargs)) }} + {% endif %} {% endif %} {% endmacro %} diff --git a/macros/utils.sql b/macros/utils.sql index f0ca550..0277802 100644 --- a/macros/utils.sql +++ b/macros/utils.sql @@ -97,7 +97,7 @@ {% macro model_node (node) %} {% set graph_nodes = graph.nodes.values() | selectattr('resource_type', 'in', ['model', 'snapshot', 'seed']) | - selectattr('package_name', 'equalto', node.package_name) | + selectattr('package_name', 'in', [node.package_name, project_name]) | selectattr('name', 'equalto', node.name) | list %} {% if graph_nodes | length > 0 %} From 94ff85e8ed5182824beca3c7303c5dee789e9528 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Thu, 11 Jan 2024 09:40:38 +0000 Subject: [PATCH 14/27] Add last_spaces_replace_char option --- integration-tests/dbt_project.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/integration-tests/dbt_project.yml b/integration-tests/dbt_project.yml index 5b8e53b..140c81c 100644 --- a/integration-tests/dbt_project.yml +++ b/integration-tests/dbt_project.yml @@ -38,3 +38,4 @@ vars: type_separator: "::" diff_column: "diff" count_column: "count" + last_spaces_replace_char: "." 
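A minimal sketch of how a consuming project would opt into the new variable in its own `dbt_project.yml`; only `last_spaces_replace_char` is the new key, and the surrounding structure simply mirrors the integration-test configuration shown in the diff above:

```yaml
vars:
  unit_tests_config:
    # make trailing spaces visible in the test report by replacing them with "."
    last_spaces_replace_char: "."
```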
From bb5fbd1aa4a4253edb555a24a1eabb55044d3680 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Thu, 11 Jan 2024 10:08:29 +0000 Subject: [PATCH 15/27] Add expect_no_rows macro --- README.md | 53 ++++++++++++++++--- .../tests/unit/expect_no_rows.sql | 13 +++++ macros/mock_builders.sql | 11 ++++ macros/output.sql | 20 ++++++- macros/tests.sql | 2 +- 5 files changed, 91 insertions(+), 8 deletions(-) create mode 100644 integration-tests/tests/unit/expect_no_rows.sql diff --git a/README.md b/README.md index c7624a7..b1e5e3b 100644 --- a/README.md +++ b/README.md @@ -154,6 +154,23 @@ UNION ALL {% endcall %} ``` +### Expectations + +The expectations are the results you expect from the model. The framework compares the expectations with the actuals and shows the differences in the test report. + +```jinja +{% call dbt_unit_testing.expect() %} + select ... +{% endcall %} +``` + +You can use the macro `expect_no_rows` to test if the model returns no rows: + +```jinja +{% call dbt_unit_testing.expect_no_rows() %} +{% endcall %} +``` + ## Running tests The framework leverages the dbt test command to run the tests. You can run all the tests in your project with the following command: @@ -170,12 +187,13 @@ dbt test --select tag:unit-test ## Available Macros -| macro name | description | -|------------------------------|-------------------------------------------------| -| dbt_unit_testing.test | Defines a Test | -| dbt_unit_testing.mock_ref | Mocks a **model** / **snapshot** / **seed** | -| dbt_unit_testing.mock_source | Mocks a **source** | -| dbt_unit_testing.expect | Defines Test expectations | +| macro name | description | +|---------------------------------|-------------------------------------------------| +| dbt_unit_testing.test | Defines a Test | +| dbt_unit_testing.mock_ref | Mocks a **model** / **snapshot** / **seed** | +| dbt_unit_testing.mock_source | Mocks a **source** | +| dbt_unit_testing.expect | Defines the Test expectations | +| dbt_unit_testing.expect_no_rows | Used to test if the model returns no rows | ## Test Examples @@ -701,6 +719,7 @@ In this example, the `activity_details` column, which is a struct, is transforme | **count_column** | The name of the `count` column in the test report | count| project/test | | **run_as_incremental** | Runs the model in `incremental` mode (it has no effect if the model is not incremental) | false| project/test | | **column_transformations** | A JSON structure specifying the desired transformations for each column. See [Column Transformations](#column-transformations) for more details. | {}| project/test | +| last_spaces_replace_char | Replace the spaces at the end of the values with another character. See [Test Feedback](#test-feedback) for more details. | (space) | project/test | Notes: @@ -740,6 +759,28 @@ Rows mismatch: The first line was not on the model, but the second line was. +### Spaces at the end of the diff values + +It can be hard to spot the difference when the values have spaces at the end. To avoid this, you can use the option `last_spaces_replace_char` to replace the spaces at the end of the values with another character: + +```yaml +vars: + unit_tests_config: + last_spaces_replace_char: "." +``` + +This will replace the spaces at the end of the values with a dot. The result will be displayed like this: + +```yaml +MODEL: customers +TEST: should sum order values to calculate customer_lifetime_value +Rows mismatch: +| diff | count | some_column | +| ---- | ----- | ----------- | +| + | 1 | John | +| - | 1 | John.. 
| +``` + # Known Limitations - You can not have a *model* with the same name as a *source* or a *seed* (unless you set the *use_qualified_sources* option to *true*). diff --git a/integration-tests/tests/unit/expect_no_rows.sql b/integration-tests/tests/unit/expect_no_rows.sql new file mode 100644 index 0000000..c39effb --- /dev/null +++ b/integration-tests/tests/unit/expect_no_rows.sql @@ -0,0 +1,13 @@ +{{ + config( + tags=['unit-test', 'bigquery', 'snowflake', 'postgres'] + ) +}} + +{% call dbt_unit_testing.test('model_b_references_a', 'sample test') %} + {% call dbt_unit_testing.mock_ref ('model_a') %} + select 0 as a, 'a' as b + {% endcall %} + {% call dbt_unit_testing.expect_no_rows() %} + {% endcall %} +{% endcall %} diff --git a/macros/mock_builders.sql b/macros/mock_builders.sql index 63b6232..fddffd8 100644 --- a/macros/mock_builders.sql +++ b/macros/mock_builders.sql @@ -53,6 +53,17 @@ {{ return (dbt_unit_testing.append_json(expectations)) }} {% endmacro %} +{% macro expect_no_rows(options={}) %} + {% set dummy = caller() %} + {% set expectations = { + "type": "expectations", + "options": options, + "input_values": "select a from (select 1 as a) as s where false", + } + %} + {{ return (dbt_unit_testing.append_json(expectations)) }} +{% endmacro %} + {% macro append_json(json) %} {{ return (json | tojson() ~ '####_JSON_LINE_DELIMITER_####') }} {% endmacro %} diff --git a/macros/output.sql b/macros/output.sql index 3d3ad0a..4eb0277 100644 --- a/macros/output.sql +++ b/macros/output.sql @@ -1,4 +1,4 @@ -{% macro print_table(agate_table) %} +{% macro print_table(agate_table, options={}) %} {% set columns_start_index = 2 %} {% set columns_info = [] %} {% for col_name in agate_table.column_names %} @@ -47,10 +47,14 @@ {% endfor %} {{ dbt_unit_testing.println("| " ~ cells | join(" | ") ~ " |")}} + {% set last_spaces_replace_char = options.last_spaces_replace_char | default(" ") %} {% for row in agate_table.rows %} {% set cells = [] %} {% for cell_value in row %} {% set col_index = loop.index0 %} + {% if cell_value is string %} + {% set cell_value = dbt_unit_testing.replace_last_spaces_with(last_spaces_replace_char, cell_value) %} + {% endif %} {% set padded = dbt_unit_testing.pad(cell_value, columns_info[col_index].max_length, pad_right=cell_value is string) %} {% if columns_info[col_index].has_differences %} {% do cells.append("{RED}" ~ padded ~ "{RESET}") %} @@ -60,7 +64,20 @@ {% endfor %} {{ dbt_unit_testing.println("| " ~ cells | join(" | ") ~ " |")}} {% endfor %} +{% endmacro %} +{% macro replace_last_spaces_with(replacement, s) %} + {% set rs = s | reverse %} + {% set replaced = namespace(value="", stop=false) %} + {% for i in range(0, rs | length) %} + {% if rs[i] == ' ' and not replaced.stop %} + {% set replaced.value = replaced.value ~ replacement %} + {% else %} + {% set replaced.value = replaced.value ~ rs[i] %} + {% set replaced.stop = true %} + {% endif %} + {% endfor %} + {{ return(replaced.value | reverse) }} {% endmacro %} {% macro pad(v, pad, pad_right=false, c=" ") %} @@ -77,6 +94,7 @@ .replace("{RED}", "\x1b[0m\x1b[31m") .replace("{GREEN}", "\x1b[0m\x1b[32m") .replace("{YELLOW}", "\x1b[0m\x1b[33m") + .replace("{BG_YELLOW}", "\x1b[0m\x1b[43m") .replace("{RESET}", "\x1b[0m")) }} {% endmacro %} diff --git a/macros/tests.sql b/macros/tests.sql index a109985..551b850 100644 --- a/macros/tests.sql +++ b/macros/tests.sql @@ -147,7 +147,7 @@ {% endif %} {% if test_report.different_rows_count > 0 %} {{ dbt_unit_testing.println('{RED}ERROR: {YELLOW}Rows mismatch:') }} - {{ 
dbt_unit_testing.print_table(test_report.test_differences) }} + {{ dbt_unit_testing.print_table(test_report.test_differences, options=test_configuration.options) }} {% endif %} {% endmacro %} From d4bfa655c7e7e2259459a6cd43631266e14158fc Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Thu, 11 Jan 2024 14:02:48 +0000 Subject: [PATCH 16/27] Fix column transformations for Snowflake --- integration-tests/dbt_project.yml | 2 +- .../model_for_column_transformations.sql | 2 +- .../unit/model_for_column_transformations.sql | 23 ++++++++++++++++--- .../model_for_column_transformations_sf.sql | 20 ++++++++++++++++ run-tests-helper.sh | 4 ++-- 5 files changed, 44 insertions(+), 7 deletions(-) create mode 100644 integration-tests/tests/unit/snowflake/model_for_column_transformations_sf.sql diff --git a/integration-tests/dbt_project.yml b/integration-tests/dbt_project.yml index 140c81c..2493b7b 100644 --- a/integration-tests/dbt_project.yml +++ b/integration-tests/dbt_project.yml @@ -24,7 +24,7 @@ vars: unit_tests_config: column_transformations: model_for_column_transformations: - b: upper(b) + column_a: round(##column##, 5) model_references_model_with_structs: a: to_json_string(##column##) verbose: false diff --git a/integration-tests/models/model_for_column_transformations.sql b/integration-tests/models/model_for_column_transformations.sql index 4c1ad93..f0b09ec 100644 --- a/integration-tests/models/model_for_column_transformations.sql +++ b/integration-tests/models/model_for_column_transformations.sql @@ -1 +1 @@ -select a / 3 as a, b from {{ dbt_unit_testing.ref ('model_a') }} +select a / 3 as column_a, b as column_b from {{ dbt_unit_testing.ref ('model_a') }} diff --git a/integration-tests/tests/unit/model_for_column_transformations.sql b/integration-tests/tests/unit/model_for_column_transformations.sql index 8249d23..90d1990 100644 --- a/integration-tests/tests/unit/model_for_column_transformations.sql +++ b/integration-tests/tests/unit/model_for_column_transformations.sql @@ -1,11 +1,12 @@ {{ config( - tags=['unit-test', 'bigquery', 'snowflake', 'postgres'] + tags=['unit-test', 'bigquery', 'postgres'] ) }} {% set column_transformations = { - "a": "round(##column##, 4)" + "column_a": "round(##column##, 4)", + "column_b": "upper(##column##)" } %} @@ -14,6 +15,22 @@ select 10.0 as a, 'lower' as b {% endcall %} {% call dbt_unit_testing.expect() %} - select 3.3333 as a, 'LOWER' as b + select 3.3333 as column_a, 'LOWER' as column_b + {% endcall %} +{% endcall %} + +UNION ALL + +{% set column_transformations = { + "column_b": "upper(##column##)" + } +%} + +{% call dbt_unit_testing.test('model_for_column_transformations', 'should merge config from dbt_project', options={"column_transformations": column_transformations}) %} + {% call dbt_unit_testing.mock_ref ('model_a') %} + select 10.0 as a, 'lower' as b + {% endcall %} + {% call dbt_unit_testing.expect() %} + select 3.33333 as column_a, 'LOWER' as column_b {% endcall %} {% endcall %} diff --git a/integration-tests/tests/unit/snowflake/model_for_column_transformations_sf.sql b/integration-tests/tests/unit/snowflake/model_for_column_transformations_sf.sql new file mode 100644 index 0000000..b9077ce --- /dev/null +++ b/integration-tests/tests/unit/snowflake/model_for_column_transformations_sf.sql @@ -0,0 +1,20 @@ +{{ + config( + tags=['unit-test', 'snowflake'] + ) +}} + +{% set column_transformations = { + "COLUMN_A": "round(##column##, 4)", + "COLUMN_B": "upper(##column##)" + } +%} + +{% call dbt_unit_testing.test('model_for_column_transformations', 
options={"column_transformations": column_transformations}) %} + {% call dbt_unit_testing.mock_ref ('model_a') %} + select 10.0 as a, 'lower' as b + {% endcall %} + {% call dbt_unit_testing.expect() %} + select 3.3333 as COLUMN_A, 'LOWER' as column_b + {% endcall %} +{% endcall %} diff --git a/run-tests-helper.sh b/run-tests-helper.sh index 14367b8..5e417bc 100644 --- a/run-tests-helper.sh +++ b/run-tests-helper.sh @@ -7,9 +7,9 @@ function run_tests() { if [ "$PROFILE" == "postgres" ]; then VERSIONS="1.3.4 1.4.6 1.5.4 1.7.4" elif [ "$PROFILE" == "bigquery" ]; then - VERSIONS="1.3.2" + VERSIONS="1.7.2" elif [ "$PROFILE" == "snowflake" ]; then - VERSIONS="1.3.1" + VERSIONS="1.7.1" else echo "Invalid profile name: $PROFILE" exit 1 From 6b72b431d1126f65b20d0c4408585feb2d6bc946 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Mon, 15 Jan 2024 09:54:31 +0000 Subject: [PATCH 17/27] Fix README --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b1e5e3b..b553889 100644 --- a/README.md +++ b/README.md @@ -656,11 +656,11 @@ FROM raw_financial_data ``` -You can ensure that revenue is rounded to two decimal places when testing `financial_model`, and your expectations are also rounded to five decimal places. This ensures that the test is precise and consistent. +You can ensure that `avg_revenue` is rounded to five decimal places when testing `financial_model`, and your expectations are also rounded to five decimal places. This ensures that the test is precise and consistent. ```Jinja {% set column_transformations = { - "revenue": "round(##column##, 5)" + "avg_revenue": "round(##column##, 5)" } %} {% call dbt_unit_testing.test('financial_model', options={"column_transformations": column_transformations}) %} @@ -672,7 +672,7 @@ You can ensure that revenue is rounded to two decimal places when testing `finan select 3.0 as revenue {% endcall %} {% call dbt_unit_testing.expect() %} - select 3.33333 as revenue + select 3.33333 as avg_revenue {% endcall %} {% endcall %} ``` From 7b7dc63884c75431f5fcc5d6f419f06227850f02 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Tue, 16 Jan 2024 10:30:56 +0000 Subject: [PATCH 18/27] Fix issue with build command --- macros/tests.sql | 30 +++++++++++++++++------------- run-integration-tests.sh | 2 ++ 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/macros/tests.sql b/macros/tests.sql index 551b850..4d54f38 100644 --- a/macros/tests.sql +++ b/macros/tests.sql @@ -1,20 +1,24 @@ {% macro test(model_name, test_description='(no description)', options={}) %} {{ dbt_unit_testing.ref_tested_model(model_name) }} - {% if execute and flags.WHICH == 'test' %} - {{ dbt_unit_testing.set_test_context("is_incremental_should_be_true_for_this_model", "") }} - {% set mocks_and_expectations_json_str = caller() %} - {% set model_version = kwargs["version"] | default(kwargs["v"]) | default(none) %} - {% set model_node = {"package_name": model.package_name, "name": model_name, "version": model_version} %} - {% set test_configuration, test_queries = dbt_unit_testing.build_configuration_and_test_queries(model_node, test_description, options, mocks_and_expectations_json_str) %} - {% set test_report = dbt_unit_testing.build_test_report(test_configuration, test_queries) %} - - {% if not test_report.succeeded %} - {{ dbt_unit_testing.show_test_report(test_configuration, test_report) }} + {% if execute %} + {% if flags.WHICH == 'test' %} + {{ dbt_unit_testing.set_test_context("is_incremental_should_be_true_for_this_model", "") 
}} + {% set mocks_and_expectations_json_str = caller() %} + {% set model_version = kwargs["version"] | default(kwargs["v"]) | default(none) %} + {% set model_node = {"package_name": model.package_name, "name": model_name, "version": model_version} %} + {% set test_configuration, test_queries = dbt_unit_testing.build_configuration_and_test_queries(model_node, test_description, options, mocks_and_expectations_json_str) %} + {% set test_report = dbt_unit_testing.build_test_report(test_configuration, test_queries) %} + + {% if not test_report.succeeded %} + {{ dbt_unit_testing.show_test_report(test_configuration, test_report) }} + {% endif %} + + select 1 as a from (select 1) as t where {{ not test_report.succeeded }} + {{ dbt_unit_testing.clear_test_context() }} + {% else %} + select 1 as a from (select 1) as t where false {% endif %} - - select 1 as a from (select 1) as t where {{ not test_report.succeeded }} - {{ dbt_unit_testing.clear_test_context() }} {% endif %} {% endmacro %} diff --git a/run-integration-tests.sh b/run-integration-tests.sh index 2674a59..cd2e349 100755 --- a/run-integration-tests.sh +++ b/run-integration-tests.sh @@ -25,6 +25,8 @@ dbt deps --target "$PROFILE" dbt run-operation macro_with_ref --target "$PROFILE" +dbt build --target "$PROFILE" --select tag:unit-test,tag:"$PROFILE" --exclude tag:versioned tag:db-dependency + # create seeds in the database dbt seed --target "$PROFILE" --select seeds/real_seeds # run tests with no database dependency From fd0b959f577035cb4eab2aeada3afa69d9ea1bf9 Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Wed, 17 Jan 2024 08:47:37 +0000 Subject: [PATCH 19/27] Fix issue with expect_no_rows --- macros/mock_builders.sql | 2 +- macros/sql_builders.sql | 2 +- macros/tests.sql | 15 +++++++++++---- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/macros/mock_builders.sql b/macros/mock_builders.sql index fddffd8..0297511 100644 --- a/macros/mock_builders.sql +++ b/macros/mock_builders.sql @@ -58,7 +58,7 @@ {% set expectations = { "type": "expectations", "options": options, - "input_values": "select a from (select 1 as a) as s where false", + "no_rows": true, } %} {{ return (dbt_unit_testing.append_json(expectations)) }} diff --git a/macros/sql_builders.sql b/macros/sql_builders.sql index 56321ab..3c95943 100644 --- a/macros/sql_builders.sql +++ b/macros/sql_builders.sql @@ -91,7 +91,7 @@ {%- endif %} {% set name_parts = dbt_unit_testing.map([node.database, node.schema, name], dbt_unit_testing.quote_identifier) %} - select * from {{ name_parts | join('.') }} where false + select * from {{ name_parts | join('.') }} where 1 = 0 {%- else -%} {% if complete %} {{ dbt_unit_testing.build_model_complete_sql(node) }} diff --git a/macros/tests.sql b/macros/tests.sql index 4d54f38..3ab8167 100644 --- a/macros/tests.sql +++ b/macros/tests.sql @@ -14,10 +14,10 @@ {{ dbt_unit_testing.show_test_report(test_configuration, test_report) }} {% endif %} - select 1 as a from (select 1) as t where {{ not test_report.succeeded }} + select * from (select 1) as t where {{ not test_report.succeeded }} {{ dbt_unit_testing.clear_test_context() }} {% else %} - select 1 as a from (select 1) as t where false + select * from (select 1) as t where 1 = 0 {% endif %} {% endif %} {% endmacro %} @@ -85,8 +85,15 @@ {% set expectations = test_configuration.expectations %} {% set model_node = dbt_unit_testing.model_node(test_configuration.model_node) %} {%- set model_complete_sql = dbt_unit_testing.build_model_complete_sql(model_node, test_configuration.mocks, 
test_configuration.options) -%} + + {% if expectations.no_rows %} + {% set expectations_sql = "select * from (" ~ model_complete_sql ~ ") as t where 1 = 0" %} + {% else %} + {% set expectations_sql = expectations.input_values %} + {% endif %} + {% set column_transformations = test_configuration.options.column_transformations[test_configuration.model_name] | default({}) %} - {% set columns_list = dbt_unit_testing.extract_columns_list(expectations.input_values) %} + {% set columns_list = dbt_unit_testing.extract_columns_list(expectations_sql) %} {% set columns_list_str = dbt_unit_testing.quote_and_join_columns(columns_list) %} {% set transformed_columns_list_str = dbt_unit_testing.apply_transformations_to_columns(columns_list, column_transformations, use_alias=true) | join(", ") %} {% set transformed_columns_list_for_grouping_str = dbt_unit_testing.apply_transformations_to_columns(columns_list, column_transformations, use_alias=false) | join(", ") %} @@ -99,7 +106,7 @@ {% endset %} {%- set expectations_query -%} - select count(1) as {{ count_column }}, {{ transformed_columns_list_str }} from ({{ expectations.input_values }}) as s group by {{ transformed_columns_list_for_grouping_str }} + select count(1) as {{ count_column }}, {{ transformed_columns_list_str }} from ({{ expectations_sql }}) as s group by {{ transformed_columns_list_for_grouping_str }} {% endset %} {%- set test_query -%} From 5be2a9e75a9bf58c1113abb2187283fece805f0d Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Wed, 17 Jan 2024 09:11:39 +0000 Subject: [PATCH 20/27] Run tests on the build command as well --- macros/tests.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macros/tests.sql b/macros/tests.sql index 3ab8167..255eeee 100644 --- a/macros/tests.sql +++ b/macros/tests.sql @@ -2,7 +2,7 @@ {{ dbt_unit_testing.ref_tested_model(model_name) }} {% if execute %} - {% if flags.WHICH == 'test' %} + {% if flags.WHICH in ('test', 'build') %} {{ dbt_unit_testing.set_test_context("is_incremental_should_be_true_for_this_model", "") }} {% set mocks_and_expectations_json_str = caller() %} {% set model_version = kwargs["version"] | default(kwargs["v"]) | default(none) %} From 49b53e9a1c299f5941d2c3242576857538cc363c Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Wed, 17 Jan 2024 19:21:16 +0000 Subject: [PATCH 21/27] Add input_values to no_rows expectations --- macros/mock_builders.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/macros/mock_builders.sql b/macros/mock_builders.sql index 0297511..417bc2c 100644 --- a/macros/mock_builders.sql +++ b/macros/mock_builders.sql @@ -58,6 +58,7 @@ {% set expectations = { "type": "expectations", "options": options, + "input_values": "select * from (select 1) as t where 1 = 0", "no_rows": true, } %} From c5d2f5757c9130eb6063798ccf5d4962f13b19be Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Fri, 19 Jan 2024 13:00:07 +0000 Subject: [PATCH 22/27] Add stale github action --- .github/workflows/stale.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000..cdd0640 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,28 @@ +name: "Close Stale Issues and PRs" + +on: + schedule: + - cron: "0 0 * * *" # Runs every day at midnight + +jobs: + stale: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Mark stale issues and pull requests + uses: actions/stale@v9 + with: + debug-only: 
true
+          stale-issue-message: "This issue has been automatically marked as stale because it has not had activity in the last 30 days. It will be closed in 5 days if no further activity occurs. Thank you for your contributions."
+          stale-pr-message: "This pull request has been automatically marked as stale because it has not had activity in the last 45 days. It will be closed in 10 days if no further activity occurs. Thank you for your contributions."
+          days-before-issue-stale: 30
+          days-before-pr-stale: 45
+          days-before-issue-close: 5
+          days-before-pr-close: 10
+          stale-issue-label: "stale"
+          stale-pr-label: "stale"
+          close-issue-label: "wontfix"
+          close-pr-label: "wontfix"
+          any-of-labels: "question"

From c30618eb6226f57c224bd3fc28fd7d8a51904ba2 Mon Sep 17 00:00:00 2001
From: Pedro Sousa
Date: Fri, 19 Jan 2024 13:03:48 +0000
Subject: [PATCH 23/27] Add workflow_dispatch to stale action

---
 .github/workflows/stale.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index cdd0640..abde33c 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -3,6 +3,7 @@ name: "Close Stale Issues and PRs"
 on:
   schedule:
     - cron: "0 0 * * *" # Runs every day at midnight
+  workflow_dispatch:
 
 jobs:
   stale:

From e54329c3971c8c397f284a36de6a627832a24263 Mon Sep 17 00:00:00 2001
From: Pedro Sousa
Date: Mon, 22 Jan 2024 12:39:28 +0000
Subject: [PATCH 24/27] Update README

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index b553889..1fa1698 100644
--- a/README.md
+++ b/README.md
@@ -41,7 +41,7 @@ Add the following to packages.yml
 ```yaml
 packages:
   - git: "https://github.com/EqualExperts/dbt-unit-testing"
-    revision: v0.3.2
+    revision: v0.4.12
 ```
 
 [read the docs](https://docs.getdbt.com/docs/package-management) for more information on installing packages. 
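
For reference, patches 19 and 21 above adjust how `expect_no_rows` expectations are built. A minimal unit test using that helper might look like the sketch below — an illustrative example only, with hypothetical model names (`my_model`, `stg_orders`); the exact invocation of `dbt_unit_testing.expect_no_rows` should be confirmed against the package README for the installed version.

```Jinja
{# Illustrative sketch only — not part of this patch series. #}
{# Assumes a hypothetical model `my_model` that drops cancelled orders. #}
{% call dbt_unit_testing.test('my_model', 'returns no rows when every order is cancelled') %}
  {% call dbt_unit_testing.mock_ref('stg_orders') %}
    select 1 as order_id, 'cancelled' as status
  {% endcall %}
  {{ dbt_unit_testing.expect_no_rows() }}
{% endcall %}
```

With patch 20 above, a test written this way is picked up by `dbt build` as well as `dbt test`.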
From 3b2ffd9017740a2d91acdca6c39431acb3a52f4a Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Mon, 15 Apr 2024 12:06:02 +0100 Subject: [PATCH 25/27] Update dbt versions --- run-tests-helper.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/run-tests-helper.sh b/run-tests-helper.sh index 5e417bc..dbcd5a8 100644 --- a/run-tests-helper.sh +++ b/run-tests-helper.sh @@ -5,11 +5,11 @@ function run_tests() { local PROFILE=$2 if [ "$PROFILE" == "postgres" ]; then - VERSIONS="1.3.4 1.4.6 1.5.4 1.7.4" + VERSIONS="1.3.4 1.4.6 1.5.4 1.7.11" elif [ "$PROFILE" == "bigquery" ]; then - VERSIONS="1.7.2" + VERSIONS="1.7.7" elif [ "$PROFILE" == "snowflake" ]; then - VERSIONS="1.7.1" + VERSIONS="1.7.3" else echo "Invalid profile name: $PROFILE" exit 1 From 7187cfcf903009d300b4dc097af0e4944d9c8aed Mon Sep 17 00:00:00 2001 From: Patrick Ruoff Date: Fri, 9 Feb 2024 14:35:38 +0100 Subject: [PATCH 26/27] FEAT: make sql_from_csv externally callable --- macros/input_parsing.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macros/input_parsing.sql b/macros/input_parsing.sql index 59483a8..b22c25f 100644 --- a/macros/input_parsing.sql +++ b/macros/input_parsing.sql @@ -9,7 +9,7 @@ {% endmacro %} {% macro sql_from_csv(options={}) %} - {{ return (sql_from_csv_input(caller(), options)) }} + {{ return (dbt_unit_testing.sql_from_csv_input(caller(), options)) }} {% endmacro %} {% macro sql_from_csv_input(csv_table, options) %} From 9735b6eb4d056165f187782b11e8afb2dbe4ee1b Mon Sep 17 00:00:00 2001 From: Pedro Sousa Date: Mon, 15 Apr 2024 12:34:43 +0100 Subject: [PATCH 27/27] Update stale workflow to run on the 1st of every month --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index abde33c..30b121a 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -2,7 +2,7 @@ name: "Close Stale Issues and PRs" on: schedule: - - cron: "0 0 * * *" # Runs every day at midnight + - cron: "0 0 1 * *" # 12:00 AM on the 1st of every month workflow_dispatch: jobs:
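
Patch 26 above qualifies the inner `sql_from_csv_input` call so that `sql_from_csv` can be invoked from a project's own macros. A rough sketch of such a call is shown below; the comma-separated CSV layout and the `{% set %}` capture pattern are assumptions for illustration — the exact CSV syntax and options accepted are documented in the package README.

```Jinja
{# Rough sketch only: CSV layout and capture pattern are assumptions. #}
{% set mocked_rows_sql %}
  {% call dbt_unit_testing.sql_from_csv() %}
    id, name
    1, 'John'
    2, 'Jane'
  {% endcall %}
{% endset %}
```

Here `mocked_rows_sql` would hold the SQL equivalent of the CSV rows, which could then be reused elsewhere in a test.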