diff --git a/tests/integration/flows/test_aselite_to_adls.py b/tests/integration/flows/test_aselite_to_adls.py
index 48146c293..458bd785f 100644
--- a/tests/integration/flows/test_aselite_to_adls.py
+++ b/tests/integration/flows/test_aselite_to_adls.py
@@ -15,9 +15,6 @@
 TMP_FILE_NAME = "test_flow.csv"
 MAIN_DF = None
 
-df_task = ASELiteToDF()
-file_to_adls_task = AzureDataLakeUpload()
-
 
 def test_aselite_to_adls():
     credentials_secret = PrefectSecret("aselite").run()
diff --git a/tests/integration/tasks/test_duckdb.py b/tests/integration/tasks/test_duckdb.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tests/integration/tasks/test_genesys_task.py b/tests/integration/tasks/test_genesys.py
similarity index 100%
rename from tests/integration/tasks/test_genesys_task.py
rename to tests/integration/tasks/test_genesys.py
diff --git a/tests/integration/tasks/test_github.py b/tests/integration/tasks/test_github.py
index 171dbfb65..1c5dfbc0a 100644
--- a/tests/integration/tasks/test_github.py
+++ b/tests/integration/tasks/test_github.py
@@ -2,13 +2,6 @@
 
 from viadot.tasks.github import DownloadGitHubFile
 
-# def test_github_clone_task():
-#     clone_repo_task = CloneRepo()
-#     repo = "fishtown-analytics/dbt"
-#     repo_name = repo.split("/")[-1]
-#     clone_repo_task.run(repo=repo)
-#     assert os.path.exists(repo_name)
-
 
 def test_download_github_file():
     task = DownloadGitHubFile()
diff --git a/tests/integration/tasks/test_sqlite_insert.py b/tests/integration/tasks/test_sqlite.py
similarity index 100%
rename from tests/integration/tasks/test_sqlite_insert.py
rename to tests/integration/tasks/test_sqlite.py
diff --git a/tests/unit/tasks/test_uk_carbon_intensity.py b/tests/unit/tasks/test_uk_carbon_intensity.py
index fb96b8604..fb611e0bc 100644
--- a/tests/unit/tasks/test_uk_carbon_intensity.py
+++ b/tests/unit/tasks/test_uk_carbon_intensity.py
@@ -1,8 +1,7 @@
 import os
 
-import openpyxl
 import pytest
-from openpyxl import load_workbook
+
 from viadot.tasks.open_apis.uk_carbon_intensity import StatsToCSV, StatsToExcel
 
 
@@ -22,22 +21,3 @@ def ukci_task_excel():
     ukci_task_excel = StatsToExcel()
     yield ukci_task_excel
     os.remove(TEST_FILE_PATH_EXCEL)
-
-
-# def test_uk_carbon_intensity_to_csv(ukci_task):
-#     ukci_task.run(path=TEST_FILE_PATH)
-#     if_exist = os.path.isfile(TEST_FILE_PATH)
-#     assert if_exist == True
-
-
-# def test_uk_carbon_intensity_to_excel(ukci_task_excel):
-#     ukci_task_excel.run(path=TEST_FILE_PATH_EXCEL)
-#     if_exist = os.path.isfile(TEST_FILE_PATH_EXCEL)
-#     assert if_exist == True
-
-
-# def test_uk_carbon_intensity_to_excel_contain(ukci_task_excel):
-#     ukci_task_excel.run(path=TEST_FILE_PATH_EXCEL)
-#     excel_file = load_workbook(TEST_FILE_PATH_EXCEL)
-#     value = excel_file["A1"].value
-#     assert value == "from"
diff --git a/tests/unit/test_base.py b/tests/unit/test_base.py
index c20336b29..f7069e181 100644
--- a/tests/unit/test_base.py
+++ b/tests/unit/test_base.py
@@ -81,51 +81,3 @@ def test_handle_if_empty(caplog):
         src._handle_if_empty(if_empty="fail")
     with pytest.raises(SKIP):
         src._handle_if_empty(if_empty="skip")
-
-
-# def test_to_csv_append():
-#     """Test whether `to_csv()` with the append option writes data of correct shape"""
-#     driver = "/usr/lib/x86_64-linux-gnu/odbc/libsqlite3odbc.so"
-#     db_name = "testfile.sqlite"
-#     server = "localhost"
-#     source = SQL(
-#         credentials=dict(driver=driver, db_name=db_name, server=server, user=None)
-#     )
-
-#     # Generate test table.
-#     df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
-#     source.create_table("test", dtypes={"a": "INT", "b": "INT"}, if_exists="replace")
-#     source.insert_into(TABLE, df)
-
-#     # Write the table to a CSV three times in `append` mode.
-#     for i in range(3):
-#         source.to_csv(path=PATH, query="SELECT * FROM test", if_exists="append")
-
-#     # Read the CSV and validate no. of rows and columns.
-#     out_df = pd.read_csv(PATH, sep="\t")
-
-#     target_length = 3 * df.shape[0]
-#     target_width = df.shape[0]
-
-#     actual_length = out_df.shape[0]
-#     actual_width = out_df.shape[1]
-
-#     assert actual_length == target_length and actual_width == target_width
-
-#     # Clean up.
-#     os.remove(PATH)
-
-
-# GitHub changes the string and makes the test fail
-# def test_conn_str():
-#     s = SQL(
-#         driver=CREDENTIALS["driver"],
-#         server=CREDENTIALS["server"],
-#         db=CREDENTIALS["db_name"],
-#         user=CREDENTIALS["user"],
-#         pw=CREDENTIALS["password"],
-#     )
-#     assert (
-#         s.conn_str
-#         == "DRIVER=ODBC Driver 17 for SQL Server;SERVER=s123.database.windows.net;DATABASE=a-b-c;UID={my_user@example.com};PWD={a123;@4}"
-#     )
diff --git a/tests/unit/test_duckdb.py b/tests/unit/test_duckdb.py
index 4cd5ee0e4..eb3800f82 100644
--- a/tests/unit/test_duckdb.py
+++ b/tests/unit/test_duckdb.py
@@ -85,7 +85,7 @@ def test_create_table_from_multiple_parquet(duckdb):
     duckdb.run(f"DROP SCHEMA {SCHEMA}")
 
 
-def test__check_if_table_exists(duckdb, TEST_PARQUET_FILE_PATH):
+def test_check_if_table_exists(duckdb, TEST_PARQUET_FILE_PATH):
     assert not duckdb._check_if_table_exists(table=TABLE, schema=SCHEMA)
     duckdb.create_table_from_parquet(
         schema=SCHEMA, table=TABLE, path=TEST_PARQUET_FILE_PATH
diff --git a/tests/unit/test_supermetrics.py b/tests/unit/test_supermetrics.py
index 36b1320ba..4dace1bd9 100644
--- a/tests/unit/test_supermetrics.py
+++ b/tests/unit/test_supermetrics.py
@@ -105,7 +105,7 @@
 }
 
 
-def test___get_col_names_google_analytics_pivoted():
+def test_get_col_names_google_analytics_pivoted():
     columns = Supermetrics._get_col_names_google_analytics(response=RESPONSE_PIVOTED)
     assert columns == [
         "Date",
@@ -117,6 +117,6 @@
     ]
 
 
-def test___get_col_names_google_analytics_pivoted_no_data():
+def test_get_col_names_google_analytics_pivoted_no_data():
     with pytest.raises(ValueError):
         Supermetrics._get_col_names_google_analytics(response=RESPONSE_PIVOTED_NO_DATA)