diff --git a/.changes/unreleased/Under the Hood-20230830-160616.yaml b/.changes/unreleased/Under the Hood-20230830-160616.yaml
new file mode 100644
index 000000000..018a94030
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20230830-160616.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Remove dependency on hologram
+time: 2023-08-30T16:06:16.444881-07:00
+custom:
+  Author: colin-rogers-dbt
+  Issue: "881"
diff --git a/dbt/adapters/spark/column.py b/dbt/adapters/spark/column.py
index bde49a492..a57fa0565 100644
--- a/dbt/adapters/spark/column.py
+++ b/dbt/adapters/spark/column.py
@@ -3,13 +3,12 @@
 
 from dbt.adapters.base.column import Column
 from dbt.dataclass_schema import dbtClassMixin
-from hologram import JsonDict
 
 Self = TypeVar("Self", bound="SparkColumn")
 
 
 @dataclass
-class SparkColumn(dbtClassMixin, Column):  # type: ignore
+class SparkColumn(dbtClassMixin, Column):
     table_database: Optional[str] = None
     table_schema: Optional[str] = None
     table_name: Optional[str] = None
@@ -63,7 +62,7 @@ def convert_table_stats(raw_stats: Optional[str]) -> Dict[str, Any]:
                 table_stats[f"stats:{key}:include"] = True
         return table_stats
 
-    def to_column_dict(self, omit_none: bool = True, validate: bool = False) -> JsonDict:
+    def to_column_dict(self, omit_none: bool = True, validate: bool = False) -> Dict[str, Any]:
         original_dict = self.to_dict(omit_none=omit_none)
         # If there are stats, merge them into the root of the dict
         original_stats = original_dict.pop("table_stats", None)
diff --git a/dbt/adapters/spark/connections.py b/dbt/adapters/spark/connections.py
index beb2b2699..966f5584e 100644
--- a/dbt/adapters/spark/connections.py
+++ b/dbt/adapters/spark/connections.py
@@ -23,7 +23,7 @@ from datetime import datetime
 import sqlparams
 
 from dbt.contracts.connection import Connection
-from hologram.helpers import StrEnum
+from dbt.dataclass_schema import StrEnum
 
 from dataclasses import dataclass, field
 from typing import Any, Dict, Optional, Union, Tuple, List, Generator, Iterable, Sequence
@@ -59,9 +59,10 @@ class SparkConnectionMethod(StrEnum):
 
 @dataclass
 class SparkCredentials(Credentials):
-    host: str
-    method: SparkConnectionMethod
-    database: Optional[str]  # type: ignore
+    host: Optional[str] = None
+    schema: Optional[str] = None  # type: ignore
+    method: SparkConnectionMethod = None  # type: ignore
+    database: Optional[str] = None  # type: ignore
     driver: Optional[str] = None
     cluster: Optional[str] = None
     endpoint: Optional[str] = None
@@ -90,6 +91,13 @@ def cluster_id(self) -> Optional[str]:
         return self.cluster
 
     def __post_init__(self) -> None:
+        if self.method is None:
+            raise dbt.exceptions.DbtRuntimeError("Must specify `method` in profile")
+        if self.host is None:
+            raise dbt.exceptions.DbtRuntimeError("Must specify `host` in profile")
+        if self.schema is None:
+            raise dbt.exceptions.DbtRuntimeError("Must specify `schema` in profile")
+
         # spark classifies database and schema as the same thing
         if self.database is not None and self.database != self.schema:
             raise dbt.exceptions.DbtRuntimeError(
@@ -154,7 +162,7 @@ def type(self) -> str:
 
     @property
     def unique_field(self) -> str:
-        return self.host
+        return self.host  # type: ignore
 
     def _connection_keys(self) -> Tuple[str, ...]:
         return "host", "port", "cluster", "endpoint", "schema", "organization"