diff --git a/src/sempy_labs/__init__.py b/src/sempy_labs/__init__.py index 97a0bd23..99b0f93e 100644 --- a/src/sempy_labs/__init__.py +++ b/src/sempy_labs/__init__.py @@ -34,7 +34,7 @@ # list_sqlendpoints, # list_tables, list_warehouses, - # list_workspace_role_assignments, + list_workspace_role_assignments, create_warehouse, update_item, ) @@ -113,7 +113,7 @@ #'list_sqlendpoints', #'list_tables', "list_warehouses", - #'list_workspace_role_assignments', + 'list_workspace_role_assignments', "create_warehouse", "update_item", "create_abfss_path", diff --git a/src/sempy_labs/_ai.py b/src/sempy_labs/_ai.py index a1592a45..237a8d11 100644 --- a/src/sempy_labs/_ai.py +++ b/src/sempy_labs/_ai.py @@ -14,6 +14,7 @@ def optimize_semantic_model(dataset: str, workspace: Optional[str] = None): from ._model_bpa import run_model_bpa from .directlake._fallback import check_fallback_reason from ._helper_functions import format_dax_object_name + from .tom import connect_semantic_model modelBPA = run_model_bpa( dataset=dataset, workspace=workspace, return_dataframe=True @@ -78,10 +79,7 @@ def generate_measure_descriptions( validModels = ["gpt-35-turbo", "gpt-35-turbo-16k", "gpt-4"] if gpt_model not in validModels: - print( - f"{icons.red_dot} The '{gpt_model}' model is not a valid model. Enter a gpt_model from this list: {validModels}." - ) - return + raise ValueError(f"{icons.red_dot} The '{gpt_model}' model is not a valid model. Enter a gpt_model from this list: {validModels}.") dfM = fabric.list_measures(dataset=dataset, workspace=workspace) @@ -116,8 +114,8 @@ def generate_measure_descriptions( ) # Update the model to use the new descriptions - tom_server = fabric.create_tom_server(readonly=False, workspace=workspace) - m = tom_server.Databases.GetByName(dataset).Model + #with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom: + # for t in m.Tables: # tName = t.Name @@ -173,48 +171,33 @@ def generate_aggs( numericTypes = ["Int64", "Double", "Decimal"] if any(value not in aggTypes for value in columns.values()): - print( - f"{icons.red_dot} Invalid aggregation type(s) have been specified in the 'columns' parameter. Valid aggregation types: {aggTypes}." - ) - return + raise ValueError(f"{icons.red_dot} Invalid aggregation type(s) have been specified in the 'columns' parameter. Valid aggregation types: {aggTypes}.") dfC = fabric.list_columns(dataset=dataset, workspace=workspace) dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) dfM = fabric.list_measures(dataset=dataset, workspace=workspace) dfR = fabric.list_relationships(dataset=dataset, workspace=workspace) if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()): - print( - f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode. This function is only relevant for Direct Lake semantic models." - ) - return - + raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode. This function is only relevant for Direct Lake semantic models.") + dfC_filtT = dfC[dfC["Table Name"] == table_name] if len(dfC_filtT) == 0: - print( - f"{icons.red_dot} The '{table_name}' table does not exist in the '{dataset}' semantic model within the '{workspace}' workspace." 
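The hunks above (and most of the hunks that follow) replace print-and-return error paths with raised exceptions, so an invalid argument stops execution instead of silently returning None. A minimal sketch of the pattern, reusing the gpt_model check from generate_measure_descriptions; the helper name is illustrative, not something added by this PR:

    import sempy_labs._icons as icons

    def _check_gpt_model(gpt_model: str) -> None:
        # Illustrative helper, not part of the PR.
        valid_models = ["gpt-35-turbo", "gpt-35-turbo-16k", "gpt-4"]
        if gpt_model not in valid_models:
            # Before: print(f"{icons.red_dot} ...") followed by a bare `return`,
            # which let the caller continue with an unusable value.
            # After: raise, so the failure is explicit and can be caught upstream.
            raise ValueError(
                f"{icons.red_dot} The '{gpt_model}' model is not a valid model. "
                f"Enter a gpt_model from this list: {valid_models}."
            )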
- ) - return + raise ValueError(f"{icons.red_dot} The '{table_name}' table does not exist in the '{dataset}' semantic model within the '{workspace}' workspace.") dfC_filt = dfC[ (dfC["Table Name"] == table_name) & (dfC["Column Name"].isin(columnValues)) ] if len(columns) != len(dfC_filt): - print( - f"{icons.red_dot} Columns listed in '{columnValues}' do not exist in the '{table_name}' table in the '{dataset}' semantic model within the '{workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} Columns listed in '{columnValues}' do not exist in the '{table_name}' table in the '{dataset}' semantic model within the '{workspace}' workspace.") # Check if doing sum/count/min/max etc. on a non-number column for col, agg in columns.items(): dfC_col = dfC_filt[dfC_filt["Column Name"] == col] dataType = dfC_col["Data Type"].iloc[0] if agg in aggTypesAggregate and dataType not in numericTypes: - print( - f"{icons.red_dot} The '{col}' column in the '{table_name}' table is of '{dataType}' data type. Only columns of '{numericTypes}' data types can be aggregated as '{aggTypesAggregate}' aggregation types." - ) - return + raise ValueError(f"{icons.red_dot} The '{col}' column in the '{table_name}' table is of '{dataType}' data type. Only columns of '{numericTypes}' data types can be aggregated as '{aggTypesAggregate}' aggregation types.") # Create/update lakehouse delta agg table aggSuffix = "_agg" @@ -230,10 +213,7 @@ def generate_aggs( dfI_filt = dfI[(dfI["Id"] == sqlEndpointId)] if len(dfI_filt) == 0: - print( - f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter." - ) - return + raise ValueError(f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter.") lakehouseName = dfI_filt["Display Name"].iloc[0] lakehouse_id = resolve_lakehouse_id( @@ -284,7 +264,7 @@ def generate_aggs( # Create/update semantic model agg table tom_server = fabric.create_tom_server(readonly=False, workspace=workspace) m = tom_server.Databases.GetByName(dataset).Model - f"\n{icons.in_progress} Updating the '{dataset}' semantic model..." + print(f"\n{icons.in_progress} Updating the '{dataset}' semantic model...") dfC_agg = dfC[dfC["Table Name"] == aggTableName] if len(dfC_agg) == 0: diff --git a/src/sempy_labs/_clear_cache.py b/src/sempy_labs/_clear_cache.py index dff9fcbf..6aeced12 100644 --- a/src/sempy_labs/_clear_cache.py +++ b/src/sempy_labs/_clear_cache.py @@ -20,9 +20,7 @@ def clear_cache(dataset: str, workspace: Optional[str] = None): or if no lakehouse attached, resolves to the workspace of the notebook. 
""" - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) datasetID = resolve_dataset_id(dataset=dataset, workspace=workspace) diff --git a/src/sempy_labs/_connections.py b/src/sempy_labs/_connections.py index de310ee6..f2f1baf0 100644 --- a/src/sempy_labs/_connections.py +++ b/src/sempy_labs/_connections.py @@ -60,16 +60,16 @@ def create_connection_cloud( if response.status_code == 200: o = response.json() new_data = { - "Connection Id": o["id"], - "Connection Name": o["name"], - "Connectivity Type": o["connectivityType"], - "Connection Type": o["connectionDetails"]["type"], - "Connection Path": o["connectionDetails"]["path"], - "Privacy Level": o["privacyLevel"], - "Credential Type": o["credentialDetails"]["credentialType"], - "Single Sign On Type": o["credentialDetails"]["singleSignOnType"], - "Connection Encryption": o["credentialDetails"]["connectionEncryption"], - "Skip Test Connection": o["credentialDetails"]["skipTestConnection"], + "Connection Id": o.get("id"), + "Connection Name": o.get("name"), + "Connectivity Type": o.get("connectivityType"), + "Connection Type": o.get("connectionDetails").get("type"), + "Connection Path": o.get("connectionDetails").get("path"), + "Privacy Level": o.get("privacyLevel"), + "Credential Type": o.get("credentialDetails").get("credentialType"), + "Single Sign On Type": o.get("credentialDetails").get("singleSignOnType"), + "Connection Encryption": o.get("credentialDetails").get("connectionEncryption"), + "Skip Test Connection": o.get("credentialDetails").get("skipTestConnection"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -135,17 +135,17 @@ def create_connection_on_prem( if response.status_code == 200: o = response.json() new_data = { - "Connection Id": o["id"], - "Connection Name": o["name"], - "Gateway ID": o["gatewayId"], - "Connectivity Type": o["connectivityType"], - "Connection Type": o["connectionDetails"]["type"], - "Connection Path": o["connectionDetails"]["path"], - "Privacy Level": o["privacyLevel"], - "Credential Type": o["credentialDetails"]["credentialType"], - "Single Sign On Type": o["credentialDetails"]["singleSignOnType"], - "Connection Encryption": o["credentialDetails"]["connectionEncryption"], - "Skip Test Connection": o["credentialDetails"]["skipTestConnection"], + "Connection Id": o.get("id"), + "Connection Name": o.get("name"), + "Gateway ID": o.get("gatewayId"), + "Connectivity Type": o.get("connectivityType"), + "Connection Type": o.get("connectionDetails").get("type"), + "Connection Path": o.get("connectionDetails").get("path"), + "Privacy Level": o.get("privacyLevel"), + "Credential Type": o.get("credentialDetails").get("credentialType"), + "Single Sign On Type": o.get("credentialDetails").get("singleSignOnType"), + "Connection Encryption": o.get("credentialDetails").get("connectionEncryption"), + "Skip Test Connection": o.get("credentialDetails").get("skipTestConnection"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -213,17 +213,17 @@ def create_connection_vnet( if response.status_code == 200: o = response.json() new_data = { - "Connection Id": o["id"], - "Connection Name": o["name"], - "Gateway ID": o["gatewayId"], - "Connectivity Type": o["connectivityType"], - "Connection Type": o["connectionDetails"]["type"], - "Connection Path": o["connectionDetails"]["path"], - "Privacy Level": o["privacyLevel"], - "Credential Type": 
o["credentialDetails"]["credentialType"], - "Single Sign On Type": o["credentialDetails"]["singleSignOnType"], - "Connection Encryption": o["credentialDetails"]["connectionEncryption"], - "Skip Test Connection": o["credentialDetails"]["skipTestConnection"], + "Connection Id": o.get("id"), + "Connection Name": o.get("name"), + "Gateway ID": o.get("gatewayId"), + "Connectivity Type": o.get("connectivityType"), + "Connection Type": o.get("connectionDetails").get("type"), + "Connection Path": o.get("connectionDetails").get("path"), + "Privacy Level": o.get("privacyLevel"), + "Credential Type": o.get("credentialDetails").get("credentialType"), + "Single Sign On Type": o.get("credentialDetails").get("singleSignOnType"), + "Connection Encryption": o.get("credentialDetails").get("connectionEncryption"), + "Skip Test Connection": o.get("credentialDetails").get("skipTestConnection"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) diff --git a/src/sempy_labs/_generate_semantic_model.py b/src/sempy_labs/_generate_semantic_model.py index 54d2c566..aace2e90 100644 --- a/src/sempy_labs/_generate_semantic_model.py +++ b/src/sempy_labs/_generate_semantic_model.py @@ -32,12 +32,10 @@ def create_blank_semantic_model( """ if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name() if compatibility_level < 1500: - print(f"{icons.red_dot} Compatiblity level must be at least 1500.") - return + raise ValueError(f"{icons.red_dot} Compatiblity level must be at least 1500.") tmsl = f""" {{ @@ -90,10 +88,7 @@ def create_semantic_model_from_bim( dfI_filt = dfI[(dfI["Display Name"] == dataset)] if len(dfI_filt) > 0: - print( - f"WARNING: '{dataset}' already exists as a semantic model in the '{workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} '{dataset}' already exists as a semantic model in the '{workspace}' workspace.") client = fabric.FabricRestClient() defPBIDataset = {"version": "1.0", "settings": {}} @@ -131,7 +126,7 @@ def conv_b64(file): if response.status_code == 201: print( - f"The '{dataset}' semantic model has been created within the '{workspace}' workspace." + f"{icons.green_dot} The '{dataset}' semantic model has been created within the '{workspace}' workspace." ) print(response.json()) elif response.status_code == 202: @@ -144,7 +139,7 @@ def conv_b64(file): response_body = json.loads(response.content) response = client.get(f"/v1/operations/{operationId}/result") print( - f"The '{dataset}' semantic model has been created within the '{workspace}' workspace." + f"{icons.green_dot} The '{dataset}' semantic model has been created within the '{workspace}' workspace." ) print(response.json()) @@ -178,9 +173,7 @@ def deploy_semantic_model( """ - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) if new_dataset_workspace is None: new_dataset_workspace = workspace diff --git a/src/sempy_labs/_helper_functions.py b/src/sempy_labs/_helper_functions.py index 5e6aed45..ba95d555 100644 --- a/src/sempy_labs/_helper_functions.py +++ b/src/sempy_labs/_helper_functions.py @@ -420,16 +420,10 @@ def save_as_delta_table( write_mode = write_mode.lower() if write_mode not in writeModes: - print( - f"{icons.red_dot} Invalid 'write_type' parameter. Choose from one of the following values: {writeModes}." 
- ) - return + raise ValueError(f"{icons.red_dot} Invalid 'write_type' parameter. Choose from one of the following values: {writeModes}.") if " " in delta_table_name: - print( - f"{icons.red_dot} Invalid 'delta_table_name'. Delta tables in the lakehouse cannot have spaces in their names." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'delta_table_name'. Delta tables in the lakehouse cannot have spaces in their names.") dataframe.columns = dataframe.columns.str.replace(" ", "_") @@ -476,10 +470,7 @@ def language_validate(language: str): elif len(df_filt2) == 1: lang = df_filt2["Language"].iloc[0] else: - print( - f"The '{language}' language is not a valid language code. Please refer to this link for a list of valid language codes: {url}." - ) - return + raise ValueError(f"{icons.red_dot} The '{language}' language is not a valid language code. Please refer to this link for a list of valid language codes: {url}.") return lang diff --git a/src/sempy_labs/_list_functions.py b/src/sempy_labs/_list_functions.py index 53163e0c..c6538f14 100644 --- a/src/sempy_labs/_list_functions.py +++ b/src/sempy_labs/_list_functions.py @@ -9,6 +9,7 @@ import json, time from pyspark.sql import SparkSession from typing import Optional +import sempy_labs._icons as icons def get_object_level_security(dataset: str, workspace: Optional[str] = None): """ @@ -29,44 +30,44 @@ def get_object_level_security(dataset: str, workspace: Optional[str] = None): A pandas dataframe showing the object level security for the semantic model. """ - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) - - tom_server = fabric.create_tom_server(readonly=True, workspace=workspace) - m = tom_server.Databases.GetByName(dataset).Model + from .tom import connect_semantic_model + if workspace is None: + workspace = fabric.resolve_workspace_name() + df = pd.DataFrame(columns=["Role Name", "Object Type", "Table Name", "Object Name"]) - for r in m.Roles: - for tp in r.TablePermissions: - if len(tp.FilterExpression) == 0: - columnCount = len(tp.ColumnPermissions) - objectType = "Table" - if columnCount == 0: - new_data = { - "Role Name": r.Name, - "Object Type": objectType, - "Table Name": tp.Name, - "Object Name": tp.Name, - } - df = pd.concat( - [df, pd.DataFrame(new_data, index=[0])], ignore_index=True - ) - else: - objectType = "Column" - for cp in tp.ColumnPermissions: + with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom: + + for r in tom.model.Roles: + for tp in r.TablePermissions: + if len(tp.FilterExpression) == 0: + columnCount = len(tp.ColumnPermissions) + objectType = "Table" + if columnCount == 0: new_data = { "Role Name": r.Name, "Object Type": objectType, "Table Name": tp.Name, - "Object Name": cp.Name, + "Object Name": tp.Name, } df = pd.concat( [df, pd.DataFrame(new_data, index=[0])], ignore_index=True ) + else: + objectType = "Column" + for cp in tp.ColumnPermissions: + new_data = { + "Role Name": r.Name, + "Object Type": objectType, + "Table Name": tp.Name, + "Object Name": cp.Name, + } + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) - return df + return df def list_tables(dataset: str, workspace: Optional[str] = None): @@ -88,12 +89,10 @@ def list_tables(dataset: str, workspace: Optional[str] = None): A pandas dataframe showing the semantic model's tables and their properties. 
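get_object_level_security above (and list_tables / list_annotations below) moves from creating a TOM server by hand to the connect_semantic_model context manager, which exposes the model as tom.model and presumably handles cleanup on exit. A minimal sketch of how the reworked functions use it; the dataset and workspace names are placeholders:

    from sempy_labs.tom import connect_semantic_model

    # Read-only connection; the PR passes readonly=False where changes are
    # written back to the model (e.g. _model_auto_build.py below).
    with connect_semantic_model(
        dataset="AdventureWorks", workspace="Sales", readonly=True
    ) as tom:
        for table in tom.model.Tables:
            for partition in table.Partitions:
                print(table.Name, partition.Name, partition.SourceType)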
""" - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + from .tom import connect_semantic_model - tom_server = fabric.create_tom_server(readonly=True, workspace=workspace) - m = tom_server.Databases.GetByName(dataset).Model + if workspace is None: + workspace = fabric.resolve_workspace_name() df = pd.DataFrame( columns=[ @@ -107,32 +106,34 @@ def list_tables(dataset: str, workspace: Optional[str] = None): ] ) - for t in m.Tables: - tableType = "Table" - rPolicy = bool(t.RefreshPolicy) - sourceExpression = None - if str(t.CalculationGroup) != "None": - tableType = "Calculation Group" - else: - for p in t.Partitions: - if str(p.SourceType) == "Calculated": - tableType = "Calculated Table" + with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom: - if rPolicy: - sourceExpression = t.RefreshPolicy.SourceExpression + for t in tom.model.Tables: + tableType = "Table" + rPolicy = bool(t.RefreshPolicy) + sourceExpression = None + if str(t.CalculationGroup) != "None": + tableType = "Calculation Group" + else: + for p in t.Partitions: + if str(p.SourceType) == "Calculated": + tableType = "Calculated Table" - new_data = { - "Name": t.Name, - "Type": tableType, - "Hidden": t.IsHidden, - "Data Category": t.DataCategory, - "Description": t.Description, - "Refresh Policy": rPolicy, - "Source Expression": sourceExpression, - } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + if rPolicy: + sourceExpression = t.RefreshPolicy.SourceExpression - return df + new_data = { + "Name": t.Name, + "Type": tableType, + "Hidden": t.IsHidden, + "Data Category": t.DataCategory, + "Description": t.Description, + "Refresh Policy": rPolicy, + "Source Expression": sourceExpression, + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + return df def list_annotations(dataset: str, workspace: Optional[str] = None): @@ -154,12 +155,10 @@ def list_annotations(dataset: str, workspace: Optional[str] = None): A pandas dataframe showing the semantic model's annotations and their properties. 
""" - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + from .tom import connect_semantic_model - tom_server = fabric.create_tom_server(readonly=True, workspace=workspace) - m = tom_server.Databases.GetByName(dataset).Model + if workspace is None: + workspace = fabric.resolve_workspace_name() df = pd.DataFrame( columns=[ @@ -171,183 +170,185 @@ def list_annotations(dataset: str, workspace: Optional[str] = None): ] ) - mName = m.Name - for a in m.Annotations: - objectType = "Model" - aName = a.Name - aValue = a.Value - new_data = { - "Object Name": mName, - "Parent Object Name": "N/A", - "Object Type": objectType, - "Annotation Name": aName, - "Annotation Value": aValue, - } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - for t in m.Tables: - objectType = "Table" - tName = t.Name - for ta in t.Annotations: - taName = ta.Name - taValue = ta.Value + with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom: + + mName = tom.model.Name + for a in tom.model.Annotations: + objectType = "Model" + aName = a.Name + aValue = a.Value new_data = { - "Object Name": tName, - "Parent Object Name": mName, + "Object Name": mName, + "Parent Object Name": "N/A", "Object Type": objectType, - "Annotation Name": taName, - "Annotation Value": taValue, + "Annotation Name": aName, + "Annotation Value": aValue, } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - for p in t.Partitions: - pName = p.Name - objectType = "Partition" - for pa in p.Annotations: - paName = pa.Name - paValue = pa.Value + for t in tom.model.Tables: + objectType = "Table" + tName = t.Name + for ta in t.Annotations: + taName = ta.Name + taValue = ta.Value new_data = { - "Object Name": pName, - "Parent Object Name": tName, + "Object Name": tName, + "Parent Object Name": mName, "Object Type": objectType, - "Annotation Name": paName, - "Annotation Value": paValue, + "Annotation Name": taName, + "Annotation Value": taValue, } - df = pd.concat( - [df, pd.DataFrame(new_data, index=[0])], ignore_index=True - ) - for c in t.Columns: - objectType = "Column" - cName = c.Name - for ca in c.Annotations: - caName = ca.Name - caValue = ca.Value + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + for p in t.Partitions: + pName = p.Name + objectType = "Partition" + for pa in p.Annotations: + paName = pa.Name + paValue = pa.Value + new_data = { + "Object Name": pName, + "Parent Object Name": tName, + "Object Type": objectType, + "Annotation Name": paName, + "Annotation Value": paValue, + } + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) + for c in t.Columns: + objectType = "Column" + cName = c.Name + for ca in c.Annotations: + caName = ca.Name + caValue = ca.Value + new_data = { + "Object Name": cName, + "Parent Object Name": tName, + "Object Type": objectType, + "Annotation Name": caName, + "Annotation Value": caValue, + } + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) + for ms in t.Measures: + objectType = "Measure" + measName = ms.Name + for ma in ms.Annotations: + maName = ma.Name + maValue = ma.Value + new_data = { + "Object Name": measName, + "Parent Object Name": tName, + "Object Type": objectType, + "Annotation Name": maName, + "Annotation Value": maValue, + } + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) + for h in t.Hierarchies: + objectType = "Hierarchy" + 
hName = h.Name + for ha in h.Annotations: + haName = ha.Name + haValue = ha.Value + new_data = { + "Object Name": hName, + "Parent Object Name": tName, + "Object Type": objectType, + "Annotation Name": haName, + "Annotation Value": haValue, + } + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) + for d in tom.model.DataSources: + dName = d.Name + objectType = "Data Source" + for da in d.Annotations: + daName = da.Name + daValue = da.Value new_data = { - "Object Name": cName, - "Parent Object Name": tName, + "Object Name": dName, + "Parent Object Name": mName, "Object Type": objectType, - "Annotation Name": caName, - "Annotation Value": caValue, + "Annotation Name": daName, + "Annotation Value": daValue, } - df = pd.concat( - [df, pd.DataFrame(new_data, index=[0])], ignore_index=True - ) - for ms in t.Measures: - objectType = "Measure" - measName = ms.Name - for ma in ms.Annotations: - maName = ma.Name - maValue = ma.Value + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + for r in tom.model.Relationships: + rName = r.Name + objectType = "Relationship" + for ra in r.Annotations: + raName = ra.Name + raValue = ra.Value new_data = { - "Object Name": measName, - "Parent Object Name": tName, + "Object Name": rName, + "Parent Object Name": mName, "Object Type": objectType, - "Annotation Name": maName, - "Annotation Value": maValue, + "Annotation Name": raName, + "Annotation Value": raValue, } - df = pd.concat( - [df, pd.DataFrame(new_data, index=[0])], ignore_index=True - ) - for h in t.Hierarchies: - objectType = "Hierarchy" - hName = h.Name - for ha in h.Annotations: - haName = ha.Name - haValue = ha.Value + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + for cul in tom.model.Cultures: + culName = cul.Name + objectType = "Translation" + for cula in cul.Annotations: + culaName = cula.Name + culaValue = cula.Value new_data = { - "Object Name": hName, - "Parent Object Name": tName, + "Object Name": culName, + "Parent Object Name": mName, "Object Type": objectType, - "Annotation Name": haName, - "Annotation Value": haValue, + "Annotation Name": culaName, + "Annotation Value": culaValue, } - df = pd.concat( - [df, pd.DataFrame(new_data, index=[0])], ignore_index=True - ) - for d in m.DataSources: - dName = d.Name - objectType = "Data Source" - for da in d.Annotations: - daName = da.Name - daValue = da.Value - new_data = { - "Object Name": dName, - "Parent Object Name": mName, - "Object Type": objectType, - "Annotation Name": daName, - "Annotation Value": daValue, - } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - for r in m.Relationships: - rName = r.Name - objectType = "Relationship" - for ra in r.Annotations: - raName = ra.Name - raValue = ra.Value - new_data = { - "Object Name": rName, - "Parent Object Name": mName, - "Object Type": objectType, - "Annotation Name": raName, - "Annotation Value": raValue, - } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - for cul in m.Cultures: - culName = cul.Name - objectType = "Translation" - for cula in cul.Annotations: - culaName = cula.Name - culaValue = cula.Value - new_data = { - "Object Name": culName, - "Parent Object Name": mName, - "Object Type": objectType, - "Annotation Name": culaName, - "Annotation Value": culaValue, - } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - for e in m.Expressions: - eName = e.Name - objectType = "Expression" - for ea in e.Annotations: - 
eaName = ea.Name - eaValue = ea.Value - new_data = { - "Object Name": eName, - "Parent Object Name": mName, - "Object Type": objectType, - "Annotation Name": eaName, - "Annotation Value": eaValue, - } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - for per in m.Perspectives: - perName = per.Name - objectType = "Perspective" - for pera in per.Annotations: - peraName = pera.Name - peraValue = pera.Value - new_data = { - "Object Name": perName, - "Parent Object Name": mName, - "Object Type": objectType, - "Annotation Name": peraName, - "Annotation Value": peraValue, - } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - for rol in m.Roles: - rolName = rol.Name - objectType = "Role" - for rola in rol.Annotations: - rolaName = rola.Name - rolaValue = rola.Value - new_data = { - "Object Name": rolName, - "Parent Object Name": mName, - "Object Type": objectType, - "Annotation Name": rolaName, - "Annotation Value": rolaValue, - } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + for e in tom.model.Expressions: + eName = e.Name + objectType = "Expression" + for ea in e.Annotations: + eaName = ea.Name + eaValue = ea.Value + new_data = { + "Object Name": eName, + "Parent Object Name": mName, + "Object Type": objectType, + "Annotation Name": eaName, + "Annotation Value": eaValue, + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + for per in tom.model.Perspectives: + perName = per.Name + objectType = "Perspective" + for pera in per.Annotations: + peraName = pera.Name + peraValue = pera.Value + new_data = { + "Object Name": perName, + "Parent Object Name": mName, + "Object Type": objectType, + "Annotation Name": peraName, + "Annotation Value": peraValue, + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + for rol in tom.model.Roles: + rolName = rol.Name + objectType = "Role" + for rola in rol.Annotations: + rolaName = rola.Name + rolaValue = rola.Value + new_data = { + "Object Name": rolName, + "Parent Object Name": mName, + "Object Type": objectType, + "Annotation Name": rolaName, + "Annotation Value": rolaValue, + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - return df + return df def list_columns( @@ -385,8 +386,7 @@ def list_columns( ) if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name() dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) @@ -493,24 +493,15 @@ def list_dashboards(workspace: Optional[str] = None): response = client.get(f"/v1.0/myorg/groups/{workspace_id}/dashboards") for v in response.json()["value"]: - dashboardID = v["id"] - displayName = v["displayName"] - isReadOnly = v["isReadOnly"] - webURL = v["webUrl"] - embedURL = v["embedUrl"] - dataClass = v["dataClassification"] - users = v["users"] - subs = v["subscriptions"] - new_data = { - "Dashboard ID": dashboardID, - "Dashboard Name": displayName, - "Read Only": isReadOnly, - "Web URL": webURL, - "Embed URL": embedURL, - "Data Classification": dataClass, - "Users": [users], - "Subscriptions": [subs], + "Dashboard ID": v.get("id"), + "Dashboard Name": v.get("displayName"), + "Read Only": v.get("isReadOnly"), + "Web URL": v.get("webUrl"), + "Embed URL": v.get("embedUrl"), + "Data Classification": v.get("dataClassification"), + "Users": 
[v.get("users")], + "Subscriptions": [v.get("subscriptions")], } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -554,27 +545,19 @@ def list_lakehouses(workspace: Optional[str] = None): client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/lakehouses/") - for v in response.json()["value"]: - lakehouseId = v["id"] - lakehouseName = v["displayName"] - lakehouseDesc = v["description"] - prop = v["properties"] - oneLakeTP = prop["oneLakeTablesPath"] - oneLakeFP = prop["oneLakeFilesPath"] - sqlEPProp = prop["sqlEndpointProperties"] - sqlEPCS = sqlEPProp["connectionString"] - sqlepid = sqlEPProp["id"] - sqlepstatus = sqlEPProp["provisioningStatus"] + for v in response.json()["value"]: + prop = v.get("properties") + sqlEPProp = prop.get("sqlEndpointProperties") new_data = { - "Lakehouse Name": lakehouseName, - "Lakehouse ID": lakehouseId, - "Description": lakehouseDesc, - "OneLake Tables Path": oneLakeTP, - "OneLake Files Path": oneLakeFP, - "SQL Endpoint Connection String": sqlEPCS, - "SQL Endpoint ID": sqlepid, - "SQL Endpoint Provisioning Status": sqlepstatus, + "Lakehouse Name": v.get("displayName"), + "Lakehouse ID": v.get("id"), + "Description": v.get("description"), + "OneLake Tables Path": prop.get("oneLakeTablesPath"), + "OneLake Files Path": prop.get("oneLakeFilesPath"), + "SQL Endpoint Connection String": sqlEPProp.get("connectionString"), + "SQL Endpoint ID": sqlEPProp.get("id"), + "SQL Endpoint Provisioning Status": sqlEPProp.get("provisioningStatus"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -614,22 +597,16 @@ def list_warehouses(workspace: Optional[str] = None): client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/warehouses/") - for v in response.json()["value"]: - warehouse_id = v["id"] - warehouse_name = v["displayName"] - desc = v["description"] - prop = v["properties"] - connInfo = prop["connectionInfo"] - createdDate = prop["createdDate"] - lastUpdate = prop["lastUpdatedTime"] + for v in response.json()["value"]: + prop = v.get("properties") new_data = { - "Warehouse Name": warehouse_name, - "Warehouse ID": warehouse_id, - "Description": desc, - "Connection Info": connInfo, - "Created Date": createdDate, - "Last Updated Time": lastUpdate, + "Warehouse Name": v.get("displayName"), + "Warehouse ID": v.get("id"), + "Description": v.get("description"), + "Connection Info": prop.get("connectionInfo"), + "Created Date": prop.get("createdDate"), + "Last Updated Time": prop.get("lastUpdatedTime"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -661,14 +638,11 @@ def list_sqlendpoints(workspace: Optional[str] = None): response = client.get(f"/v1/workspaces/{workspace_id}/sqlEndpoints/") for v in response.json()["value"]: - sql_id = v["id"] - lake_name = v["displayName"] - desc = v["description"] new_data = { - "SQL Endpoint ID": sql_id, - "SQL Endpoint Name": lake_name, - "Description": desc, + "SQL Endpoint ID": v.get("id"), + "SQL Endpoint Name": v.get("displayName"), + "Description": v.get("description"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -701,15 +675,12 @@ def list_mirroredwarehouses(workspace: Optional[str] = None): client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/mirroredWarehouses/") - for v in response.json()["value"]: - mirr_id = v["id"] - dbname = v["displayName"] - desc = v["description"] + for v in 
response.json()["value"]: new_data = { - "Mirrored Warehouse": dbname, - "Mirrored Warehouse ID": mirr_id, - "Description": desc, + "Mirrored Warehouse": v.get("displayName"), + "Mirrored Warehouse ID": v.get("id"), + "Description": v.get("description"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -750,24 +721,17 @@ def list_kqldatabases(workspace: Optional[str] = None): client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/kqlDatabases/") - for v in response.json()["value"]: - kql_id = v["id"] - kql_name = v["displayName"] - desc = v["description"] - prop = v["properties"] - eventId = prop["parentEventhouseItemId"] - qsURI = prop["queryServiceUri"] - isURI = prop["ingestionServiceUri"] - dbType = prop["kustoDatabaseType"] + for v in response.json()["value"]: + prop = v.get("properties") new_data = { - "KQL Database Name": kql_name, - "KQL Database ID": kql_id, - "Description": desc, - "Parent Eventhouse Item ID": eventId, - "Query Service URI": qsURI, - "Ingestion Service URI": isURI, - "Kusto Database Type": dbType, + "KQL Database Name": v.get("displayName"), + "KQL Database ID": v.get("id"), + "Description": v.get("description"), + "Parent Eventhouse Item ID": prop.get("parentEventhouseItemId"), + "Query Service URI": prop.get("queryServiceUri"), + "Ingestion Service URI": prop.get("ingestionServiceUri"), + "Kusto Database Type": prop.get("kustoDatabaseType"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -799,14 +763,11 @@ def list_kqlquerysets(workspace: Optional[str] = None): response = client.get(f"/v1/workspaces/{workspace_id}/kqlQuerysets/") for v in response.json()["value"]: - kql_id = v["id"] - kql_name = v["displayName"] - desc = v["description"] new_data = { - "KQL Queryset Name": kql_name, - "KQL Queryset ID": kql_id, - "Description": desc, + "KQL Queryset Name": v.get("displayName"), + "KQL Queryset ID": v.get("id"), + "Description": v.get("description"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -955,14 +916,11 @@ def list_mlexperiments(workspace: Optional[str] = None): response = client.get(f"/v1/workspaces/{workspace_id}/mlExperiments/") for v in response.json()["value"]: - model_id = v["id"] - modelName = v["displayName"] - desc = v["description"] new_data = { - "ML Experiment Name": modelName, - "ML Experiment ID": model_id, - "Description": desc, + "ML Experiment Name": v.get("displayName"), + "ML Experiment ID": v.get("id"), + "Description": v.get("description"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -994,14 +952,11 @@ def list_datamarts(workspace: Optional[str] = None): response = client.get(f"/v1/workspaces/{workspace_id}/datamarts/") for v in response.json()["value"]: - model_id = v["id"] - modelName = v["displayName"] - desc = v["description"] new_data = { - "Datamart Name": modelName, - "Datamart ID": model_id, - "Description": desc, + "Datamart Name": v.get("displayName"), + "Datamart ID": v.get("id"), + "Description": v.get("description"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -1044,7 +999,7 @@ def create_warehouse( if response.status_code == 201: print( - f"The '{warehouse}' warehouse has been created within the '{workspace}' workspace." + f"{icons.green_dot} The '{warehouse}' warehouse has been created within the '{workspace}' workspace." 
) elif response.status_code == 202: operationId = response.headers["x-ms-operation-id"] @@ -1056,13 +1011,10 @@ def create_warehouse( response_body = json.loads(response.content) response = client.get(f"/v1/operations/{operationId}/result") print( - f"The '{warehouse}' warehouse has been created within the '{workspace}' workspace." + f"{icons.green_dot} The '{warehouse}' warehouse has been created within the '{workspace}' workspace." ) else: - print( - f"ERROR: Failed to create the '{warehouse}' warehouse within the '{workspace}' workspace." - ) - + raise ValueError(f"{icons.red_dot} Failed to create the '{warehouse}' warehouse within the '{workspace}' workspace.") def update_item( item_type: str, @@ -1107,19 +1059,15 @@ def update_item( item_type = item_type.replace(" ", "").capitalize() if item_type not in itemTypes.keys(): - print(f"The '{item_type}' is not a valid item type. ") - return - + raise ValueError(f"{icons.red_dot} The '{item_type}' is not a valid item type. ") + itemType = itemTypes[item_type] dfI = fabric.list_items(workspace=workspace, type=item_type) dfI_filt = dfI[(dfI["Display Name"] == current_name)] if len(dfI_filt) == 0: - print( - f"The '{current_name}' {item_type} does not exist within the '{workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} The '{current_name}' {item_type} does not exist within the '{workspace}' workspace.") itemId = dfI_filt["Id"].iloc[0] @@ -1135,17 +1083,14 @@ def update_item( if response.status_code == 200: if description is None: print( - f"The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}'" + f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}'" ) else: print( - f"The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}' and have a description of '{description}'" + f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}' and have a description of '{description}'" ) else: - print( - f"ERROR: The '{current_name}' {item_type} within the '{workspace}' workspace was not updateds." 
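create_warehouse above keeps the existing long-running-operation flow (201 = created immediately, 202 = follow the operation id) and only changes the messages and the failure path. A rough sketch of that flow under stated assumptions: the helper name, the polling loop, the sleep interval, and the "status" field are all assumptions, since the hunk only shows single GETs against the operation and its /result endpoint:

    import json
    import time
    import sempy_labs._icons as icons

    def _await_item_creation(client, response, item_name: str):
        # `client` is a fabric.FabricRestClient; `response` is the POST response
        # from the create call (the POST itself is outside this hunk).
        if response.status_code == 201:
            print(f"{icons.green_dot} The '{item_name}' item has been created.")
        elif response.status_code == 202:
            # Asynchronous creation: follow the operation id from the headers.
            operation_id = response.headers["x-ms-operation-id"]
            op = client.get(f"/v1/operations/{operation_id}")
            op_body = json.loads(op.content)
            while op_body.get("status") not in ("Succeeded", "Failed"):
                time.sleep(3)  # interval is illustrative
                op = client.get(f"/v1/operations/{operation_id}")
                op_body = json.loads(op.content)
            client.get(f"/v1/operations/{operation_id}/result")
            print(f"{icons.green_dot} The '{item_name}' item has been created.")
        else:
            raise ValueError(
                f"{icons.red_dot} Failed to create the '{item_name}' item."
            )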
- ) - + raise ValueError(f"{icons.red_dot}: The '{current_name}' {item_type} within the '{workspace}' workspace was not updateds.") def list_relationships( dataset: str, workspace: Optional[str] = None, extended: Optional[bool] = False @@ -1171,8 +1116,7 @@ def list_relationships( """ if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name() dfR = fabric.list_relationships(dataset=dataset, workspace=workspace) @@ -1254,14 +1198,11 @@ def list_dataflow_storage_accounts(): response = client.get(f"/v1.0/myorg/dataflowStorageAccounts") for v in response.json()["value"]: - dfsaId = v["id"] - dfsaName = v["name"] - isEnabled = v["isEnabled"] new_data = { - "Dataflow Storage Account ID": dfsaId, - "Dataflow Storage Account Name": dfsaName, - "Enabled": isEnabled, + "Dataflow Storage Account ID": v.get("id"), + "Dataflow Storage Account Name": v.get("name"), + "Enabled": v.get("isEnabled"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -1359,10 +1300,10 @@ def list_workspace_role_assignments(workspace: Optional[str] = None): response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments") for i in response.json()["value"]: - user_name = i["principal"]["displayName"] - role_name = i["role"] - user_email = i["principal"]["userDetails"]["userPrincipalName"] - user_type = i["principal"]["type"] + user_name = i.get("principal").get("displayName") + role_name = i.get("role") + user_email = i.get("principal").get("userDetails").get("userPrincipalName") + user_type = i.get("principal").get("type") new_data = { "User Name": user_name, @@ -1585,8 +1526,8 @@ def list_shortcuts( ) if response.status_code == 200: for s in response.json()["value"]: - shortcutName = s["name"] - shortcutPath = s["path"] + shortcutName = s.get("name") + shortcutPath = s.get("path") source = list(s["target"].keys())[0] ( sourceLakehouseName, @@ -1597,17 +1538,17 @@ def list_shortcuts( subpath, ) = (None, None, None, None, None, None) if source == "oneLake": - sourceLakehouseId = s["target"][source]["itemId"] - sourcePath = s["target"][source]["path"] - sourceWorkspaceId = s["target"][source]["workspaceId"] + sourceLakehouseId = s.get("target").get(source).get("itemId") + sourcePath = s.get("target").get(source).get("path") + sourceWorkspaceId = s.get("target").get(source).get("workspaceId") sourceWorkspaceName = fabric.resolve_workspace_name(sourceWorkspaceId) sourceLakehouseName = resolve_lakehouse_name( sourceLakehouseId, sourceWorkspaceName ) else: - connectionId = s["target"][source]["connectionId"] - location = s["target"][source]["location"] - subpath = s["target"][source]["subpath"] + connectionId = s.get("target").get(source).get("connectionId") + location = s.get("target").get(source).get("location") + subpath = s.get("target").get(source).get("subpath") new_data = { "Shortcut Name": shortcutName, diff --git a/src/sempy_labs/_model_auto_build.py b/src/sempy_labs/_model_auto_build.py index f8f9e9e3..ef058388 100644 --- a/src/sempy_labs/_model_auto_build.py +++ b/src/sempy_labs/_model_auto_build.py @@ -41,9 +41,7 @@ def model_auto_build( """ - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) if lakehouse_workspace is None: lakehouse_workspace = workspace @@ -60,7 +58,7 @@ def model_auto_build( create_blank_semantic_model(dataset=dataset, 
workspace=workspace) - with connect_semantic_model(dataset=dataset, workspace=workspace) as tom: + with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom: # DL Only expr = get_shared_expression(lakehouse=lakehouse, workspace=lakehouse_workspace) diff --git a/src/sempy_labs/_model_bpa.py b/src/sempy_labs/_model_bpa.py index 87950d09..ed08d38f 100644 --- a/src/sempy_labs/_model_bpa.py +++ b/src/sempy_labs/_model_bpa.py @@ -11,7 +11,7 @@ from sempy_labs.lakehouse._lakehouse import lakehouse_attached from typing import List, Optional, Union from sempy._utils._log import log - +import sempy_labs._icons as icons def model_bpa_rules(): """ @@ -744,9 +744,7 @@ def run_model_bpa( message="This pattern is interpreted as a regular expression, and has match groups.", ) - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) if rules_dataframe is None: rules_dataframe = model_bpa_rules() @@ -1184,10 +1182,8 @@ def execute_rule(row): if export: lakeAttach = lakehouse_attached() if lakeAttach is False: - print( - f"In order to save the Best Practice Analyzer results, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." - ) - return + raise ValueError(f"{icons.red_dot} In order to save the Best Practice Analyzer results, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") + dfExport = finalDF.copy() delta_table_name = "modelbparesults" @@ -1230,7 +1226,7 @@ def execute_rule(row): spark_df = spark.createDataFrame(dfExport) spark_df.write.mode("append").format("delta").saveAsTable(delta_table_name) print( - f"\u2022 Model Best Practice Analyzer results for the '{dataset}' semantic model have been appended to the '{delta_table_name}' delta table." + f"{icons.green_dot} Model Best Practice Analyzer results for the '{dataset}' semantic model have been appended to the '{delta_table_name}' delta table." ) if return_dataframe: diff --git a/src/sempy_labs/_model_dependencies.py b/src/sempy_labs/_model_dependencies.py index d44b453d..3fb69eba 100644 --- a/src/sempy_labs/_model_dependencies.py +++ b/src/sempy_labs/_model_dependencies.py @@ -25,9 +25,7 @@ def get_measure_dependencies(dataset: str, workspace: Optional[str] = None): Shows all dependencies for all measures in the semantic model. """ - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) dep = fabric.evaluate_dax( dataset=dataset, @@ -150,9 +148,7 @@ def get_model_calc_dependencies(dataset: str, workspace: Optional[str] = None): Shows all dependencies for all objects in the semantic model. 
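The workspace handling in the modules above collapses the two-step lookup into a single call; the change relies on fabric.resolve_workspace_name falling back to the current workspace when it is passed None. Roughly, with a hypothetical function as the carrier:

    import sempy.fabric as fabric
    from typing import Optional

    def example(dataset: str, workspace: Optional[str] = None):
        # Old shape, repeated across the modules above:
        #   if workspace is None:
        #       workspace_id = fabric.get_workspace_id()
        #       workspace = fabric.resolve_workspace_name(workspace_id)
        # New shape: one call that receives the (possibly None) parameter.
        workspace = fabric.resolve_workspace_name(workspace)
        return fabric.list_measures(dataset=dataset, workspace=workspace)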
""" - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) dep = fabric.evaluate_dax( dataset=dataset, @@ -282,9 +278,7 @@ def measure_dependency_tree( """ - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) dfM = fabric.list_measures(dataset=dataset, workspace=workspace) dfM_filt = dfM[dfM["Measure Name"] == measure_name] diff --git a/src/sempy_labs/_one_lake_integration.py b/src/sempy_labs/_one_lake_integration.py index 03d2bf1f..056eacb3 100644 --- a/src/sempy_labs/_one_lake_integration.py +++ b/src/sempy_labs/_one_lake_integration.py @@ -42,10 +42,7 @@ def export_model_to_onelake( dfD_filt = dfD[dfD["Dataset Name"] == dataset] if len(dfD_filt) == 0: - print( - f"{icons.red_dot} The '{dataset}' semantic model does not exist in the '{workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model does not exist in the '{workspace}' workspace.") tmsl = f""" {{ diff --git a/src/sempy_labs/_query_scale_out.py b/src/sempy_labs/_query_scale_out.py index 7579e32b..7957c633 100644 --- a/src/sempy_labs/_query_scale_out.py +++ b/src/sempy_labs/_query_scale_out.py @@ -44,10 +44,7 @@ def qso_sync(dataset: str, workspace: Optional[str] = None): f"{icons.green_dot} QSO sync initiated for the '{dataset}' semantic model within the '{workspace}' workspace." ) else: - print( - f"{icons.red_dot} QSO sync failed for the '{dataset}' semantic model within the '{workspace}' workspace." - ) - + raise ValueError(f"{icons.red_dot} QSO sync failed for the '{dataset}' semantic model within the '{workspace}' workspace.") def qso_sync_status(dataset: str, workspace: Optional[str] = None): """ @@ -189,7 +186,7 @@ def disable_qso(dataset: str, workspace: Optional[str] = None): ) return df else: - return f"{icons.red_dot} {response.status_code}" + raise ValueError(f"{icons.red_dot} {response.status_code}") def set_qso( @@ -256,16 +253,9 @@ def set_qso( ) return df else: - return f"{icons.red_dot} {response.status_code}" + raise ValueError(f"{icons.red_dot} {response.status_code}") else: - print( - f"{icons.red_dot} Failed to set the '{dataset}' semantic model within the '{workspace}' workspace to large semantic model storage format. This is a prerequisite for enabling Query Scale Out." - ) - print( - "https://learn.microsoft.com/power-bi/enterprise/service-premium-scale-out#prerequisites" - ) - return - + raise ValueError(f"{icons.red_dot} Failed to set the '{dataset}' semantic model within the '{workspace}' workspace to large semantic model storage format. This is a prerequisite for enabling Query Scale Out.\n\"https://learn.microsoft.com/power-bi/enterprise/service-premium-scale-out#prerequisites\"") def set_semantic_model_storage_format( dataset: str, storage_format: str, workspace: Optional[str] = None @@ -311,10 +301,7 @@ def set_semantic_model_storage_format( elif storage_format == "Small": request_body = {"targetStorageMode": "Abf"} else: - print( - f"{icons.red_dot} Invalid storage format value. Valid options: {storageFormats}." - ) - return + raise ValueError(f"{icons.red_dot} Invalid storage format value. 
Valid options: {storageFormats}.") client = fabric.PowerBIRestClient() response = client.patch( @@ -326,8 +313,7 @@ def set_semantic_model_storage_format( f"{icons.green_dot} Semantic model storage format set to '{storage_format}'." ) else: - return f"{icons.red_dot} {response.status_code}" - + raise ValueError(f"{icons.red_dot} {response.status_code}") def list_qso_settings(dataset: Optional[str] = None, workspace: Optional[str] = None): """ @@ -370,21 +356,17 @@ def list_qso_settings(dataset: Optional[str] = None, workspace: Optional[str] = client = fabric.PowerBIRestClient() response = client.get(f"/v1.0/myorg/groups/{workspace_id}/datasets") for v in response.json()["value"]: - tsm = v["targetStorageMode"] + tsm = v.get("targetStorageMode") if tsm == "Abf": sm = "Small" else: sm = "Large" new_data = { - "Dataset Id": v["id"], - "Dataset Name": v["name"], + "Dataset Id": v.get("id"), + "Dataset Name": v.get("name"), "Storage Mode": sm, - "QSO Auto Sync Enabled": v["queryScaleOutSettings"][ - "autoSyncReadOnlyReplicas" - ], - "QSO Max Read Only Replicas": v["queryScaleOutSettings"][ - "maxReadOnlyReplicas" - ], + "QSO Auto Sync Enabled": v.get("queryScaleOutSettings").get("autoSyncReadOnlyReplicas"), + "QSO Max Read Only Replicas": v.get("queryScaleOutSettings").get("maxReadOnlyReplicas"), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -444,4 +426,4 @@ def set_workspace_default_storage_format( f"{icons.green_dot} The default storage format for the '{workspace}' workspace has been updated to '{storage_format}." ) else: - print(f"{icons.red_dot} {response.status_code}") + raise ValueError(f"{icons.red_dot} {response.status_code}") diff --git a/src/sempy_labs/_refresh_semantic_model.py b/src/sempy_labs/_refresh_semantic_model.py index 333ca6eb..e0e3208b 100644 --- a/src/sempy_labs/_refresh_semantic_model.py +++ b/src/sempy_labs/_refresh_semantic_model.py @@ -41,9 +41,7 @@ def refresh_semantic_model( or if no lakehouse attached, resolves to the workspace of the notebook. """ - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) if refresh_type is None: refresh_type = "full" @@ -81,10 +79,7 @@ def extract_names(partition): ] if refresh_type not in refreshTypes: - print( - f"{icons.red_dot} Invalid refresh type. Refresh type must be one of these values: {refreshTypes}." - ) - return + raise ValueError(f"{icons.red_dot} Invalid refresh type. Refresh type must be one of these values: {refreshTypes}.") if len(objects) == 0: requestID = fabric.refresh_dataset( @@ -119,10 +114,7 @@ def extract_names(partition): if status == "Completed": break elif status == "Failed": - print( - f"{icons.red_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has failed." - ) - return + raise ValueError(f"{icons.red_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has failed.") elif status == "Cancelled": print( f"{icons.yellow_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has been cancelled." @@ -163,10 +155,8 @@ def cancel_dataset_refresh( if request_id is None: if len(rr_filt) == 0: - print( - f"{icons.red_dot} There are no active Enhanced API refreshes of the '{dataset}' semantic model within the '{workspace}' workspace." 
- ) - return + raise ValueError(f"{icons.red_dot} There are no active Enhanced API refreshes of the '{dataset}' semantic model within the '{workspace}' workspace.") + request_id = rr_filt["Request Id"].iloc[0] dataset_id = resolve_dataset_id(dataset=dataset, workspace=workspace) diff --git a/src/sempy_labs/_vertipaq.py b/src/sempy_labs/_vertipaq.py index 35b7ac8d..75a09902 100644 --- a/src/sempy_labs/_vertipaq.py +++ b/src/sempy_labs/_vertipaq.py @@ -56,9 +56,7 @@ def vertipaq_analyzer( "ignore", message="createDataFrame attempted Arrow optimization*" ) - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) if lakehouse_workspace is None: lakehouse_workspace = workspace @@ -102,9 +100,7 @@ def vertipaq_analyzer( dfI_filt = dfI[(dfI["Id"] == sqlEndpointId)] if len(dfI_filt) == 0: - print( - f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter." - ) + raise ValueError(f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter.") else: lakehouseName = dfI_filt["Display Name"].iloc[0] @@ -437,10 +433,7 @@ def vertipaq_analyzer( if export in ["table", "zip"]: lakeAttach = lakehouse_attached() if lakeAttach is False: - print( - f"{icons.red_dot} In order to save the Vertipaq Analyzer results, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." - ) - return + raise ValueError(f"{icons.red_dot} In order to save the Vertipaq Analyzer results, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") if export == "table": spark = SparkSession.builder.getOrCreate() diff --git a/src/sempy_labs/directlake/_directlake_schema_compare.py b/src/sempy_labs/directlake/_directlake_schema_compare.py index dcf12bcd..3b3ae8ef 100644 --- a/src/sempy_labs/directlake/_directlake_schema_compare.py +++ b/src/sempy_labs/directlake/_directlake_schema_compare.py @@ -40,9 +40,7 @@ def direct_lake_schema_compare( or if no lakehouse attached, resolves to the workspace of the notebook. """ - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) if lakehouse_workspace is None: lakehouse_workspace = workspace @@ -57,14 +55,10 @@ def direct_lake_schema_compare( dfI_filt = dfI[(dfI["Id"] == sqlEndpointId)] if len(dfI_filt) == 0: - print( - f"{icons.red_dot} The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified." 
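The _query_scale_out.py and _refresh_semantic_model.py hunks above apply the same rule to HTTP results: a non-success response or a failed refresh now raises instead of returning the bare status code or printing it. A small sketch; the endpoint and status check are illustrative only:

    import sempy.fabric as fabric
    import sempy_labs._icons as icons

    client = fabric.PowerBIRestClient()
    response = client.get("/v1.0/myorg/groups")  # illustrative call

    if response.status_code != 200:
        # Old behaviour: `return f"{icons.red_dot} {response.status_code}"`,
        # which forced callers to inspect a string. Raising surfaces the
        # failure directly at the call site.
        raise ValueError(f"{icons.red_dot} {response.status_code}")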
- ) - return + raise ValueError(f"{icons.red_dot} The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified.") if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()): - print(f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake mode.") - return + raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake mode.") dfT = list_tables(dataset, workspace) dfC = fabric.list_columns(dataset=dataset, workspace=workspace) diff --git a/src/sempy_labs/directlake/_directlake_schema_sync.py b/src/sempy_labs/directlake/_directlake_schema_sync.py index b5ef01e5..d8c3dbac 100644 --- a/src/sempy_labs/directlake/_directlake_schema_sync.py +++ b/src/sempy_labs/directlake/_directlake_schema_sync.py @@ -46,8 +46,7 @@ def direct_lake_schema_sync( import Microsoft.AnalysisServices.Tabular as TOM import System - if workspace is None: - workspace = fabric.resolve_workspace_name() + workspace = fabric.resolve_workspace_name(workspace) if lakehouse_workspace is None: lakehouse_workspace = workspace @@ -62,10 +61,7 @@ def direct_lake_schema_sync( dfI_filt = dfI[(dfI["Id"] == sqlEndpointId)] if len(dfI_filt) == 0: - print( - f"{icons.red_dot} The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified." - ) - return + raise ValueError(f"{icons.red_dot} The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified.") dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) dfP_filt = dfP[dfP["Source Type"] == "Entity"] diff --git a/src/sempy_labs/directlake/_fallback.py b/src/sempy_labs/directlake/_fallback.py index 8cabb740..63436cd7 100644 --- a/src/sempy_labs/directlake/_fallback.py +++ b/src/sempy_labs/directlake/_fallback.py @@ -23,17 +23,13 @@ def check_fallback_reason(dataset: str, workspace: Optional[str] = None): The tables in the semantic model and their fallback reason. """ - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) dfP_filt = dfP[dfP["Mode"] == "DirectLake"] if len(dfP_filt) == 0: - print( - f"{icons.yellow_dot} The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models." - ) + raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake. 
This function is only applicable to Direct Lake semantic models.") else: df = fabric.evaluate_dax( dataset=dataset, diff --git a/src/sempy_labs/directlake/_get_shared_expression.py b/src/sempy_labs/directlake/_get_shared_expression.py index b2aa6f1f..9cc83bf6 100644 --- a/src/sempy_labs/directlake/_get_shared_expression.py +++ b/src/sempy_labs/directlake/_get_shared_expression.py @@ -1,9 +1,6 @@ import sempy import sempy.fabric as fabric -from sempy_labs._helper_functions import ( - resolve_lakehouse_name, - resolve_workspace_name_and_id, -) +from sempy_labs._helper_functions import resolve_lakehouse_name from sempy_labs._list_functions import list_lakehouses from typing import Optional import sempy_labs._icons as icons @@ -30,7 +27,7 @@ def get_shared_expression( Shows the expression which can be used to connect a Direct Lake semantic model to its SQL Endpoint. """ - (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) + workspace = fabric.resolve_workspace_name(workspace) if lakehouse is None: lakehouse_id = fabric.get_lakehouse_id() lakehouse = resolve_lakehouse_name(lakehouse_id) @@ -43,10 +40,7 @@ def get_shared_expression( provStatus = lakeDetail["SQL Endpoint Provisioning Status"].iloc[0] if provStatus == "InProgress": - print( - f"{icons.red_dot} The SQL Endpoint for the '{lakehouse}' lakehouse within the '{workspace}' workspace has not yet been provisioned. Please wait until it has been provisioned." - ) - return + raise ValueError(f"{icons.red_dot} The SQL Endpoint for the '{lakehouse}' lakehouse within the '{workspace}' workspace has not yet been provisioned. Please wait until it has been provisioned.") sh = ( 'let\n\tdatabase = Sql.Database("' diff --git a/src/sempy_labs/directlake/_guardrails.py b/src/sempy_labs/directlake/_guardrails.py index 8bdf7f15..c944cf68 100644 --- a/src/sempy_labs/directlake/_guardrails.py +++ b/src/sempy_labs/directlake/_guardrails.py @@ -44,9 +44,7 @@ def get_sku_size(workspace: Optional[str] = None): The SKU size for a workspace. """ - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) dfC = fabric.list_capacities() dfW = fabric.list_workspaces().sort_values(by="Name", ascending=True) diff --git a/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py b/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py index c7d7252f..f61af2fe 100644 --- a/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py +++ b/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py @@ -27,8 +27,7 @@ def list_direct_lake_model_calc_tables(dataset: str, workspace: Optional[str] = A pandas dataframe showing the calculated tables which were migrated to Direct Lake and whose DAX expressions are stored as model annotations. 
""" - if workspace is None: - workspace = fabric.resolve_workspace_name() + workspace = fabric.resolve_workspace_name(workspace) df = pd.DataFrame(columns=["Table Name", "Source Expression"]) @@ -39,7 +38,7 @@ def list_direct_lake_model_calc_tables(dataset: str, workspace: Optional[str] = is_direct_lake = tom.is_direct_lake() if not is_direct_lake: - print(f"{icons.yellow_dot} The '{dataset}' semantic model is not in Direct Lake mode.") + raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake mode.") else: dfA = list_annotations(dataset, workspace) dfT = list_tables(dataset, workspace) diff --git a/src/sempy_labs/directlake/_show_unsupported_directlake_objects.py b/src/sempy_labs/directlake/_show_unsupported_directlake_objects.py index 160c1dc9..b0c50b7b 100644 --- a/src/sempy_labs/directlake/_show_unsupported_directlake_objects.py +++ b/src/sempy_labs/directlake/_show_unsupported_directlake_objects.py @@ -30,8 +30,7 @@ def show_unsupported_direct_lake_objects( pd.options.mode.chained_assignment = None - if workspace is None: - workspace = fabric.resolve_workspace_name() + workspace = fabric.resolve_workspace_name(workspace) dfT = list_tables(dataset, workspace) dfC = fabric.list_columns(dataset=dataset, workspace=workspace) diff --git a/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py b/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py index f51e3ce7..44b70b24 100644 --- a/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +++ b/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py @@ -40,7 +40,7 @@ def update_direct_lake_model_lakehouse_connection( """ - (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) + workspace = fabric.resolve_workspace_name(workspace) if lakehouse_workspace is None: lakehouse_workspace = workspace @@ -54,17 +54,13 @@ def update_direct_lake_model_lakehouse_connection( dfI_filt = dfI[(dfI["Display Name"] == lakehouse)] if len(dfI_filt) == 0: - print( - f"{icons.red_dot} The '{lakehouse}' lakehouse does not exist within the '{lakehouse_workspace}' workspace. Therefore it cannot be used to support the '{dataset}' semantic model within the '{workspace}' workspace." - ) + raise ValueError(f"{icons.red_dot} The '{lakehouse}' lakehouse does not exist within the '{lakehouse_workspace}' workspace. Therefore it cannot be used to support the '{dataset}' semantic model within the '{workspace}' workspace.") dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) dfP_filt = dfP[dfP["Mode"] == "DirectLake"] if len(dfP_filt) == 0: - print( - f"{icons.yellow_dot} The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models." - ) + raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake. 
This function is only applicable to Direct Lake semantic models.") else: with connect_semantic_model( dataset=dataset, readonly=False, workspace=workspace diff --git a/src/sempy_labs/directlake/_update_directlake_partition_entity.py b/src/sempy_labs/directlake/_update_directlake_partition_entity.py index 654a9c9e..c3819298 100644 --- a/src/sempy_labs/directlake/_update_directlake_partition_entity.py +++ b/src/sempy_labs/directlake/_update_directlake_partition_entity.py @@ -53,20 +53,14 @@ def update_direct_lake_partition_entity( entity_name = [entity_name] if len(table_name) != len(entity_name): - print( - f"{icons.red_dot} The 'table_name' and 'entity_name' arrays must be of equal length." - ) - return + raise ValueError(f"{icons.red_dot} The 'table_name' and 'entity_name' arrays must be of equal length.") with connect_semantic_model( dataset=dataset, readonly=False, workspace=workspace ) as tom: if not tom.is_direct_lake(): - print( - f"{icons.yellow_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode." - ) - return + raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode.") for tName in table_name: i = table_name.index(tName) diff --git a/src/sempy_labs/directlake/_warm_cache.py b/src/sempy_labs/directlake/_warm_cache.py index 15c9f4d9..33e69db7 100644 --- a/src/sempy_labs/directlake/_warm_cache.py +++ b/src/sempy_labs/directlake/_warm_cache.py @@ -45,10 +45,7 @@ def warm_direct_lake_cache_perspective( dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()): - print( - f"{icons.red_dot} The '{dataset}' semantic model in the '{workspace}' workspace is not in Direct Lake mode. This function is specifically for semantic models in Direct Lake mode." - ) - return + raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model in the '{workspace}' workspace is not in Direct Lake mode. This function is specifically for semantic models in Direct Lake mode.") dfPersp = fabric.list_perspectives(dataset=dataset, workspace=workspace) dfPersp["DAX Object Name"] = format_dax_object_name( @@ -57,10 +54,8 @@ def warm_direct_lake_cache_perspective( dfPersp_filt = dfPersp[dfPersp["Perspective Name"] == perspective] if len(dfPersp_filt) == 0: - print( - f"{icons.red_dot} The '{perspective} perspective does not exist or contains no objects within the '{dataset}' semantic model in the '{workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} The '{perspective} perspective does not exist or contains no objects within the '{dataset}' semantic model in the '{workspace}' workspace.") + dfPersp_c = dfPersp_filt[dfPersp_filt["Object Type"] == "Column"] column_values = dfPersp_c["DAX Object Name"].tolist() @@ -166,6 +161,8 @@ def warm_direct_lake_cache_isresident( Returns a pandas dataframe showing the columns that have been put into memory. 
""" + workspace = fabric.resolve_workspace_name(workspace) + dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()): raise ValueError( diff --git a/src/sempy_labs/lakehouse/_get_lakehouse_tables.py b/src/sempy_labs/lakehouse/_get_lakehouse_tables.py index 1a8a6e86..25f9fdfd 100644 --- a/src/sempy_labs/lakehouse/_get_lakehouse_tables.py +++ b/src/sempy_labs/lakehouse/_get_lakehouse_tables.py @@ -174,10 +174,8 @@ def get_lakehouse_tables( if export: lakeAttach = lakehouse_attached() if lakeAttach is False: - print( - f"{icons.red_dot} In order to save the report.json file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." - ) - return + raise ValueError(f"{icons.red_dot} In order to save the report.json file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") + spark = SparkSession.builder.getOrCreate() lakehouse_id = fabric.get_lakehouse_id() diff --git a/src/sempy_labs/lakehouse/_lakehouse.py b/src/sempy_labs/lakehouse/_lakehouse.py index 2c64af8f..fb65f53c 100644 --- a/src/sempy_labs/lakehouse/_lakehouse.py +++ b/src/sempy_labs/lakehouse/_lakehouse.py @@ -50,9 +50,7 @@ def optimize_lakehouse_tables( from sempy_labs.lakehouse._get_lakehouse_tables import get_lakehouse_tables from delta import DeltaTable - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) if lakehouse is None: lakehouse_id = fabric.get_lakehouse_id() diff --git a/src/sempy_labs/lakehouse/_shortcuts.py b/src/sempy_labs/lakehouse/_shortcuts.py index 384809bd..7eca093e 100644 --- a/src/sempy_labs/lakehouse/_shortcuts.py +++ b/src/sempy_labs/lakehouse/_shortcuts.py @@ -114,10 +114,7 @@ def create_shortcut( sourceValues = list(source_titles.keys()) if source not in sourceValues: - print( - f"{icons.red_dot} The 'source' parameter must be one of these values: {sourceValues}." - ) - return + raise ValueError(f"{icons.red_dot} The 'source' parameter must be one of these values: {sourceValues}.") sourceTitle = source_titles[source] @@ -195,4 +192,4 @@ def delete_shortcut( f"{icons.green_dot} The '{shortcut_name}' shortcut in the '{lakehouse}' within the '{workspace}' workspace has been deleted." ) else: - print(f"{icons.red_dot} The '{shortcut_name}' has not been deleted.") + raise ValueError(f"{icons.red_dot} The '{shortcut_name}' has not been deleted.") diff --git a/src/sempy_labs/migration/_create_pqt_file.py b/src/sempy_labs/migration/_create_pqt_file.py index ce98f1a1..10a76db6 100644 --- a/src/sempy_labs/migration/_create_pqt_file.py +++ b/src/sempy_labs/migration/_create_pqt_file.py @@ -11,7 +11,7 @@ @log def create_pqt_file( - dataset: str, workspace: Optional[str] = None, file_name: Optional[str] = None + dataset: str, workspace: Optional[str] = None, file_name: Optional[str] = 'PowerQueryTemplate' ): """ Dynamically generates a `Power Query Template `_ file based on the semantic model. The .pqt file is saved within the Files section of your lakehouse. @@ -24,25 +24,16 @@ def create_pqt_file( The Fabric workspace name. Defaults to None which resolves to the workspace of the attached lakehouse or if no lakehouse attached, resolves to the workspace of the notebook. - file_name : str, default=None + file_name : str, default='PowerQueryTemplate' The name of the Power Query Template file to be generated. 
- Defaults to None which resolves to 'PowerQueryTemplate'. """ - if file_name is None: - file_name = "PowerQueryTemplate" - lakeAttach = lakehouse_attached() if lakeAttach is False: - print( - f"{icons.red_dot} In order to run the 'create_pqt_file' function, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." - ) - return + raise ValueError(f"{icons.red_dot} In order to run the 'create_pqt_file' function, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) + workspace = fabric.resolve_workspace_name(workspace) folderPath = "/lakehouse/default/Files" subFolderPath = os.path.join(folderPath, "pqtnewfolder") diff --git a/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py b/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py index 33ffc339..21dfd60f 100644 --- a/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +++ b/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py @@ -38,11 +38,7 @@ def migrate_model_objects_to_semantic_model( import Microsoft.AnalysisServices.Tabular as TOM import System - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) - else: - workspace_id = fabric.resolve_workspace_id(workspace) + workspace = fabric.resolve_workspace_name(workspace) if new_dataset_workspace is None: new_dataset_workspace = workspace diff --git a/src/sempy_labs/migration/_migration_validation.py b/src/sempy_labs/migration/_migration_validation.py index 36f26f1b..9286c783 100644 --- a/src/sempy_labs/migration/_migration_validation.py +++ b/src/sempy_labs/migration/_migration_validation.py @@ -36,6 +36,10 @@ def migration_validation( A pandas dataframe showing a list of objects and whether they were successfully migrated. Also shows the % of objects which were migrated successfully. """ + workspace = fabric.resolve_workspace_name(workspace) + if new_dataset_workspace is None: + new_dataset_workspace = workspace + dfA = list_semantic_model_objects(dataset=dataset, workspace=workspace) dfB = list_semantic_model_objects( dataset=new_dataset, workspace=new_dataset_workspace diff --git a/src/sempy_labs/migration/_refresh_calc_tables.py b/src/sempy_labs/migration/_refresh_calc_tables.py index c413d13f..a8ffff61 100644 --- a/src/sempy_labs/migration/_refresh_calc_tables.py +++ b/src/sempy_labs/migration/_refresh_calc_tables.py @@ -30,6 +30,8 @@ def refresh_calc_tables(dataset: str, workspace: Optional[str] = None): timeout = datetime.timedelta(minutes=1) success = False + workspace = fabric.resolve_workspace_name(workspace) + while not success: try: with connect_semantic_model( diff --git a/src/sempy_labs/report/_generate_report.py b/src/sempy_labs/report/_generate_report.py index d29776f1..9fc352de 100644 --- a/src/sempy_labs/report/_generate_report.py +++ b/src/sempy_labs/report/_generate_report.py @@ -41,10 +41,7 @@ def create_report_from_reportjson( dfI_model = dfI_m[(dfI_m["Display Name"] == dataset)] if len(dfI_model) == 0: - print( - f"{icons.red_dot} The '{dataset}' semantic model does not exist in the '{workspace}' workspace." 
- ) - return + raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model does not exist in the '{workspace}' workspace.") datasetId = dfI_model["Id"].iloc[0] @@ -172,8 +169,7 @@ def update_report_from_reportjson( dfR_filt = dfR[(dfR["Name"] == report) & (dfR["Report Type"] == "PowerBIReport")] if len(dfR_filt) == 0: - print(f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace.") - return + raise ValueError(f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace.") reportId = dfR_filt["Id"].iloc[0] client = fabric.FabricRestClient() diff --git a/src/sempy_labs/report/_report_functions.py b/src/sempy_labs/report/_report_functions.py index d87dee67..1d89fa36 100644 --- a/src/sempy_labs/report/_report_functions.py +++ b/src/sempy_labs/report/_report_functions.py @@ -55,10 +55,7 @@ def get_report_json( dfI_filt = dfI[(dfI["Display Name"] == report)] if len(dfI_filt) == 0: - print( - f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace.") itemId = dfI_filt["Id"].iloc[0] response = client.post( @@ -74,10 +71,7 @@ def get_report_json( if save_to_file_name is not None: lakeAttach = lakehouse_attached() if lakeAttach is False: - print( - f"{icons.red_dot} In order to save the report.json file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." - ) - return + raise ValueError(f"{icons.red_dot} In order to save the report.json file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") lakehouse_id = fabric.get_lakehouse_id() lakehouse = resolve_lakehouse_name(lakehouse_id, workspace) @@ -191,10 +185,7 @@ def export_report( lakeAttach = lakehouse_attached() if lakeAttach is False: - print( - f"{icons.red_dot} In order to run the 'export_report' function, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." - ) - return + raise ValueError(f"{icons.red_dot} In order to run the 'export_report' function, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) @@ -204,15 +195,10 @@ def export_report( visual_name = [visual_name] if bookmark_name is not None and (page_name is not None or visual_name is not None): - print( - f"{icons.red_dot} If the 'bookmark_name' parameter is set, the 'page_name' and 'visual_name' parameters must not be set." - ) - return + raise ValueError(f"{icons.red_dot} If the 'bookmark_name' parameter is set, the 'page_name' and 'visual_name' parameters must not be set.") + if visual_name is not None and page_name is None: - print( - f"{icons.red_dot} If the 'visual_name' parameter is set, the 'page_name' parameter must be set." - ) - return + raise ValueError(f"{icons.red_dot} If the 'visual_name' parameter is set, the 'page_name' parameter must be set.") validFormats = { "ACCESSIBLEPDF": ".pdf", @@ -235,10 +221,7 @@ def export_report( fileExt = validFormats.get(export_format) if fileExt is None: - print( - f"{icons.red_dot} The '{export_format}' format is not a valid format for exporting Power BI reports. Please enter a valid format. Options: {validFormats}" - ) - return + raise ValueError(f"{icons.red_dot} The '{export_format}' format is not a valid format for exporting Power BI reports. Please enter a valid format. 
Options: {validFormats}") if file_name is None: file_name = report + fileExt @@ -255,10 +238,7 @@ def export_report( ] if len(dfI_filt) == 0: - print( - f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace.") reportType = dfI_filt["Type"].iloc[0] @@ -279,23 +259,15 @@ def export_report( ] if reportType == "Report" and export_format in paginatedOnly: - print( - f"{icons.red_dot} The '{export_format}' format is only supported for paginated reports." - ) - return + raise ValueError(f"{icons.red_dot} The '{export_format}' format is only supported for paginated reports.") + if reportType == "PaginatedReport" and export_format in pbiOnly: - print( - f"{icons.red_dot} The '{export_format}' format is only supported for Power BI reports." - ) - return + raise ValueError(f"{icons.red_dot} The '{export_format}' format is only supported for Power BI reports.") if reportType == "PaginatedReport" and ( bookmark_name is not None or page_name is not None or visual_name is not None ): - print( - f"{icons.red_dot} Export for paginated reports does not support bookmarks/pages/visuals. Those parameters must not be set for paginated reports." - ) - return + raise ValueError(f"{icons.red_dot} Export for paginated reports does not support bookmarks/pages/visuals. Those parameters must not be set for paginated reports.") reportId = dfI_filt["Id"].iloc[0] client = fabric.PowerBIRestClient() @@ -332,19 +304,15 @@ def export_report( for page in page_name: dfPage_filt = dfPage[dfPage["Page ID"] == page] if len(dfPage_filt) == 0: - print( - f"{icons.red_dot} The '{page}' page does not exist in the '{report}' report within the '{workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} The '{page}' page does not exist in the '{report}' report within the '{workspace}' workspace.") + page_dict = {"pageName": page} request_body["powerBIReportConfiguration"]["pages"].append(page_dict) elif page_name is not None and visual_name is not None: if len(page_name) != len(visual_name): - print( - f"{icons.red_dot} Each 'visual_name' must map to a single 'page_name'." - ) - return + raise ValueError(f"{icons.red_dot} Each 'visual_name' must map to a single 'page_name'.") + if reportType == "Report": request_body = {"format": export_format, "powerBIReportConfiguration": {}} @@ -356,10 +324,8 @@ def export_report( (dfVisual["Page ID"] == page) & (dfVisual["Visual ID"] == visual) ] if len(dfVisual_filt) == 0: - print( - f"{icons.red_dot} The '{visual}' visual does not exist on the '{page}' in the '{report}' report within the '{workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} The '{visual}' visual does not exist on the '{page}' in the '{report}' report within the '{workspace}' workspace.") + page_dict = {"pageName": page, "visualName": visual} request_body["powerBIReportConfiguration"]["pages"].append(page_dict) a += 1 @@ -393,9 +359,7 @@ def export_report( ) response_body = json.loads(response.content) if response_body["status"] == "Failed": - print( - f"{icons.red_dot} The export for the '{report}' report within the '{workspace}' workspace in the '{export_format}' format has failed." 
- ) + raise ValueError(f"{icons.red_dot} The export for the '{report}' report within the '{workspace}' workspace in the '{export_format}' format has failed.") else: response = client.get( f"/v1.0/myorg/groups/{workspace_id}/reports/{reportId}/exports/{exportId}/file" @@ -447,10 +411,7 @@ def clone_report( dfI_filt = dfI[(dfI["Display Name"] == report)] if len(dfI_filt) == 0: - print( - f"{icons.red_dot} The '{report}' report does not exist within the '{workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} The '{report}' report does not exist within the '{workspace}' workspace.") reportId = resolve_report_id(report, workspace) @@ -462,8 +423,8 @@ def clone_report( dfW_filt = dfW[dfW["Name"] == target_workspace] if len(dfW_filt) == 0: - print(f"{icons.red_dot} The '{workspace}' is not a valid workspace.") - return + raise ValueError(f"{icons.red_dot} The '{workspace}' is not a valid workspace.") + target_workspace_id = dfW_filt["Id"].iloc[0] if target_dataset is None: @@ -478,10 +439,8 @@ def clone_report( dfD_filt = dfD[dfD["Dataset Name"] == target_dataset] if len(dfD_filt) == 0: - print( - f"{icons.red_dot} The '{target_dataset}' target dataset does not exist in the '{target_workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} The '{target_dataset}' target dataset does not exist in the '{target_workspace}' workspace.") + target_dataset_id = dfD_filt["Dataset Id"].iloc[0] client = fabric.PowerBIRestClient() @@ -508,9 +467,7 @@ def clone_report( f"{icons.green_dot} The '{report}' report has been successfully cloned as the '{cloned_report}' report within the '{target_workspace}' workspace using the '{target_dataset}' semantic model." ) else: - print( - f"{icons.red_dot} POST request failed with status code: {response.status_code}" - ) + raise ValueError(f"{icons.red_dot} POST request failed with status code: {response.status_code}") def launch_report(report: str, workspace: Optional[str] = None): @@ -573,14 +530,14 @@ def list_report_pages(report: str, workspace: Optional[str] = None): reportJson = get_report_json(report=report, workspace=workspace) for section in reportJson["sections"]: - pageID = section["name"] - pageName = section["displayName"] + pageID = section.get("name") + pageName = section.get("displayName") # pageFilters = section['filters'] - pageWidth = section["width"] - pageHeight = section["height"] + pageWidth = section.get("width") + pageHeight = section.get("height") visualCount = len(section["visualContainers"]) pageHidden = False - pageConfig = section["config"] + pageConfig = section.get("config") pageConfigJson = json.loads(pageConfig) try: diff --git a/src/sempy_labs/report/_report_rebind.py b/src/sempy_labs/report/_report_rebind.py index d5ed5559..0d444f01 100644 --- a/src/sempy_labs/report/_report_rebind.py +++ b/src/sempy_labs/report/_report_rebind.py @@ -61,10 +61,7 @@ def report_rebind( f"{icons.green_dot} The '{report}' report has been successfully rebinded to the '{dataset}' semantic model." ) else: - print( - f"{icons.red_dot} The '{report}' report within the '{report_workspace}' workspace failed to rebind to the '{dataset}' semantic model within the '{dataset_workspace}' workspace." 
-        )
-
+        raise ValueError(f"{icons.red_dot} The '{report}' report within the '{report_workspace}' workspace failed to rebind to the '{dataset}' semantic model within the '{dataset_workspace}' workspace.")
 
 @log
 def report_rebind_all(
@@ -103,11 +100,7 @@ def report_rebind_all(
 
     """
 
-    if dataset_workspace is None:
-        dataset_workspace_id = fabric.get_workspace_id()
-        dataset_workspace = fabric.resolve_workspace_name(dataset_workspace_id)
-    else:
-        dataset_workspace_id = fabric.resolve_workspace_id(dataset_workspace)
+    dataset_workspace = fabric.resolve_workspace_name(dataset_workspace)
 
     if new_dataset_workpace is None:
         new_dataset_workpace = dataset_workspace
diff --git a/src/sempy_labs/tom/_model.py b/src/sempy_labs/tom/_model.py
index b2c5a211..34f3253d 100644
--- a/src/sempy_labs/tom/_model.py
+++ b/src/sempy_labs/tom/_model.py
@@ -658,22 +658,16 @@ def add_hierarchy(
         import Microsoft.AnalysisServices.Tabular as TOM
 
         if isinstance(columns, str):
-            print(
-                f"{icons.red_dot} The 'levels' parameter must be a list. For example: ['Continent', 'Country', 'City']"
-            )
-            return
+            raise ValueError(f"{icons.red_dot} The 'levels' parameter must be a list. For example: ['Continent', 'Country', 'City']")
+
         if len(columns) == 1:
-            print(f"{icons.red_dot} There must be at least 2 levels in order to create a hierarchy.")
-            return
+            raise ValueError(f"{icons.red_dot} There must be at least 2 levels in order to create a hierarchy.")
 
         if levels is None:
             levels = columns
 
         if len(columns) != len(levels):
-            print(
-                f"{icons.red_dot} If specifying level names, you must specify a level for each column."
-            )
-            return
+            raise ValueError(f"{icons.red_dot} If specifying level names, you must specify a level for each column.")
 
         obj = TOM.Hierarchy()
         obj.Name = hierarchy_name
@@ -1003,9 +997,7 @@ def set_alternate_of(
         import System
 
         if base_column is not None and base_table is None:
-            print(
-                f"{icons.red_dot} If you specify the base table you must also specify the base column"
-            )
+            raise ValueError(f"{icons.red_dot} If you specify the base table you must also specify the base column")
 
         summarization_type = (
             summarization_type.replace(" ", "")
@@ -1015,10 +1007,7 @@ def set_alternate_of(
         summarizationTypes = ["Sum", "GroupBy", "Count", "Min", "Max"]
 
         if summarization_type not in summarizationTypes:
-            print(
-                f"{icons.red_dot} The 'summarization_type' parameter must be one of the following valuse: {summarizationTypes}."
-            )
-            return
+            raise ValueError(f"{icons.red_dot} The 'summarization_type' parameter must be one of the following values: {summarizationTypes}.")
 
         ao = TOM.AlternateOf()
         ao.Summarization = System.Enum.Parse(TOM.SummarizationType, summarization_type)
@@ -1277,10 +1266,7 @@ def in_perspective(
         objectType = object.ObjectType
 
         if objectType not in validObjects:
-            print(
-                f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}."
-            )
-            return
+            raise ValueError(f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}.")
 
         object.Model.Perspectives[perspective_name]
 
@@ -1331,15 +1317,12 @@ def add_to_perspective(
         objectType = object.ObjectType
 
         if objectType not in validObjects:
-            print(
-                f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}." 
- ) - return + raise ValueError(f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}.") + try: object.Model.Perspectives[perspective_name] except: - print(f"{icons.red_dot} The '{perspective_name}' perspective does not exist.") - return + raise ValueError(f"{icons.red_dot} The '{perspective_name}' perspective does not exist.") # try: if objectType == TOM.ObjectType.Table: @@ -1393,15 +1376,12 @@ def remove_from_perspective( objectType = object.ObjectType if objectType not in validObjects: - print( - f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}." - ) - return + raise ValueError(f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}.") + try: object.Model.Perspectives[perspective_name] except: - print(f"{icons.red_dot} The '{perspective_name}' perspective does not exist.") - return + raise ValueError(f"{icons.red_dot} The '{perspective_name}' perspective does not exist.") # try: if objectType == TOM.ObjectType.Table: @@ -1474,8 +1454,7 @@ def set_translation( ] # , 'Level' if object.ObjectType not in validObjects: - print(f"{icons.red_dot} Translations can only be set to {validObjects}.") - return + raise ValueError(f"{icons.red_dot} Translations can only be set to {validObjects}.") mapping = { "Name": TOM.TranslatedProperty.Caption, @@ -1488,10 +1467,7 @@ def set_translation( try: object.Model.Cultures[language] except: - print( - f"{icons.red_dot} The '{language}' translation language does not exist in the semantic model." - ) - return + raise ValueError(f"{icons.red_dot} The '{language}' translation language does not exist in the semantic model.") object.Model.Cultures[language].ObjectTranslations.SetTranslation( object, prop, value @@ -1955,11 +1931,8 @@ def mark_as_date_table(self, table_name: str, column_name: str): t = self.model.Tables[table_name] c = t.Columns[column_name] if c.DataType != TOM.DataType.DateTime: - print( - f"{icons.red_dot} The column specified in the 'column_name' parameter in this function must be of DateTime data type." - ) - return - + raise ValueError(f"{icons.red_dot} The column specified in the 'column_name' parameter in this function must be of DateTime data type.") + daxQuery = f""" define measure '{table_name}'[test] = var mn = MIN('{table_name}'[{column_name}]) @@ -1978,10 +1951,7 @@ def mark_as_date_table(self, table_name: str, column_name: str): ) value = df["1"].iloc[0] if value != "1": - print( - f"{icons.red_dot} The '{column_name}' within the '{table_name}' table does not contain contiguous date values." - ) - return + raise ValueError(f"{icons.red_dot} The '{column_name}' within the '{table_name}' table does not contain contiguous date values.") # Mark as a date table t.DataCategory = "Time" @@ -2176,10 +2146,7 @@ def set_kpi( # https://github.com/m-kovalsky/Tabular/blob/master/KPI%20Graphics.md if measure_name == target: - print( - f"{icons.red_dot} The 'target' parameter cannot be the same measure as the 'measure_name' parameter." - ) - return + raise ValueError(f"{icons.red_dot} The 'target' parameter cannot be the same measure as the 'measure_name' parameter.") if status_graphic is None: status_graphic = "Three Circles Colored" @@ -2192,43 +2159,31 @@ def set_kpi( status_type = status_type.title().replace(" ", "") if status_type not in valid_status_types: - print( - f"{icons.red_dot} '{status_type}' is an invalid status_type. Please choose from these options: {valid_status_types}." 
- ) - return + raise ValueError(f"{icons.red_dot} '{status_type}' is an invalid status_type. Please choose from these options: {valid_status_types}.") if status_type in ["Linear", "LinearReversed"]: if upper_bound is not None or lower_mid_bound is not None: - print( - f"{icons.red_dot} The 'upper_mid_bound' and 'lower_mid_bound' parameters are not used in the 'Linear' and 'LinearReversed' status types. Make sure these parameters are set to None." - ) - return + raise ValueError(f"{icons.red_dot} The 'upper_mid_bound' and 'lower_mid_bound' parameters are not used in the 'Linear' and 'LinearReversed' status types. Make sure these parameters are set to None.") + elif upper_bound <= lower_bound: - print(f"{icons.red_dot} The upper_bound must be greater than the lower_bound.") - return + raise ValueError(f"{icons.red_dot} The upper_bound must be greater than the lower_bound.") if status_type in ["Centered", "CenteredReversed"]: if upper_mid_bound is None or lower_mid_bound is None: - print( - f"{icons.red_dot} The 'upper_mid_bound' and 'lower_mid_bound' parameters are necessary in the 'Centered' and 'CenteredReversed' status types." - ) - return + raise ValueError(f"{icons.red_dot} The 'upper_mid_bound' and 'lower_mid_bound' parameters are necessary in the 'Centered' and 'CenteredReversed' status types.") elif upper_bound <= upper_mid_bound: - print(f"{icons.red_dot} The upper_bound must be greater than the upper_mid_bound.") + raise ValueError(f"{icons.red_dot} The upper_bound must be greater than the upper_mid_bound.") elif upper_mid_bound <= lower_mid_bound: - print(f"{icons.red_dot} The upper_mid_bound must be greater than the lower_mid_bound.") + raise ValueError(f"{icons.red_dot} The upper_mid_bound must be greater than the lower_mid_bound.") elif lower_mid_bound <= lower_bound: - print(f"{icons.red_dot} The lower_mid_bound must be greater than the lower_bound.") + raise ValueError(f"{icons.red_dot} The lower_mid_bound must be greater than the lower_bound.") try: table_name = next( m.Parent.Name for m in self.all_measures() if m.Name == measure_name ) except: - print( - f"{icons.red_dot} The '{measure_name}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'." - ) - return + raise ValueError(f"{icons.red_dot} The '{measure_name}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'.") graphics = [ "Cylinder", @@ -2251,10 +2206,7 @@ def set_kpi( ] if status_graphic not in graphics: - print( - f"{icons.red_dot} The '{status_graphic}' status graphic is not valid. Please choose from these options: {graphics}." - ) - return + raise ValueError(f"{icons.red_dot} The '{status_graphic}' status graphic is not valid. Please choose from these options: {graphics}.") measure_target = True @@ -2270,10 +2222,8 @@ def set_kpi( if m.Name == target ) except: - print( - f"{icons.red_dot} The '{target}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'." - ) - + raise ValueError(f"{icons.red_dot} The '{target}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'.") + if measure_target: expr = f"var x = [{measure_name}]/[{target}]\nreturn" else: @@ -2396,10 +2346,7 @@ def set_summarize_by( ) if value not in values: - print( - f"{icons.red_dot} '{value}' is not a valid value for the SummarizeBy property. These are the valid values: {values}." 
- ) - return + raise ValueError(f"{icons.red_dot} '{value}' is not a valid value for the SummarizeBy property. These are the valid values: {values}.") self.model.Tables[table_name].Columns[column_name].SummarizeBy = ( System.Enum.Parse(TOM.AggregateFunction, value) @@ -2428,10 +2375,7 @@ def set_direct_lake_behavior(self, direct_lake_behavior: str): dlValues = ["Automatic", "DirectLakeOnly", "DirectQueryOnly"] if direct_lake_behavior not in dlValues: - print( - f"{icons.red_dot} The 'direct_lake_behavior' parameter must be one of these values: {dlValues}." - ) - return + raise ValueError(f"{icons.red_dot} The 'direct_lake_behavior' parameter must be one of these values: {dlValues}.") self.model.DirectLakeBehavior = System.Enum.Parse( TOM.DirectLakeBehavior, direct_lake_behavior @@ -2529,13 +2473,10 @@ def add_field_parameter(self, table_name: str, objects: List[str]): import Microsoft.AnalysisServices.Tabular as TOM if isinstance(objects, str): - print(f"{icons.red_dot} The 'objects' parameter must be a list of columns/measures.") - return + raise ValueError(f"{icons.red_dot} The 'objects' parameter must be a list of columns/measures.") + if len(objects) == 1: - print( - f"{icons.red_dot} There must be more than one object (column/measure) within the objects parameter." - ) - return + raise ValueError(f"{icons.red_dot} There must be more than one object (column/measure) within the objects parameter.") expr = "" i = 0 @@ -2571,10 +2512,7 @@ def add_field_parameter(self, table_name: str, objects: List[str]): ) success = True if not success: - print( - f"{icons.red_dot} The '{obj}' object was not found in the '{self._dataset}' semantic model." - ) - return + raise ValueError(f"{icons.red_dot} The '{obj}' object was not found in the '{self._dataset}' semantic model.") else: i += 1 @@ -3244,26 +3182,16 @@ def update_incremental_refresh_policy( rolling_window_granularity = rolling_window_granularity.capitalize() if incremental_granularity not in incGran: - print( - f"{icons.red_dot} Invalid 'incremental_granularity' value. Please choose from the following options: {incGran}." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'incremental_granularity' value. Please choose from the following options: {incGran}.") + if rolling_window_granularity not in incGran: - print( - f"{icons.red_dot} Invalid 'rolling_window_granularity' value. Please choose from the following options: {incGran}." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'rolling_window_granularity' value. Please choose from the following options: {incGran}.") if rolling_window_periods < 1: - print( - f"{icons.red_dot} Invalid 'rolling_window_periods' value. Must be a value greater than 0." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'rolling_window_periods' value. Must be a value greater than 0.") + if incremental_periods < 1: - print( - f"{icons.red_dot} Invalid 'incremental_periods' value. Must be a value greater than 0." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'incremental_periods' value. Must be a value greater than 0.") t = self.model.Tables[table_name] @@ -3271,10 +3199,7 @@ def update_incremental_refresh_policy( dc = t.Columns[detect_data_changes_column] if dc.DataType != TOM.DataType.DateTime: - print( - f"{icons.red_dot} Invalid 'detect_data_changes_column' parameter. This column must be of DateTime data type." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'detect_data_changes_column' parameter. 
This column must be of DateTime data type.") rp = TOM.BasicRefreshPolicy() rp.IncrementalPeriods = incremental_periods @@ -3354,26 +3279,16 @@ def add_incremental_refresh_policy( rolling_window_granularity = rolling_window_granularity.capitalize() if incremental_granularity not in incGran: - print( - f"{icons.red_dot} Invalid 'incremental_granularity' value. Please choose from the following options: {incGran}." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'incremental_granularity' value. Please choose from the following options: {incGran}.") + if rolling_window_granularity not in incGran: - print( - f"{icons.red_dot} Invalid 'rolling_window_granularity' value. Please choose from the following options: {incGran}." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'rolling_window_granularity' value. Please choose from the following options: {incGran}.") if rolling_window_periods < 1: - print( - f"{icons.red_dot} Invalid 'rolling_window_periods' value. Must be a value greater than 0." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'rolling_window_periods' value. Must be a value greater than 0.") + if incremental_periods < 1: - print( - f"{icons.red_dot} Invalid 'incremental_periods' value. Must be a value greater than 0." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'incremental_periods' value. Must be a value greater than 0.") date_format = "%m/%d/%Y" @@ -3388,10 +3303,7 @@ def add_incremental_refresh_policy( end_day = date_obj_end.day if date_obj_end <= date_obj_start: - print( - f"{icons.red_dot} Invalid 'start_date' or 'end_date'. The 'end_date' must be after the 'start_date'." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'start_date' or 'end_date'. The 'end_date' must be after the 'start_date'.") t = self.model.Tables[table_name] @@ -3400,20 +3312,14 @@ def add_incremental_refresh_policy( dType = c.DataType if dType != TOM.DataType.DateTime: - print( - f"{icons.red_dot} The {fcName} column is of '{dType}' data type. The column chosen must be of DateTime data type." - ) - return + raise ValueError(f"{icons.red_dot} The {fcName} column is of '{dType}' data type. The column chosen must be of DateTime data type.") if detect_data_changes_column is not None: dc = t.Columns[detect_data_changes_column] dcType = dc.DataType if dcType != TOM.DataType.DateTime: - print( - f"{icons.red_dot} Invalid 'detect_data_changes_column' parameter. This column must be of DateTime data type." - ) - return + raise ValueError(f"{icons.red_dot} Invalid 'detect_data_changes_column' parameter. This column must be of DateTime data type.") # Start changes: @@ -3421,10 +3327,8 @@ def add_incremental_refresh_policy( i = 0 for p in t.Partitions: if p.SourceType != TOM.PartitionSourceType.M: - print( - f"{icons.red_dot} Invalid partition source type. Incremental refresh can only be set up if the table's partition is an M-partition." - ) - return + raise ValueError(f"{icons.red_dot} Invalid partition source type. 
Incremental refresh can only be set up if the table's partition is an M-partition.") + elif i == 0: text = p.Expression text = text.rstrip() @@ -3440,8 +3344,7 @@ def add_incremental_refresh_policy( print(text_before_last_match) else: - print(f"{icons.red_dot} Invalid M-partition expression.") - return + raise ValueError(f"{icons.red_dot} Invalid M-partition expression.") endExpr = f'#"Filtered Rows IR" = Table.SelectRows({obj}, each [{column_name}] >= RangeStart and [{column_name}] <= RangeEnd)\n#"Filtered Rows IR"' finalExpr = text_before_last_match + endExpr @@ -3537,15 +3440,9 @@ def set_data_coverage_definition( ht = self.is_hybrid_table(table_name=table_name) if not ht: - print( - f"{icons.red_dot} The `data coverage definition `_ property is only applicable to `hybrid tables `_. See the documentation: {doc}." - ) - return + raise ValueError(f"{icons.red_dot} The `data coverage definition `_ property is only applicable to `hybrid tables `_. See the documentation: {doc}.") if p.Mode != TOM.ModeType.DirectQuery: - print( - f"{icons.red_dot} The `data coverage definition `_ property is only applicable to the DirectQuery partition of a `hybrid table `_. See the documentation: {doc}." - ) - return + raise ValueError(f"{icons.red_dot} The `data coverage definition `_ property is only applicable to the DirectQuery partition of a `hybrid table `_. See the documentation: {doc}.") dcd = TOM.DataCoverageDefinition() dcd.Expression = expression @@ -3572,10 +3469,7 @@ def set_encoding_hint(self, table_name: str, column_name: str, value: str): value = value.capitalize() if value not in values: - print( - f"{icons.red_dot} Invalid encoding hint value. Please choose from these options: {values}." - ) - return + raise ValueError(f"{icons.red_dot} Invalid encoding hint value. Please choose from these options: {values}.") self.model.Tables[table_name].Columns[column_name].EncodingHint = ( System.Enum.Parse(TOM.EncodingHintType, value) @@ -3617,10 +3511,7 @@ def set_data_type(self, table_name: str, column_name: str, value: str): value = "Boolean" if value not in values: - print( - f"{icons.red_dot} Invalid data type. Please choose from these options: {values}." - ) - return + raise ValueError(f"{icons.red_dot} Invalid data type. Please choose from these options: {values}.") self.model.Tables[table_name].Columns[column_name].DataType = System.Enum.Parse( TOM.DataType, value @@ -3652,10 +3543,7 @@ def add_time_intelligence( for t in time_intel: t = t.capitalize() if t not in [time_intel_options]: - print( - f"{icons.red_dot} The '{t}' time intelligence variation is not supported. Valid options: {time_intel_options}." - ) - return + raise ValueError(f"{icons.red_dot} The '{t}' time intelligence variation is not supported. Valid options: {time_intel_options}.") # Validate measure and extract table name for m in self.all_measures(): @@ -3663,17 +3551,11 @@ def add_time_intelligence( table_name = m.Parent.Name if table_name is None: - print( - f"{icons.red_dot} The '{measure_name}' is not a valid measure in the '{self._dataset}' semantic model within the '{self._workspace}' workspace." - ) - return + raise ValueError(f"{icons.red_dot} The '{measure_name}' is not a valid measure in the '{self._dataset}' semantic model within the '{self._workspace}' workspace.") # Validate date table if not self.is_date_table(date_table): - print( - f"{icons.red_dot} The '{date_table}' table is not a valid date table in the '{self._dataset}' wemantic model within the '{self._workspace}' workspace." 
-            )
-            return
+            raise ValueError(f"{icons.red_dot} The '{date_table}' table is not a valid date table in the '{self._dataset}' semantic model within the '{self._workspace}' workspace.")
 
         # Extract date key from date table
         for c in self.all_columns():
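
Illustrative sketch only, not part of the patch: because the validation failures above now raise ValueError instead of printing a message and returning, notebook code can rely on ordinary exception handling rather than scanning printed output. The example assumes vertipaq_analyzer is exported at the sempy_labs package level and accepts the dataset, workspace, and export parameters shown in the hunks above.

import sempy_labs as labs

try:
    # export="zip" requires an attached lakehouse; with this patch that failure
    # surfaces as a ValueError rather than a printed message and a silent return.
    labs.vertipaq_analyzer(dataset="Sales Model", workspace="Finance", export="zip")
except ValueError as err:
    # e.g. no lakehouse attached, or one of the model/lakehouse checks above failed
    print(f"Vertipaq Analyzer could not run: {err}")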