From 94ce4f0be45820481232cb203098fa71d672fb03 Mon Sep 17 00:00:00 2001
From: Michael Kovalsky
Date: Thu, 13 Jun 2024 11:38:55 +0300
Subject: [PATCH] added icons to more functions, fixed regex issues, renamed the run_dax function, fixed update_directlake_partition_entity parameters.

---
 src/sempy_labs/__init__.py | 4 +-
 src/sempy_labs/_ai.py | 50 +-
 src/sempy_labs/_clear_cache.py | 1 +
 src/sempy_labs/_dax.py | 2 +-
 src/sempy_labs/_icons.py | 5 +
 src/sempy_labs/_list_functions.py | 2 +-
 src/sempy_labs/_model_bpa.py | 20 +-
 src/sempy_labs/_one_lake_integration.py | 18 +-
 src/sempy_labs/_tom.py | 454 +++++++++---------
 src/sempy_labs/_translations.py | 6 +-
 src/sempy_labs/_vertipaq.py | 12 +-
 .../directlake/_directlake_schema_compare.py | 14 +-
 .../directlake/_directlake_schema_sync.py | 10 +-
 src/sempy_labs/directlake/_fallback.py | 4 +-
 .../directlake/_get_directlake_lakehouse.py | 4 +-
 .../directlake/_get_shared_expression.py | 4 +-
 src/sempy_labs/directlake/_guardrails.py | 6 +-
 .../_list_directlake_model_calc_tables.py | 6 +-
 ...e_directlake_model_lakehouse_connection.py | 12 +-
 .../_update_directlake_partition_entity.py | 29 +-
 src/sempy_labs/directlake/_warm_cache.py | 2 +-
 .../lakehouse/_get_lakehouse_tables.py | 8 +-
 src/sempy_labs/lakehouse/_lakehouse.py | 4 +-
 .../_migrate_calctables_to_lakehouse.py | 36 +-
 .../_migrate_calctables_to_semantic_model.py | 4 +-
 ...migrate_model_objects_to_semantic_model.py | 50 +-
 ...igrate_tables_columns_to_semantic_model.py | 6 +-
 .../migration/_migration_validation.py | 10 +-
 .../migration/_refresh_calc_tables.py | 4 +-
 src/sempy_labs/report/_generate_report.py | 20 +-
 30 files changed, 412 insertions(+), 395 deletions(-)

diff --git a/src/sempy_labs/__init__.py b/src/sempy_labs/__init__.py
index 9a6a5d86..349adbf9 100644
--- a/src/sempy_labs/__init__.py
+++ b/src/sempy_labs/__init__.py
@@ -5,7 +5,7 @@
 # create_connection_vnet,
 # create_connection_on_prem
 # )
-from sempy_labs._dax import run_dax
+from sempy_labs._dax import evaluate_dax_impersonation
 from sempy_labs._generate_semantic_model import (
     create_blank_semantic_model,
     create_semantic_model_from_bim,
@@ -89,7 +89,7 @@
     # create_connection_cloud,
     # create_connection_vnet,
     # create_connection_on_prem,
-    "run_dax",
+    "evaluate_dax_impersonation",
     "create_blank_semantic_model",
     "create_semantic_model_from_bim",
     #'deploy_semantic_model',
diff --git a/src/sempy_labs/_ai.py b/src/sempy_labs/_ai.py
index 48638e27..908b75ab 100644
--- a/src/sempy_labs/_ai.py
+++ b/src/sempy_labs/_ai.py
@@ -6,7 +6,7 @@
 from pyspark.sql import SparkSession
 from typing import List, Optional, Union
 from IPython.display import display
-
+import sempy_labs._icons as icons
 
 
 def optimize_semantic_model(dataset: str, workspace: Optional[str] = None):
@@ -39,7 +39,7 @@ def optimize_semantic_model(dataset: str, workspace: Optional[str] = None):
 
     if len(fallback_filt) > 0:
         print(
-            f"The '{dataset}' semantic model is a Direct Lake semantic model which contains views. Since views always fall back to DirectQuery, it is recommended to only use lakehouse tables and not views."
+            f"{icons.yellow_dot} The '{dataset}' semantic model is a Direct Lake semantic model which contains views. Since views always fall back to DirectQuery, it is recommended to only use lakehouse tables and not views."
) # Potential model reduction estimate @@ -56,11 +56,11 @@ def optimize_semantic_model(dataset: str, workspace: Optional[str] = None): totSize = df["Total Size"].sum() if len(df_filt) > 0: print( - f"Potential savings of {totSize} bytes from following the '{rule}' rule." + f"{icons.yellow_dot} Potential savings of {totSize} bytes from following the '{rule}' rule." ) display(df_filt) else: - print(f"The '{rule}' rule has been followed.") + print(f"{icons.green_dot} The '{rule}' rule has been followed.") def generate_measure_descriptions( @@ -78,7 +78,7 @@ def generate_measure_descriptions( validModels = ["gpt-35-turbo", "gpt-35-turbo-16k", "gpt-4"] if gpt_model not in validModels: print( - f"The '{gpt_model}' model is not a valid model. Enter a gpt_model from this list: {validModels}." + f"{icons.red_dot} The '{gpt_model}' model is not a valid model. Enter a gpt_model from this list: {validModels}." ) return @@ -173,7 +173,7 @@ def generate_aggs( if any(value not in aggTypes for value in columns.values()): print( - f"Invalid aggregation type(s) have been specified in the 'columns' parameter. Valid aggregation types: {aggTypes}." + f"{icons.red_dot} Invalid aggregation type(s) have been specified in the 'columns' parameter. Valid aggregation types: {aggTypes}." ) return @@ -183,7 +183,7 @@ def generate_aggs( dfR = fabric.list_relationships(dataset=dataset, workspace=workspace) if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()): print( - f"The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode. This function is only relevant for Direct Lake semantic models." + f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode. This function is only relevant for Direct Lake semantic models." ) return @@ -191,7 +191,7 @@ def generate_aggs( if len(dfC_filtT) == 0: print( - f"The '{table_name}' table does not exist in the '{dataset}' semantic model within the '{workspace}' workspace." + f"{icons.red_dot} The '{table_name}' table does not exist in the '{dataset}' semantic model within the '{workspace}' workspace." ) return @@ -201,7 +201,7 @@ def generate_aggs( if len(columns) != len(dfC_filt): print( - f"Columns listed in '{columnValues}' do not exist in the '{table_name}' table in the '{dataset}' semantic model within the '{workspace}' workspace." + f"{icons.red_dot} Columns listed in '{columnValues}' do not exist in the '{table_name}' table in the '{dataset}' semantic model within the '{workspace}' workspace." ) return @@ -211,7 +211,7 @@ def generate_aggs( dataType = dfC_col["Data Type"].iloc[0] if agg in aggTypesAggregate and dataType not in numericTypes: print( - f"The '{col}' column in the '{table_name}' table is of '{dataType}' data type. Only columns of '{numericTypes}' data types can be aggregated as '{aggTypesAggregate}' aggregation types." + f"{icons.red_dot} The '{col}' column in the '{table_name}' table is of '{dataType}' data type. Only columns of '{numericTypes}' data types can be aggregated as '{aggTypesAggregate}' aggregation types." ) return @@ -230,7 +230,7 @@ def generate_aggs( if len(dfI_filt) == 0: print( - f"The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter." + f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter." 
) return @@ -278,16 +278,16 @@ def generate_aggs( delta_table_name=aggLakeTName, ) spark_df.write.mode("overwrite").format("delta").save(aggFilePath) - f"The '{aggLakeTName}' table has been created/updated in the lakehouse." + f"{icons.green_dot} The '{aggLakeTName}' table has been created/updated in the lakehouse." # Create/update semantic model agg table tom_server = fabric.create_tom_server(readonly=False, workspace=workspace) m = tom_server.Databases.GetByName(dataset).Model - f"\nUpdating the '{dataset}' semantic model..." + f"\n{icons.in_progress} Updating the '{dataset}' semantic model..." dfC_agg = dfC[dfC["Table Name"] == aggTableName] if len(dfC_agg) == 0: - print(f"Creating the '{aggTableName}' table...") + print(f"{icons.in_progress} Creating the '{aggTableName}' table...") exp = m.Expressions["DatabaseQuery"] tbl = TOM.Table() tbl.Name = aggTableName @@ -318,15 +318,15 @@ def generate_aggs( tbl.Columns.Add(col) print( - f"The '{aggTableName}'[{cName}] column has been added to the '{dataset}' semantic model." + f"{icons.green_dot} The '{aggTableName}'[{cName}] column has been added to the '{dataset}' semantic model." ) m.Tables.Add(tbl) print( - f"The '{aggTableName}' table has been added to the '{dataset}' semantic model." + f"{icons.green_dot} The '{aggTableName}' table has been added to the '{dataset}' semantic model." ) else: - print(f"Updating the '{aggTableName}' table's columns...") + print(f"{icons.in_progress} Updating the '{aggTableName}' table's columns...") # Remove existing columns for t in m.Tables: tName = t.Name @@ -347,12 +347,12 @@ def generate_aggs( col.DataType = System.Enum.Parse(TOM.DataType, dType) m.Tables[aggTableName].Columns.Add(col) - print(f"The '{aggTableName}'[{cName}] column has been added.") + print(f"{icons.green_dot} The '{aggTableName}'[{cName}] column has been added.") # Create relationships relMap = {"m": "Many", "1": "One", "0": "None"} - print(f"\nGenerating necessary relationships...") + print(f"\n{icons.in_progress} Generating necessary relationships...") for i, r in dfR.iterrows(): fromTable = r["From Table"] fromColumn = r["From Column"] @@ -384,27 +384,27 @@ def generate_aggs( rel.FromColumn = m.Tables[aggTableName].Columns[fromColumn] m.Relationships.Add(rel) print( - f"'{aggTableName}'[{fromColumn}] -> '{toTable}'[{toColumn}] relationship has been added." + f"{icons.green_dot} '{aggTableName}'[{fromColumn}] -> '{toTable}'[{toColumn}] relationship has been added." ) except: print( - f"'{aggTableName}'[{fromColumn}] -> '{toTable}'[{toColumn}] relationship has not been created." + f"{icons.red_dot} '{aggTableName}'[{fromColumn}] -> '{toTable}'[{toColumn}] relationship has not been created." ) elif toTable == table_name: try: rel.ToColumn = m.Tables[aggTableName].Columns[toColumn] m.Relationships.Add(rel) print( - f"'{fromTable}'[{fromColumn}] -> '{aggTableName}'[{toColumn}] relationship has been added." + f"{icons.green_dot} '{fromTable}'[{fromColumn}] -> '{aggTableName}'[{toColumn}] relationship has been added." ) except: print( - f"'{fromTable}'[{fromColumn}] -> '{aggTableName}'[{toColumn}] relationship has not been created." + f"{icons.red_dot} '{fromTable}'[{fromColumn}] -> '{aggTableName}'[{toColumn}] relationship has not been created." ) f"Relationship creation is complete." # Create IF measure - f"\nCreating measure to check if the agg table can be used..." + f"\n{icons.in_progress} Creating measure to check if the agg table can be used..." 
aggChecker = "IF(" dfR_filt = dfR[ (dfR["From Table"] == table_name) & (~dfR["From Column"].isin(columnValues)) @@ -419,7 +419,7 @@ def generate_aggs( print(aggChecker) # Todo: add IFISFILTERED clause for columns - f"\n Creating the base measures in the agg table..." + f"\n{icons.in_progress} Creating the base measures in the agg table..." # Create base agg measures dep = fabric.evaluate_dax( dataset=dataset, diff --git a/src/sempy_labs/_clear_cache.py b/src/sempy_labs/_clear_cache.py index cab8d8db..470baa40 100644 --- a/src/sempy_labs/_clear_cache.py +++ b/src/sempy_labs/_clear_cache.py @@ -8,6 +8,7 @@ def clear_cache(dataset: str, workspace: Optional[str] = None): """ Clears the cache of a semantic model. + See `here `_ for documentation. Parameters ---------- diff --git a/src/sempy_labs/_dax.py b/src/sempy_labs/_dax.py index 75b29f94..39dfefe2 100644 --- a/src/sempy_labs/_dax.py +++ b/src/sempy_labs/_dax.py @@ -7,7 +7,7 @@ @log -def run_dax( +def evaluate_dax_impersonation( dataset: str, dax_query: str, user_name: Optional[str] = None, diff --git a/src/sempy_labs/_icons.py b/src/sempy_labs/_icons.py index 2547eb5f..112de5f5 100644 --- a/src/sempy_labs/_icons.py +++ b/src/sempy_labs/_icons.py @@ -2,3 +2,8 @@ yellow_dot = "\U0001F7E1" red_dot = "\U0001F534" in_progress = "⌛" +checked = "\u2611" +unchecked = "\u2610" +start_bold = "\033[1m" +end_bold = "\033[0m" +bullet = "\u2022" diff --git a/src/sempy_labs/_list_functions.py b/src/sempy_labs/_list_functions.py index 941dd7e4..28de4352 100644 --- a/src/sempy_labs/_list_functions.py +++ b/src/sempy_labs/_list_functions.py @@ -1308,7 +1308,7 @@ def list_kpis(dataset: str, workspace: Optional[str] = None): ] ) - for t in tom.model.Tables: + for t in tom._model.Tables: for m in t.Measures: if m.KPI is not None: new_data = { diff --git a/src/sempy_labs/_model_bpa.py b/src/sempy_labs/_model_bpa.py index dfd3a61e..bfbcf774 100644 --- a/src/sempy_labs/_model_bpa.py +++ b/src/sempy_labs/_model_bpa.py @@ -1008,7 +1008,7 @@ def run_model_bpa( dfM["Referenced By"].fillna(0, inplace=True) dfM["Referenced By"] = dfM["Referenced By"].fillna(0).astype(int) - pattern = "[^\( ][a-zA-Z0-9_()-]+\[[^\[]+\]|'[^']+'\[[^\[]+\]|\[[^\[]+\]" + pattern = r"[^\( ][a-zA-Z0-9_()-]+\[[^\[]+\]|'[^']+'\[[^\[]+\]|\[[^\[]+\]" dfM["Has Fully Qualified Measure Reference"] = False dfM["Has Unqualified Column Reference"] = False @@ -1041,15 +1041,15 @@ def run_model_bpa( dfM_filt = dfM[ dfM["Measure Expression"].str.contains( - "(?i)USERELATIONSHIP\s*\(\s*'*" - + fromTable - + "'*\[" - + fromColumn - + "\]\s*,\s*'*" - + toTable - + "'*\[" - + toColumn - + "\]", + r"(?i)USERELATIONSHIP\s*\(\s*'*" + + re.escape(fromTable) + + r"'*\[" + + re.escape(fromColumn) + + r"\]\s*,\s*'*" + + re.escape(toTable) + + r"'*\[" + + re.escape(toColumn) + + r"\]", regex=True, ) ] diff --git a/src/sempy_labs/_one_lake_integration.py b/src/sempy_labs/_one_lake_integration.py index cf22e4f7..dc155c47 100644 --- a/src/sempy_labs/_one_lake_integration.py +++ b/src/sempy_labs/_one_lake_integration.py @@ -3,7 +3,7 @@ from typing import Optional from sempy._utils._log import log from sempy_labs._helper_functions import resolve_workspace_name_and_id - +import sempy_labs._icons as icons @log def export_model_to_onelake( @@ -42,7 +42,7 @@ def export_model_to_onelake( if len(dfD_filt) == 0: print( - f"The '{dataset}' semantic model does not exist in the '{workspace}' workspace." + f"{icons.red_dot} The '{dataset}' semantic model does not exist in the '{workspace}' workspace." 
) return @@ -64,11 +64,11 @@ def export_model_to_onelake( try: fabric.execute_tmsl(script=tmsl, workspace=workspace) print( - f"The '{dataset}' semantic model's tables have been exported as delta tables to the '{workspace}' workspace.\n" + f"{icons.green_dot} The '{dataset}' semantic model's tables have been exported as delta tables to the '{workspace}' workspace.\n" ) except: print( - f"ERROR: The '{dataset}' semantic model's tables have not been exported as delta tables to the '{workspace}' workspace." + f"{icons.red_dot} The '{dataset}' semantic model's tables have not been exported as delta tables to the '{workspace}' workspace." ) print( f"Make sure you enable OneLake integration for the '{dataset}' semantic model. Follow the instructions here: https://learn.microsoft.com/power-bi/enterprise/onelake-integration-overview#enable-onelake-integration" @@ -83,14 +83,14 @@ def export_model_to_onelake( if len(dfI_filt) == 0: print( - f"The '{destination_lakehouse}' lakehouse does not exist within the '{destination_workspace}' workspace." + f"{icons.red_dot} The '{destination_lakehouse}' lakehouse does not exist within the '{destination_workspace}' workspace." ) # Create lakehouse destination_lakehouse_id = fabric.create_lakehouse( display_name=destination_lakehouse, workspace=destination_workspace ) print( - f"The '{destination_lakehouse}' lakehouse has been created within the '{destination_workspace}' workspace.\n" + f"{icons.green_dot} The '{destination_lakehouse}' lakehouse has been created within the '{destination_workspace}' workspace.\n" ) else: destination_lakehouse_id = dfI_filt["Id"].iloc[0] @@ -122,7 +122,7 @@ def export_model_to_onelake( client = fabric.FabricRestClient() - print("Creating shortcuts...\n") + print(f"{icons.in_progress} Creating shortcuts...\n") for tableName in tables: tablePath = "Tables/" + tableName shortcutName = tableName.replace(" ", "") @@ -145,11 +145,11 @@ def export_model_to_onelake( ) if response.status_code == 201: print( - f"\u2022 The shortcut '{shortcutName}' was created in the '{destination_lakehouse}' lakehouse within the '{destination_workspace}' workspace. It is based on the '{tableName}' table in the '{dataset}' semantic model within the '{workspace}' workspace.\n" + f"{icons.bullet} The shortcut '{shortcutName}' was created in the '{destination_lakehouse}' lakehouse within the '{destination_workspace}' workspace. It is based on the '{tableName}' table in the '{dataset}' semantic model within the '{workspace}' workspace.\n" ) else: print(response.status_code) except: print( - f"ERROR: Failed to create a shortcut for the '{tableName}' table." + f"{icons.red_dot} Failed to create a shortcut for the '{tableName}' table." ) diff --git a/src/sempy_labs/_tom.py b/src/sempy_labs/_tom.py index c5a163ce..574af6be 100644 --- a/src/sempy_labs/_tom.py +++ b/src/sempy_labs/_tom.py @@ -17,32 +17,26 @@ import Microsoft.AnalysisServices.Tabular as TOM -checked = "\u2611" -unchecked = "\u2610" -start_bold = "\033[1m" -end_bold = "\033[0m" - - class TOMWrapper: """ Convenience wrapper around the TOM object model for a semantic model. Always use connect_semantic_model function to make sure the TOM object is initialized correctly. 
""" - dataset: str - workspace: str - readonly: bool - tables_added: List[str] + _dataset: str + _workspace: str + _readonly: bool + _tables_added: List[str] def __init__(self, dataset, workspace, readonly): - self.dataset = dataset - self.workspace = workspace - self.readonly = readonly - self.tables_added = [] + self._dataset = dataset + self._workspace = workspace + self._readonly = readonly + self._tables_added = [] - self.tom_server = fabric.create_tom_server( + self._tom_server = fabric.create_tom_server( readonly=readonly, workspace=workspace ) - self.model = self.tom_server.Databases.GetByName(dataset).Model + self._model = self._tom_server.Databases.GetByName(dataset).Model def all_columns(self): """ @@ -58,7 +52,7 @@ def all_columns(self): """ import Microsoft.AnalysisServices.Tabular as TOM - for t in self.model.Tables: + for t in self._model.Tables: for c in t.Columns: if c.Type != TOM.ColumnType.RowNumber: yield c @@ -77,7 +71,7 @@ def all_calculated_columns(self): """ import Microsoft.AnalysisServices.Tabular as TOM - for t in self.model.Tables: + for t in self._model.Tables: for c in t.Columns: if c.Type == TOM.ColumnType.Calculated: yield c @@ -96,7 +90,7 @@ def all_calculated_tables(self): """ import Microsoft.AnalysisServices.Tabular as TOM - for t in self.model.Tables: + for t in self._model.Tables: if any(p.SourceType == TOM.ColumnType.Calculated for p in t.Partitions): yield t @@ -113,7 +107,7 @@ def all_calculation_groups(self): All calculation groups within the semantic model. """ - for t in self.model.Tables: + for t in self._model.Tables: if t.CalculationGroup is not None: yield t @@ -130,7 +124,7 @@ def all_measures(self): All measures within the semantic model. """ - for t in self.model.Tables: + for t in self._model.Tables: for m in t.Measures: yield m @@ -147,7 +141,7 @@ def all_partitions(self): All partitions within the semantic model. """ - for t in self.model.Tables: + for t in self._model.Tables: for p in t.Partitions: yield p @@ -164,7 +158,7 @@ def all_hierarchies(self): All hierarchies within the semantic model. """ - for t in self.model.Tables: + for t in self._model.Tables: for h in t.Hierarchies: yield h @@ -181,7 +175,7 @@ def all_levels(self): All levels within the semantic model. """ - for t in self.model.Tables: + for t in self._model.Tables: for h in t.Hierarchies: for l in h.Levels: yield l @@ -199,7 +193,7 @@ def all_calculation_items(self): All calculation items within the semantic model. """ - for t in self.model.Tables: + for t in self._model.Tables: if t.CalculationGroup is not None: for ci in t.CalculationGroup.CalculationItems: yield ci @@ -217,7 +211,7 @@ def all_rls(self): All row level security expressions within the semantic model. 
""" - for r in self.model.Roles: + for r in self._model.Roles: for tp in r.TablePermissions: yield tp @@ -264,7 +258,7 @@ def add_measure( if display_folder is not None: obj.DisplayFolder = display_folder - self.model.Tables[table_name].Measures.Add(obj) + self._model.Tables[table_name].Measures.Add(obj) def add_calculated_table_column( self, @@ -340,7 +334,7 @@ def add_calculated_table_column( obj.DisplayFolder = display_folder if data_category is not None: obj.DataCategory = data_category - self.model.Tables[table_name].Columns.Add(obj) + self._model.Tables[table_name].Columns.Add(obj) def add_data_column( self, @@ -416,7 +410,7 @@ def add_data_column( obj.DisplayFolder = display_folder if data_category is not None: obj.DataCategory = data_category - self.model.Tables[table_name].Columns.Add(obj) + self._model.Tables[table_name].Columns.Add(obj) def add_calculated_column( self, @@ -459,7 +453,7 @@ def add_calculated_column( Marks the column as the primary key of the table. summarize_by : str, default=None Sets the value for the Summarize By property of the column. - Defaults to None resolves to 'Default'. + Defaults to None which resolves to 'Default'. """ import Microsoft.AnalysisServices.Tabular as TOM import System @@ -492,7 +486,7 @@ def add_calculated_column( obj.DisplayFolder = display_folder if data_category is not None: obj.DataCategory = data_category - self.model.Tables[table_name].Columns.Add(obj) + self._model.Tables[table_name].Columns.Add(obj) def add_calculation_item( self, @@ -504,7 +498,7 @@ def add_calculation_item( description: Optional[str] = None, ): """ - Adds a calculation item to a calculation group within a semantic model. + Adds a `calculation item `_ to a `calculation group `_ within a semantic model. Parameters ---------- @@ -533,7 +527,7 @@ def add_calculation_item( obj.Description = description if format_string_expression is not None: obj.FormatStringDefinition = fsd.Expression = format_string_expression - self.model.Tables[table_name].CalculationGroup.CalculationItems.Add(obj) + self._model.Tables[table_name].CalculationGroup.CalculationItems.Add(obj) def add_role( self, @@ -565,7 +559,7 @@ def add_role( obj.ModelPermission = System.Enum.Parse(TOM.ModelPermission, model_permission) if description is not None: obj.Description = description - self.model.Roles.Add(obj) + self._model.Roles.Add(obj) def set_rls(self, role_name: str, table_name: str, filter_expression: str): """ @@ -583,15 +577,15 @@ def set_rls(self, role_name: str, table_name: str, filter_expression: str): import Microsoft.AnalysisServices.Tabular as TOM tp = TOM.TablePermission() - tp.Table = self.model.Tables[table_name] + tp.Table = self._model.Tables[table_name] tp.FilterExpression = filter_expression try: - self.model.Roles[role_name].TablePermissions[ + self._model.Roles[role_name].TablePermissions[ table_name ].FilterExpression = filter_expression except: - self.model.Roles[role_name].TablePermissions.Add(tp) + self._model.Roles[role_name].TablePermissions.Add(tp) def set_ols( self, role_name: str, table_name: str, column_name: str, permission: str @@ -621,14 +615,14 @@ def set_ols( return cp = TOM.ColumnPermission() - cp.Column = self.model.Tables[table_name].Columns[column_name] + cp.Column = self._model.Tables[table_name].Columns[column_name] cp.MetadataPermission = System.Enum.Parse(TOM.MetadataPermission, permission) try: - self.model.Roles[role_name].TablePermissions[table_name].ColumnPermissions[ + self._model.Roles[role_name].TablePermissions[table_name].ColumnPermissions[ 
column_name ].MetadataPermission = System.Enum.Parse(TOM.MetadataPermission, permission) except: - self.model.Roles[role_name].TablePermissions[ + self._model.Roles[role_name].TablePermissions[ table_name ].ColumnPermissions.Add(cp) @@ -642,7 +636,7 @@ def add_hierarchy( hierarchy_hidden: Optional[bool] = False, ): """ - Adds a hierarchy to a table within a semantic model. + Adds a `hierarchy `_ to a table within a semantic model. Parameters ---------- @@ -652,7 +646,7 @@ def add_hierarchy( Name of the hierarchy. columns : List[str] Names of the columns to use within the hierarchy. - levels : List[str], default=None + `levels `_ : List[str], default=None Names of the levels to use within the hierarhcy (instead of the column names). hierarchy_description : str, default=None A description of the hierarchy. @@ -663,11 +657,11 @@ def add_hierarchy( if isinstance(columns, str): print( - f"The 'levels' parameter must be a list. For example: ['Continent', 'Country', 'City']" + f"{icons.red_dot} The 'levels' parameter must be a list. For example: ['Continent', 'Country', 'City']" ) return if len(columns) == 1: - print(f"There must be at least 2 levels in order to create a hierarchy.") + print(f"{icons.red_dot} There must be at least 2 levels in order to create a hierarchy.") return if levels is None: @@ -675,7 +669,7 @@ def add_hierarchy( if len(columns) != len(levels): print( - f"If specifying level names, you must specify a level for each column." + f"{icons.red_dot} If specifying level names, you must specify a level for each column." ) return @@ -684,14 +678,14 @@ def add_hierarchy( obj.IsHidden = hierarchy_hidden if hierarchy_description is not None: obj.Description = hierarchy_description - self.model.Tables[table_name].Hierarchies.Add(obj) + self._model.Tables[table_name].Hierarchies.Add(obj) for col in columns: lvl = TOM.Level() - lvl.Column = self.model.Tables[table_name].Columns[col] + lvl.Column = self._model.Tables[table_name].Columns[col] lvl.Name = levels[columns.index(col)] lvl.Ordinal = columns.index(col) - self.model.Tables[table_name].Hierarchies[hierarchy_name].Levels.Add(lvl) + self._model.Tables[table_name].Hierarchies[hierarchy_name].Levels.Add(lvl) def add_relationship( self, @@ -707,7 +701,7 @@ def add_relationship( rely_on_referential_integrity: Optional[bool] = False, ): """ - Adds a relationship to a semantic model. + Adds a `relationship `_ to a semantic model. Parameters ---------- @@ -752,11 +746,11 @@ def add_relationship( cross_filtering_behavior = cross_filtering_behavior.replace("direct", "Direct") rel = TOM.SingleColumnRelationship() - rel.FromColumn = self.model.Tables[from_table].Columns[from_column] + rel.FromColumn = self._model.Tables[from_table].Columns[from_column] rel.FromCardinality = System.Enum.Parse( TOM.RelationshipEndCardinality, from_cardinality ) - rel.ToColumn = self.model.Tables[to_table].Columns[to_column] + rel.ToColumn = self._model.Tables[to_table].Columns[to_column] rel.ToCardinality = System.Enum.Parse( TOM.RelationshipEndCardinality, to_cardinality ) @@ -769,7 +763,7 @@ def add_relationship( ) rel.RelyOnReferentialIntegrity = rely_on_referential_integrity - self.model.Relationships.Add(rel) + self._model.Relationships.Add(rel) def add_calculation_group( self, @@ -779,7 +773,7 @@ def add_calculation_group( hidden: Optional[bool] = False, ): """ - Adds a calculation group to a semantic model. + Adds a `calculation group `_ to a semantic model. 
Parameters ---------- @@ -825,14 +819,14 @@ def add_calculation_group( # col.SortByColumn = m.Tables[name].Columns[sortCol] tbl.Columns.Add(col2) - self.model.DiscourageImplicitMeasures = True - self.model.Tables.Add(tbl) + self._model.DiscourageImplicitMeasures = True + self._model.Tables.Add(tbl) def add_expression( self, name: str, expression: str, description: Optional[str] = None ): """ - Adds an expression to a semantic model. + Adds an `expression `_ to a semantic model. Parameters ---------- @@ -852,11 +846,11 @@ def add_expression( exp.Kind = TOM.ExpressionKind.M exp.Expression = expression - self.model.Expressions.Add(exp) + self._model.Expressions.Add(exp) def add_translation(self, language: str): """ - Adds a translation language (culture) to a semantic model. + Adds a `translation language `_ (culture) to a semantic model. Parameters ---------- @@ -869,13 +863,13 @@ def add_translation(self, language: str): cul.Name = language try: - self.model.Cultures.Add(cul) + self._model.Cultures.Add(cul) except: pass def add_perspective(self, perspective_name: str): """ - Adds a perspective to a semantic model. + Adds a `perspective `_ to a semantic model. Parameters ---------- @@ -886,7 +880,7 @@ def add_perspective(self, perspective_name: str): persp = TOM.Perspective() persp.Name = perspective_name - self.model.Perspectives.Add(persp) + self._model.Perspectives.Add(persp) def add_m_partition( self, @@ -910,18 +904,22 @@ def add_m_partition( mode : str, default=None The query mode for the partition. Defaults to None which resolves to 'Import'. + `Valid mode values `_ description : str, default=None A description for the partition. """ import Microsoft.AnalysisServices.Tabular as TOM import System - mode = ( - mode.title() - .replace("query", "Query") - .replace(" ", "") - .replace("lake", "Lake") - ) + if mode is None: + mode = "Default" + else: + mode = ( + mode.title() + .replace("query", "Query") + .replace(" ", "") + .replace("lake", "Lake") + ) mp = TOM.MPartitionSource() mp.Expression = expression @@ -929,12 +927,10 @@ def add_m_partition( p.Name = partition_name p.Source = mp if description is not None: - p.Description = description - if mode is None: - mode = "Default" + p.Description = description p.Mode = System.Enum.Parse(TOM.ModeType, mode) - self.model.Tables[table_name].Partitions.Add(p) + self._model.Tables[table_name].Partitions.Add(p) def add_entity_partition( self, @@ -964,7 +960,7 @@ def add_entity_partition( ep.Name = table_name ep.EntityName = entity_name if expression is None: - ep.ExpressionSource = self.model.Expressions["DatabaseQuery"] + ep.ExpressionSource = self._model.Expressions["DatabaseQuery"] else: ep.ExpressionSource = expression p = TOM.Partition() @@ -974,7 +970,7 @@ def add_entity_partition( if description is not None: p.Description = description - self.model.Tables[table_name].Partitions.Add(p) + self._model.Tables[table_name].Partitions.Add(p) def set_alternate_of( self, @@ -985,7 +981,7 @@ def set_alternate_of( base_column: Optional[str] = None, ): """ - Sets the 'alternate of' property on a column. + Sets the `alternate of `_ property on a column. 
Parameters ---------- @@ -1006,7 +1002,7 @@ def set_alternate_of( if base_column is not None and base_table is None: print( - f"ERROR: If you specify the base table you must also specify the base column" + f"{icons.red_dot} If you specify the base table you must also specify the base column" ) summarization_type = ( @@ -1018,28 +1014,28 @@ def set_alternate_of( summarizationTypes = ["Sum", "GroupBy", "Count", "Min", "Max"] if summarization_type not in summarizationTypes: print( - f"The 'summarization_type' parameter must be one of the following valuse: {summarizationTypes}." + f"{icons.red_dot} The 'summarization_type' parameter must be one of the following valuse: {summarizationTypes}." ) return ao = TOM.AlternateOf() ao.Summarization = System.Enum.Parse(TOM.SummarizationType, summarization_type) if base_column is not None: - ao.BaseColumn = self.model.Tables[base_table].Columns[base_column] + ao.BaseColumn = self._model.Tables[base_table].Columns[base_column] else: - ao.BaseTable = self.model.Tables[base_table] + ao.BaseTable = self._model.Tables[base_table] - self.model.Tables[table_name].Columns[column_name].AlternateOf = ao + self._model.Tables[table_name].Columns[column_name].AlternateOf = ao # Hide agg table and columns - t = self.model.Tables[table_name] + t = self._model.Tables[table_name] t.IsHidden = True for c in t.Columns: c.IsHidden = True def remove_alternate_of(self, table_name: str, column_name: str): """ - Removes the 'alternate of' property on a column. + Removes the `alternate of `_ property on a column. Parameters ---------- @@ -1053,13 +1049,13 @@ def remove_alternate_of(self, table_name: str, column_name: str): """ - self.model.Tables[table_name].Columns[column_name].AlternateOf = None + self._model.Tables[table_name].Columns[column_name].AlternateOf = None def get_annotations( self, object ) -> "Microsoft.AnalysisServices.Tabular.Annotation": """ - Shows all annotations for a given object within a semantic model. + Shows all `annotations `_ for a given object within a semantic model. Parameters ---------- @@ -1081,7 +1077,7 @@ def get_annotations( def set_annotation(self, object, name: str, value: str): """ - Sets an annotation on an object within the semantic model. + Sets an `annotation `_ on an object within the semantic model. Parameters ---------- @@ -1105,7 +1101,7 @@ def set_annotation(self, object, name: str, value: str): def get_annotation_value(self, object, name: str): """ - Obtains the annotation value for a given annotation on an object within the semantic model. + Obtains the `annotation `_ value for a given annotation on an object within the semantic model. Parameters ---------- @@ -1124,7 +1120,7 @@ def get_annotation_value(self, object, name: str): def remove_annotation(self, object, name: str): """ - Removes an annotation on an object within the semantic model. + Removes an `annotation `_ on an object within the semantic model. Parameters ---------- @@ -1138,7 +1134,7 @@ def remove_annotation(self, object, name: str): def clear_annotations(self, object): """ - Removes all annotations on an object within the semantic model. + Removes all `annotations `_ on an object within the semantic model. Parameters ---------- @@ -1152,7 +1148,7 @@ def get_extended_properties( self, object ) -> "Microsoft.AnalysisServices.Tabular.ExtendedProperty": """ - Retrieves all extended properties on an object within the semantic model. + Retrieves all `extended properties `_ on an object within the semantic model. 
Parameters ---------- @@ -1172,7 +1168,7 @@ def set_extended_property( self, object, extended_property_type: str, name: str, value: str ): """ - Sets an extended property on an object within the semantic model. + Sets an `extended property `_ on an object within the semantic model. Parameters ---------- @@ -1205,7 +1201,7 @@ def set_extended_property( def get_extended_property_value(self, object, name: str): """ - Retrieves the value of an extended property for an object within the semantic model. + Retrieves the value of an `extended property `_ for an object within the semantic model. Parameters ---------- @@ -1224,7 +1220,7 @@ def get_extended_property_value(self, object, name: str): def remove_extended_property(self, object, name: str): """ - Removes an extended property on an object within the semantic model. + Removes an `extended property `_ on an object within the semantic model. Parameters ---------- @@ -1238,7 +1234,7 @@ def remove_extended_property(self, object, name: str): def clear_extended_properties(self, object): """ - Removes all extended properties on an object within the semantic model. + Removes all `extended properties `_ on an object within the semantic model. Parameters ---------- @@ -1254,7 +1250,7 @@ def in_perspective( perspective_name: str, ): """ - Indicates whether an object is contained within a given perspective. + Indicates whether an object is contained within a given `perspective `_. Parameters ---------- @@ -1280,7 +1276,7 @@ def in_perspective( if objectType not in validObjects: print( - f"Only the following object types are valid for perspectives: {validObjects}." + f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}." ) return @@ -1313,7 +1309,7 @@ def add_to_perspective( perspective_name: str, ): """ - Adds an object to a perspective. + Adds an object to a `perspective `_. Parameters ---------- @@ -1334,13 +1330,13 @@ def add_to_perspective( if objectType not in validObjects: print( - f"Only the following object types are valid for perspectives: {validObjects}." + f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}." ) return try: object.Model.Perspectives[perspective_name] except: - print(f"The '{perspective_name}' perspective does not exist.") + print(f"{icons.red_dot} The '{perspective_name}' perspective does not exist.") return # try: @@ -1375,7 +1371,7 @@ def remove_from_perspective( perspective_name: str, ): """ - Removes an object from a perspective. + Removes an object from a `perspective `_. Parameters ---------- @@ -1396,13 +1392,13 @@ def remove_from_perspective( if objectType not in validObjects: print( - f"Only the following object types are valid for perspectives: {validObjects}." + f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}." ) return try: object.Model.Perspectives[perspective_name] except: - print(f"The '{perspective_name}' perspective does not exist.") + print(f"{icons.red_dot} The '{perspective_name}' perspective does not exist.") return # try: @@ -1449,7 +1445,7 @@ def set_translation( value: str, ): """ - Sets a translation value for an object's property. + Sets a `translation `_ value for an object's property. 
Parameters ---------- @@ -1476,7 +1472,7 @@ def set_translation( ] # , 'Level' if object.ObjectType not in validObjects: - print(f"Translations can only be set to {validObjects}.") + print(f"{icons.red_dot} Translations can only be set to {validObjects}.") return mapping = { @@ -1491,7 +1487,7 @@ def set_translation( object.Model.Cultures[language] except: print( - f"The '{language}' translation language does not exist in the semantic model." + f"{icons.red_dot} The '{language}' translation language does not exist in the semantic model." ) return @@ -1505,7 +1501,7 @@ def remove_translation( language: str, ): """ - Removes an object's translation value. + Removes an object's `translation `_ value. Parameters ---------- @@ -1579,7 +1575,7 @@ def remove_object(self, object): def used_in_relationships(self, object: Union["TOM.Table", "TOM.Column"]): """ - Shows all relationships in which a table/column is used. + Shows all `relationships `_ in which a table/column is used. Parameters ---------- @@ -1596,11 +1592,11 @@ def used_in_relationships(self, object: Union["TOM.Table", "TOM.Column"]): objType = object.ObjectType if objType == TOM.ObjectType.Table: - for r in self.model.Relationships: + for r in self._model.Relationships: if r.FromTable.Name == object.Name or r.ToTable.Name == object.Name: yield r # , 'Table' elif objType == TOM.ObjectType.Column: - for r in self.model.Relationships: + for r in self._model.Relationships: if ( r.FromTable.Name == object.Parent.Name and r.FromColumn.Name == object.Name @@ -1612,7 +1608,7 @@ def used_in_relationships(self, object: Union["TOM.Table", "TOM.Column"]): def used_in_levels(self, column: "TOM.Column"): """ - Shows all levels in which a column is used. + Shows all `levels `_ in which a column is used. Parameters ---------- @@ -1638,7 +1634,7 @@ def used_in_levels(self, column: "TOM.Column"): def used_in_hierarchies(self, column: "TOM.Column"): """ - Shows all hierarchies in which a column is used. + Shows all `hierarchies `_ in which a column is used. Parameters ---------- @@ -1681,7 +1677,7 @@ def used_in_sort_by(self, column: "TOM.Column"): objType = column.ObjectType if objType == TOM.ObjectType.Column: - for c in self.model.Tables[column.Parent.Name].Columns: + for c in self._model.Tables[column.Parent.Name].Columns: if c.SortByColumn == column: yield c @@ -1691,7 +1687,7 @@ def used_in_rls( dependencies: pd.DataFrame, ): """ - Identifies the filter expressions which reference a given object. + Identifies the row level security `filter expressions `_ which reference a given object. Parameters ---------- @@ -1717,7 +1713,7 @@ def used_in_rls( & (df_filt["Referenced Table"] == object.Name) ] tbls = fil["Table Name"].unique().tolist() - for t in self.model.Tables: + for t in self._model.Tables: if t.Name in tbls: yield t elif objType == TOM.ObjectType.Column: @@ -1774,7 +1770,7 @@ def used_in_data_coverage_definition( & (df_filt["Referenced Table"] == object.Name) ] tbls = fil["Table Name"].unique().tolist() - for t in self.model.Tables: + for t in self._model.Tables: if t.Name in tbls: yield t elif objType == TOM.ObjectType.Column: @@ -1829,7 +1825,7 @@ def used_in_calc_item( & (df_filt["Referenced Table"] == object.Name) ] tbls = fil["Table Name"].unique().tolist() - for t in self.model.Tables: + for t in self._model.Tables: if t.Name in tbls: yield t elif objType == TOM.ObjectType.Column: @@ -1855,7 +1851,7 @@ def used_in_calc_item( def hybrid_tables(self): """ - Outputs the hybrid tables within a semantic model. 
+ Outputs the `hybrid tables `_ within a semantic model. Parameters ---------- @@ -1867,14 +1863,14 @@ def hybrid_tables(self): """ import Microsoft.AnalysisServices.Tabular as TOM - for t in self.model.Tables: + for t in self._model.Tables: if any(p.Mode == TOM.ModeType.Import for p in t.Partitions): if any(p.Mode == TOM.ModeType.DirectQuery for p in t.Partitions): yield t def date_tables(self): """ - Outputs the tables which are marked as date tables within a semantic model. + Outputs the tables which are marked as `date tables `_ within a semantic model. Parameters ---------- @@ -1886,7 +1882,7 @@ def date_tables(self): """ import Microsoft.AnalysisServices.Tabular as TOM - for t in self.model.Tables: + for t in self._model.Tables: if t.DataCategory == "Time": if any( c.IsKey and c.DataType == TOM.DataType.DateTime for c in t.Columns @@ -1895,7 +1891,7 @@ def date_tables(self): def is_hybrid_table(self, table_name: str): """ - Identifies if a table is a hybrid table. + Identifies if a table is a `hybrid table `_. Parameters ---------- @@ -1913,11 +1909,11 @@ def is_hybrid_table(self, table_name: str): if any( p.Mode == TOM.ModeType.Import - for p in self.model.Tables[table_name].Partitions + for p in self._model.Tables[table_name].Partitions ): if any( p.Mode == TOM.ModeType.DirectQuery - for p in self.model.Tables[table_name].Partitions + for p in self._model.Tables[table_name].Partitions ): isHybridTable = True @@ -1925,7 +1921,7 @@ def is_hybrid_table(self, table_name: str): def is_date_table(self, table_name: str): """ - Identifies if a table is marked as a date table. + Identifies if a table is marked as a `date tables `_. Parameters ---------- @@ -1940,7 +1936,7 @@ def is_date_table(self, table_name: str): import Microsoft.AnalysisServices.Tabular as TOM isDateTable = False - t = self.model.Tables[table_name] + t = self._model.Tables[table_name] if t.DataCategory == "Time": if any(c.IsKey and c.DataType == TOM.DataType.DateTime for c in t.Columns): @@ -1950,7 +1946,7 @@ def is_date_table(self, table_name: str): def mark_as_date_table(self, table_name: str, column_name: str): """ - Marks a table as a date table. + Marks a table as a `date table `_. Parameters ---------- @@ -1961,7 +1957,7 @@ def mark_as_date_table(self, table_name: str, column_name: str): """ import Microsoft.AnalysisServices.Tabular as TOM - t = self.model.Tables[table_name] + t = self._model.Tables[table_name] c = t.Columns[column_name] if c.DataType != TOM.DataType.DateTime: print( @@ -1983,7 +1979,7 @@ def mark_as_date_table(self, table_name: str, column_name: str): ) """ df = fabric.evaluate_dax( - dataset=self.dataset, workspace=self.workspace, dax_string=daxQuery + dataset=self._dataset, workspace=self._workspace, dax_string=daxQuery ) value = df["1"].iloc[0] if value != "1": @@ -2001,7 +1997,7 @@ def mark_as_date_table(self, table_name: str, column_name: str): def has_aggs(self): """ - Identifies if a semantic model has any aggregations. + Identifies if a semantic model has any `aggregations `_. Parameters ---------- @@ -2022,7 +2018,7 @@ def has_aggs(self): def is_agg_table(self, table_name: str): """ - Identifies if a table has aggregations. + Identifies if a table has `aggregations `_. Parameters ---------- @@ -2035,13 +2031,13 @@ def is_agg_table(self, table_name: str): Indicates if the table has any aggregations. 
""" - t = self.model.Tables[table_name] + t = self._model.Tables[table_name] return any(c.AlternateOf is not None for c in t.Columns) def has_hybrid_table(self): """ - Identifies if a semantic model has a hybrid table. + Identifies if a semantic model has a `hybrid table `_. Parameters ---------- @@ -2054,7 +2050,7 @@ def has_hybrid_table(self): hasHybridTable = False - for t in self.model.Tables: + for t in self._model.Tables: if self.is_hybrid_table(table_name=t.Name): hasHybridTable = True @@ -2062,7 +2058,7 @@ def has_hybrid_table(self): def has_date_table(self): """ - Identifies if a semantic model has a table marked as a date table. + Identifies if a semantic model has a table marked as a `date table `_. Parameters ---------- @@ -2075,7 +2071,7 @@ def has_date_table(self): hasDateTable = False - for t in self.model.Tables: + for t in self._model.Tables: if self.is_date_table(table_name=t.Name): hasDateTable = True @@ -2083,7 +2079,7 @@ def has_date_table(self): def is_direct_lake(self): """ - Identifies if a semantic model is in Direct Lake mode. + Identifies if a semantic model is in `Direct Lake `_ mode. Parameters ---------- @@ -2096,13 +2092,13 @@ def is_direct_lake(self): return any( p.Mode == TOM.ModeType.DirectLake - for t in self.model.Tables + for t in self._model.Tables for p in t.Partitions ) def is_field_parameter(self, table_name: str): """ - Identifies if a table is a field parameter. + Identifies if a table is a `field parameter `_. Parameters ---------- @@ -2116,7 +2112,7 @@ def is_field_parameter(self, table_name: str): """ import Microsoft.AnalysisServices.Tabular as TOM - t = self.model.Tables[table_name] + t = self._model.Tables[table_name] return ( any( @@ -2134,7 +2130,7 @@ def is_field_parameter(self, table_name: str): def is_auto_date_table(self, table_name: str): """ - Identifies if a table is an auto-date table. + Identifies if a table is an `auto date/time table `_. Parameters ---------- @@ -2150,7 +2146,7 @@ def is_auto_date_table(self, table_name: str): isAutoDate = False - t = self.model.Tables[table_name] + t = self._model.Tables[table_name] if t.Name.startswith("LocalDateTable_") or t.Name.startswith( "DateTableTemplate_" @@ -2174,7 +2170,7 @@ def set_kpi( status_graphic: Optional[str] = None, ): """ - Sets the properties to add/update a KPI for a measure. + Sets the properties to add/update a `KPI `_ for a measure. Parameters ---------- @@ -2203,7 +2199,7 @@ def set_kpi( if measure_name == target: print( - f"The 'target' parameter cannot be the same measure as the 'measure_name' parameter." + f"{icons.red_dot} The 'target' parameter cannot be the same measure as the 'measure_name' parameter." ) return @@ -2218,32 +2214,32 @@ def set_kpi( if status_type not in statusType: print( - f"'{status_type}' is an invalid status_type. Please choose from these options: {statusType}." + f"{icons.red_dot} '{status_type}' is an invalid status_type. Please choose from these options: {statusType}." ) return if status_type in ["Linear", "LinearReversed"]: if upper_bound is not None or lower_mid_bound is not None: print( - f"The 'upper_mid_bound' and 'lower_mid_bound' parameters are not used in the 'Linear' and 'LinearReversed' status types. Make sure these parameters are set to None." + f"{icons.red_dot} The 'upper_mid_bound' and 'lower_mid_bound' parameters are not used in the 'Linear' and 'LinearReversed' status types. Make sure these parameters are set to None." 
) return elif upper_bound <= lower_bound: - print(f"The upper_bound must be greater than the lower_bound.") + print(f"{icons.red_dot} The upper_bound must be greater than the lower_bound.") return if status_type in ["Centered", "CenteredReversed"]: if upper_mid_bound is None or lower_mid_bound is None: print( - f"The 'upper_mid_bound' and 'lower_mid_bound' parameters are necessary in the 'Centered' and 'CenteredReversed' status types." + f"{icons.red_dot} The 'upper_mid_bound' and 'lower_mid_bound' parameters are necessary in the 'Centered' and 'CenteredReversed' status types." ) return elif upper_bound <= upper_mid_bound: - print(f"The upper_bound must be greater than the upper_mid_bound.") + print(f"{icons.red_dot} The upper_bound must be greater than the upper_mid_bound.") elif upper_mid_bound <= lower_mid_bound: - print(f"The upper_mid_bound must be greater than the lower_mid_bound.") + print(f"{icons.red_dot} The upper_mid_bound must be greater than the lower_mid_bound.") elif lower_mid_bound <= lower_bound: - print(f"The lower_mid_bound must be greater than the lower_bound.") + print(f"{icons.red_dot} The lower_mid_bound must be greater than the lower_bound.") try: table_name = next( @@ -2251,7 +2247,7 @@ def set_kpi( ) except: print( - f"The '{measure_name}' measure does not exist in the '{self.dataset}' semantic model within the '{self.workspace}'." + f"{icons.red_dot} The '{measure_name}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'." ) return @@ -2277,7 +2273,7 @@ def set_kpi( if status_graphic not in graphics: print( - f"The '{status_graphic}' status graphic is not valid. Please choose from these options: {graphics}." + f"{icons.red_dot} The '{status_graphic}' status graphic is not valid. Please choose from these options: {graphics}." ) return @@ -2296,7 +2292,7 @@ def set_kpi( ) except: print( - f"The '{target}' measure does not exist in the '{self.dataset}' semantic model within the '{self.workspace}'." + f"{icons.red_dot} The '{target}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'." ) if measure_target: @@ -2318,7 +2314,7 @@ def set_kpi( kpi.StatusGraphic = status_graphic kpi.StatusExpression = expr - ms = self.model.Tables[table_name].Measures[measure_name] + ms = self._model.Tables[table_name].Measures[measure_name] try: ms.KPI.TargetExpression = tgt ms.KPI.StatusGraphic = status_graphic @@ -2328,7 +2324,7 @@ def set_kpi( def set_aggregations(self, table_name: str, agg_table_name: str): """ - Sets the aggregations (alternate of) for all the columns in an aggregation table based on a base table. + Sets the `aggregations `_ (alternate of) for all the columns in an aggregation table based on a base table. Parameters ---------- @@ -2342,7 +2338,7 @@ def set_aggregations(self, table_name: str, agg_table_name: str): """ - for c in self.model.Tables[agg_table_name].Columns: + for c in self._model.Tables[agg_table_name].Columns: dataType = c.DataType @@ -2367,7 +2363,7 @@ def set_is_available_in_mdx( self, table_name: str, column_name: str, value: Optional[bool] = False ): """ - Sets the IsAvailableInMdx property on a column. + Sets the `IsAvailableInMDX `_ property on a column. Parameters ---------- @@ -2379,13 +2375,13 @@ def set_is_available_in_mdx( The IsAvailableInMdx property value. 
""" - self.model.Tables[table_name].Columns[column_name].IsAvailableInMdx = value + self._model.Tables[table_name].Columns[column_name].IsAvailableInMdx = value def set_summarize_by( self, table_name: str, column_name: str, value: Optional[str] = None ): """ - Sets the SummarizeBy property on a column. + Sets the `SummarizeBy `_ property on a column. Parameters ---------- @@ -2422,17 +2418,17 @@ def set_summarize_by( if value not in values: print( - f"'{value}' is not a valid value for the SummarizeBy property. These are the valid values: {values}." + f"{icons.red_dot} '{value}' is not a valid value for the SummarizeBy property. These are the valid values: {values}." ) return - self.model.Tables[table_name].Columns[column_name].SummarizeBy = ( + self._model.Tables[table_name].Columns[column_name].SummarizeBy = ( System.Enum.Parse(TOM.AggregateFunction, value) ) def set_direct_lake_behavior(self, direct_lake_behavior: str): """ - Sets the Direct Lake Behavior property for a semantic model. + Sets the `Direct Lake Behavior `_ property for a semantic model. Parameters ---------- @@ -2454,11 +2450,11 @@ def set_direct_lake_behavior(self, direct_lake_behavior: str): if direct_lake_behavior not in dlValues: print( - f"The 'direct_lake_behavior' parameter must be one of these values: {dlValues}." + f"{icons.red_dot} The 'direct_lake_behavior' parameter must be one of these values: {dlValues}." ) return - self.model.DirectLakeBehavior = System.Enum.Parse( + self._model.DirectLakeBehavior = System.Enum.Parse( TOM.DirectLakeBehavior, direct_lake_behavior ) @@ -2492,7 +2488,7 @@ def add_table( if data_category is not None: t.DataCategory = data_category t.Hidden = hidden - self.model.Tables.Add(t) + self._model.Tables.Add(t) def add_calculated_table( self, @@ -2535,11 +2531,11 @@ def add_calculated_table( t.DataCategory = data_category t.Hidden = hidden t.Partitions.Add(par) - self.model.Tables.Add(t) + self._model.Tables.Add(t) def add_field_parameter(self, table_name: str, objects: List[str]): """ - Adds a table to the semantic model. + Adds a `field parameter `_ to the semantic model. Parameters ---------- @@ -2552,11 +2548,11 @@ def add_field_parameter(self, table_name: str, objects: List[str]): """ if isinstance(objects, str): - print(f"The 'objects' parameter must be a list of columns/measures.") + print(f"{icons.red_dot} The 'objects' parameter must be a list of columns/measures.") return if len(objects) == 1: print( - f"There must be more than one object (column/measure) within the objects parameter." + f"{icons.red_dot} There must be more than one object (column/measure) within the objects parameter." ) return @@ -2595,7 +2591,7 @@ def add_field_parameter(self, table_name: str, objects: List[str]): success = True if not success: print( - f"The '{obj}' object was not found in the '{self.dataset}' semantic model." + f"{icons.red_dot} The '{obj}' object was not found in the '{self._dataset}' semantic model." 
) return else: @@ -2632,7 +2628,7 @@ def add_field_parameter(self, table_name: str, objects: List[str]): self.set_extended_property( self=self, - object=self.model.Tables[table_name].Columns[col2], + object=self._model.Tables[table_name].Columns[col2], extended_property_type="Json", name="ParameterMetadata", value='{"version":3,"kind":2}', @@ -2640,23 +2636,23 @@ def add_field_parameter(self, table_name: str, objects: List[str]): rcd = TOM.RelatedColumnDetails() gpc = TOM.GroupByColumn() - gpc.GroupingColumn = self.model.Tables[table_name].Columns[col2] + gpc.GroupingColumn = self._model.Tables[table_name].Columns[col2] rcd.GroupByColumns.Add(gpc) # Update column properties - self.model.Tables[table_name].Columns[col2].SortByColumn = self.model.Tables[ + self._model.Tables[table_name].Columns[col2].SortByColumn = self._model.Tables[ table_name ].Columns[col3] - self.model.Tables[table_name].Columns[table_name].RelatedColumnDetails = rcd + self._model.Tables[table_name].Columns[table_name].RelatedColumnDetails = rcd - self.tables_added.append(table_name) + self._tables_added.append(table_name) def remove_vertipaq_annotations(self): """ - Removes the annotations set using the [set_vertipaq_annotations] function. + Removes the annotations set using the set_vertipaq_annotations function. """ - for t in self.model.Tables: + for t in self._model.Tables: for a in t.Annotations: if a.Name.startswith("Vertipaq_"): self.remove_annotation(object=t, name=a.Name) @@ -2672,7 +2668,7 @@ def remove_vertipaq_annotations(self): for a in p.Annotations: if a.Name.startswith("Vertipaq_"): self.remove_annotation(object=p, name=a.Name) - for r in self.model.Relationships: + for r in self._model.Relationships: for a in r.Annotations: if a.Name.startswith("Veripaq_"): self.remove_annotation(object=r, name=a.Name) @@ -2683,10 +2679,10 @@ def set_vertipaq_annotations(self): """ dfT = fabric.list_tables( - dataset=self.dataset, workspace=self.workspace, extended=True + dataset=self._dataset, workspace=self._workspace, extended=True ) dfC = fabric.list_columns( - dataset=self.dataset, workspace=self.workspace, extended=True + dataset=self._dataset, workspace=self._workspace, extended=True ) # intList = ['Total Size']#, 'Data Size', 'Dictionary Size', 'Hierarchy Size'] dfCSum = dfC.groupby(["Table Name"])["Total Size"].sum().reset_index() @@ -2698,19 +2694,19 @@ def set_vertipaq_annotations(self): how="inner", ) dfP = fabric.list_partitions( - dataset=self.dataset, workspace=self.workspace, extended=True + dataset=self._dataset, workspace=self._workspace, extended=True ) dfP["Records per Segment"] = round( dfP["Record Count"] / dfP["Segment Count"], 2 ) dfH = fabric.list_hierarchies( - dataset=self.dataset, workspace=self.workspace, extended=True + dataset=self._dataset, workspace=self._workspace, extended=True ) dfR = list_relationships( - dataset=self.dataset, workspace=self.workspace, extended=True + dataset=self._dataset, workspace=self._workspace, extended=True ) - for t in self.model.Tables: + for t in self._model.Tables: dfT_filt = dfTable[dfTable["Name"] == t.Name] rowCount = str(dfT_filt["Row Count"].iloc[0]) totalSize = str(dfT_filt["Total Size"].iloc[0]) @@ -2758,17 +2754,17 @@ def set_vertipaq_annotations(self): ] usedSize = str(dfH_filt["Used Size"].iloc[0]) self.set_annotation(object=h, name="Vertipaq_UsedSize", value=usedSize) - for r in self.model.Relationships: + for r in self._model.Relationships: dfR_filt = dfR[dfR["Relationship Name"] == r.Name] relSize = str(dfR_filt["Used Size"].iloc[0]) 
self.set_annotation(object=r, name="Vertipaq_UsedSize", value=relSize) try: - runId = self.get_annotation_value(object=self.model, name="Vertipaq_Run") + runId = self.get_annotation_value(object=self._model, name="Vertipaq_Run") runId = str(int(runId) + 1) except: runId = "1" - self.set_annotation(object=self.model, name="Vertipaq_Run", value=runId) + self.set_annotation(object=self._model, name="Vertipaq_Run", value=runId) def row_count(self, object: Union["TOM.Partition", "TOM.Table"]): """ @@ -2998,7 +2994,7 @@ def depends_on(self, object, dependencies: pd.DataFrame): for c in self.all_columns(): if format_dax_object_name(c.Parent.Name, c.Name) in cols: yield c - for t in self.model.Tables: + for t in self._model.Tables: if t.Name in tbls: yield t @@ -3049,7 +3045,7 @@ def referenced_by(self, object, dependencies: pd.DataFrame): for c in self.all_columns(): if format_dax_object_name(c.Parent.Name, c.Name) in cols: yield c - for t in self.model.Tables: + for t in self._model.Tables: if t.Name in tbls: yield t @@ -3100,7 +3096,7 @@ def unqualified_columns(self, object: "TOM.Column", dependencies: pd.DataFrame): import Microsoft.AnalysisServices.Tabular as TOM def create_pattern(a, b): - return r"(? 0: @@ -3138,7 +3134,7 @@ def is_direct_lake_using_view(self): def has_incremental_refresh_policy(self, table_name: str): """ - Identifies whether a table has an incremental refresh policy. + Identifies whether a table has an `incremental refresh `_ policy. Parameters ---------- @@ -3152,7 +3148,7 @@ def has_incremental_refresh_policy(self, table_name: str): """ hasRP = False - rp = self.model.Tables[table_name].RefreshPolicy + rp = self._model.Tables[table_name].RefreshPolicy if rp is not None: hasRP = True @@ -3161,7 +3157,7 @@ def has_incremental_refresh_policy(self, table_name: str): def show_incremental_refresh_policy(self, table_name: str): """ - Prints the incremental refresh policy for a table. + Prints the `incremental refresh `_ policy for a table. Parameters ---------- @@ -3169,11 +3165,11 @@ def show_incremental_refresh_policy(self, table_name: str): Name of the table. """ - rp = self.model.Tables[table_name].RefreshPolicy + rp = self._model.Tables[table_name].RefreshPolicy if rp is None: print( - f"The '{table_name}' table in the '{self.dataset}' semantic model within the '{self.workspace}' workspace does not have an incremental refresh policy." + f"{icons.yellow_dot} The '{table_name}' table in the '{self._dataset}' semantic model within the '{self._workspace}' workspace does not have an incremental refresh policy." ) else: print(f"Table Name: {table_name}") @@ -3181,33 +3177,33 @@ def show_incremental_refresh_policy(self, table_name: str): icGran = str(rp.IncrementalGranularity).lower() if rp.RollingWindowPeriods > 1: print( - f"Archive data starting {start_bold}{rp.RollingWindowPeriods} {rwGran}s{end_bold} before refresh date." + f"Archive data starting {icons.start_bold}{rp.RollingWindowPeriods} {rwGran}s{icons.end_bold} before refresh date." ) else: print( - f"Archive data starting {start_bold}{rp.RollingWindowPeriods} {rwGran}{end_bold} before refresh date." + f"Archive data starting {icons.start_bold}{rp.RollingWindowPeriods} {rwGran}{icons.end_bold} before refresh date." ) if rp.IncrementalPeriods > 1: print( - f"Incrementally refresh data {start_bold}{rp.IncrementalPeriods} {icGran}s{end_bold} before refresh date." + f"Incrementally refresh data {icons.start_bold}{rp.IncrementalPeriods} {icGran}s{icons.end_bold} before refresh date." 
) else: print( - f"Incrementally refresh data {start_bold}{rp.IncrementalPeriods} {icGran}{end_bold} before refresh date." + f"Incrementally refresh data {icons.start_bold}{rp.IncrementalPeriods} {icGran}{icons.end_bold} before refresh date." ) if rp.Mode == TOM.RefreshPolicyMode.Hybrid: print( - f"{checked} Get the latest data in real time with DirectQuery (Premium only)" + f"{icons.checked} Get the latest data in real time with DirectQuery (Premium only)" ) else: print( - f"{unchecked} Get the latest data in real time with DirectQuery (Premium only)" + f"{icons.unchecked} Get the latest data in real time with DirectQuery (Premium only)" ) if rp.IncrementalPeriodsOffset == -1: - print(f"{checked} Only refresh complete days") + print(f"{icons.checked} Only refresh complete days") else: - print(f"{unchecked} Only refresh complete days") + print(f"{icons.unchecked} Only refresh complete days") if len(rp.PollingExpression) > 0: pattern = r"\[([^\]]+)\]" match = re.search(pattern, rp.PollingExpression) @@ -3215,10 +3211,10 @@ def show_incremental_refresh_policy(self, table_name: str): col = match[0][1:-1] fullCol = format_dax_object_name(table_name, col) print( - f"{checked} Detect data changes: {start_bold}{fullCol}{end_bold}" + f"{icons.checked} Detect data changes: {icons.start_bold}{fullCol}{icons.end_bold}" ) else: - print(f"{unchecked} Detect data changes") + print(f"{icons.unchecked} Detect data changes") def update_incremental_refresh_policy( self, @@ -3231,7 +3227,7 @@ def update_incremental_refresh_policy( detect_data_changes_column: Optional[str] = None, ): """ - Updates the incremental refresh policy for a table within a semantic model. + Updates the `incremental refresh `_ policy for a table within a semantic model. Parameters ---------- @@ -3287,7 +3283,7 @@ def update_incremental_refresh_policy( ) return - t = self.model.Tables[table_name] + t = self._model.Tables[table_name] if detect_data_changes_column is not None: dc = t.Columns[detect_data_changes_column] @@ -3339,7 +3335,7 @@ def add_incremental_refresh_policy( detect_data_changes_column: Optional[str] = None, ): """ - Adds anincremental refresh policy for a table within a semantic model. + Adds an `incremental refresh `_ policy for a table within a semantic model. Parameters ---------- @@ -3415,7 +3411,7 @@ def add_incremental_refresh_policy( ) return - t = self.model.Tables[table_name] + t = self._model.Tables[table_name] c = t.Columns[column_name] fcName = format_dax_object_name(table_name, column_name) @@ -3514,7 +3510,7 @@ def apply_refresh_policy( max_parallelism: Optional[int] = 0, ): """ - Applies the incremental refresh policy for a table within a semantic model. + `Applies the incremental refresh `_ policy for a table within a semantic model. Parameters ---------- @@ -3528,7 +3524,7 @@ def apply_refresh_policy( The degree of parallelism during the refresh execution. """ - self.model.Tables[table_name].ApplyRefreshPolicy( + self._model.Tables[table_name].ApplyRefreshPolicy( effectiveDate=effective_date, refresh=refresh, maxParallelism=max_parallelism, @@ -3538,7 +3534,7 @@ def set_data_coverage_definition( self, table_name: str, partition_name: str, expression: str ): """ - Sets the data coverage definition for a partition. + Sets the `data coverage definition `_ for a partition. 
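# --- Illustrative usage (editor's note, not part of this patch) ----------------------
# Sketch combining the incremental-refresh helpers above: check whether a table has a
# policy, print it in the checklist format shown, then apply it. The dataset, workspace
# and table names are placeholders; the refresh/max_parallelism arguments follow the
# parameters visible in apply_refresh_policy above.
from sempy_labs._tom import connect_semantic_model

with connect_semantic_model(
    dataset="AdventureWorks", workspace="Sales Workspace", readonly=False
) as tom:
    if tom.has_incremental_refresh_policy(table_name="FactSales"):
        tom.show_incremental_refresh_policy(table_name="FactSales")
        # Generate and refresh the partitions defined by the policy
        tom.apply_refresh_policy(table_name="FactSales", refresh=True, max_parallelism=2)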
Parameters
----------
@@ -3553,19 +3549,19 @@ def set_data_coverage_definition(
doc = "https://learn.microsoft.com/analysis-services/tom/table-partitions?view=asallproducts-allversions"
- t = self.model.Tables[table_name]
+ t = self._model.Tables[table_name]
p = t.Partitions[partition_name]
ht = self.is_hybrid_table(table_name=table_name)
if not ht:
print(
- f"The data coverage definition property is only applicable to hybrid tables. See the documentation: {doc}."
+ f"{icons.red_dot} The `data coverage definition `_ property is only applicable to `hybrid tables `_. See the documentation: {doc}."
)
return
if p.Mode != TOM.ModeType.DirectQuery:
print(
- f"The data coverage definition property is only applicable to the DirectQuery partition of a hybrid table. See the documentation: {doc}."
+ f"{icons.red_dot} The `data coverage definition `_ property is only applicable to the DirectQuery partition of a `hybrid table `_. See the documentation: {doc}."
)
return
@@ -3575,7 +3571,7 @@ def set_data_coverage_definition(
def set_encoding_hint(self, table_name: str, column_name: str, value: str):
"""
- Sets the encoding hint for a column.
+ Sets the `encoding hint `_ for a column.
Parameters
----------
@@ -3599,13 +3595,13 @@ def set_encoding_hint(self, table_name: str, column_name: str, value: str):
)
return
- self.model.Tables[table_name].Columns[column_name].EncodingHint = (
+ self._model.Tables[table_name].Columns[column_name].EncodingHint = (
System.Enum.Parse(TOM.EncodingHintType, value)
)
def set_data_type(self, table_name: str, column_name: str, value: str):
"""
- Sets the data type for a column.
+ Sets the `data type `_ for a column.
Parameters
----------
@@ -3643,7 +3639,7 @@ def set_data_type(self, table_name: str, column_name: str, value: str):
)
return
- self.model.Tables[table_name].Columns[column_name].DataType = System.Enum.Parse(
+ self._model.Tables[table_name].Columns[column_name].DataType = System.Enum.Parse(
TOM.DataType, value
)
@@ -3674,7 +3670,7 @@ def add_time_intelligence(
t = t.capitalize()
if t not in [time_intel_options]:
print(
- f"The '{t}' time intelligence variation is not supported. Valid options: {time_intel_options}."
+ f"{icons.red_dot} The '{t}' time intelligence variation is not supported. Valid options: {time_intel_options}."
)
return
@@ -3685,14 +3681,14 @@ def add_time_intelligence(
if table_name is None:
print(
- f"The '{measure_name}' is not a valid measure in the '{self.dataset}' semantic model within the '{self.workspace}' workspace."
+ f"{icons.red_dot} The '{measure_name}' is not a valid measure in the '{self._dataset}' semantic model within the '{self._workspace}' workspace."
)
return
# Validate date table
if not self.is_date_table(date_table):
print(
- f"{icons.red_dot} The '{date_table}' table is not a valid date table in the '{self.dataset}' wemantic model within the '{self.workspace}' workspace."
+ f"{icons.red_dot} The '{date_table}' table is not a valid date table in the '{self._dataset}' semantic model within the '{self._workspace}' workspace."
) return @@ -3713,18 +3709,18 @@ def add_time_intelligence( ) def close(self): - if not self.readonly and self.model is not None: - self.model.SaveChanges() + if not self._readonly and self._model is not None: + self._model.SaveChanges() - if len(self.tables_added) > 0: + if len(self._tables_added) > 0: refresh_semantic_model( - dataset=self.dataset, - tables=self.tables_added, - workspace=self.workspace, + dataset=self._dataset, + tables=self._tables_added, + workspace=self._workspace, ) - self.model = None + self._model = None - self.tom_server.Dispose() + self._tom_server.Dispose() @log diff --git a/src/sempy_labs/_translations.py b/src/sempy_labs/_translations.py index 5db33bc4..c501d39a 100644 --- a/src/sempy_labs/_translations.py +++ b/src/sempy_labs/_translations.py @@ -49,7 +49,7 @@ def translate_semantic_model( ) as tom: if exclude_characters is None: - for o in tom.model.Tables: + for o in tom._model.Tables: new_data = { "Object Type": "Table", "Name": o.Name, @@ -102,7 +102,7 @@ def translate_semantic_model( [dfPrep, pd.DataFrame(new_data, index=[0])], ignore_index=True ) else: - for o in tom.model.Tables: + for o in tom._model.Tables: oName = o.Name oDescription = o.Description for s in exclude_characters: @@ -211,7 +211,7 @@ def translate_semantic_model( tom.add_translation(language=lang) print(f"{icons.in_progress} Translating into the '{lang}' language...") - for t in tom.model.Tables: + for t in tom._model.Tables: if t.IsHidden == False: if clm == "Name": df_filt = df_panda[ diff --git a/src/sempy_labs/_vertipaq.py b/src/sempy_labs/_vertipaq.py index c73dded0..3d2d670a 100644 --- a/src/sempy_labs/_vertipaq.py +++ b/src/sempy_labs/_vertipaq.py @@ -14,7 +14,7 @@ from sempy_labs.lakehouse._lakehouse import lakehouse_attached from typing import List, Optional, Union from sempy._utils._log import log - +import sempy_labs._icons as icons @log def vertipaq_analyzer( @@ -103,7 +103,7 @@ def vertipaq_analyzer( if len(dfI_filt) == 0: print( - f"The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter." + f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter." ) else: lakehouseName = dfI_filt["Display Name"].iloc[0] @@ -438,7 +438,7 @@ def vertipaq_analyzer( lakeAttach = lakehouse_attached() if lakeAttach == False: print( - f"In order to save the Vertipaq Analyzer results, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." + f"{icons.red_dot} In order to save the Vertipaq Analyzer results, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." ) return @@ -472,7 +472,7 @@ def vertipaq_analyzer( "export_Model": ["Model", export_Model], } - print(f"Saving Vertipaq Analyzer to delta tables in the lakehouse...\n") + print(f"{icons.in_progress} Saving Vertipaq Analyzer to delta tables in the lakehouse...\n") now = datetime.datetime.now() for key, (obj, df) in dfMap.items(): df["Timestamp"] = now @@ -491,7 +491,7 @@ def vertipaq_analyzer( spark_df = spark.createDataFrame(df) spark_df.write.mode("append").format("delta").saveAsTable(delta_table_name) print( - f"\u2022 Vertipaq Analyzer results for '{obj}' have been appended to the '{delta_table_name}' delta table." 
+ f"{icons.bullet} Vertipaq Analyzer results for '{obj}' have been appended to the '{delta_table_name}' delta table." ) ### Export vertipaq to zip file within the lakehouse @@ -532,7 +532,7 @@ def vertipaq_analyzer( if os.path.exists(filePath): os.remove(filePath) print( - f"The Vertipaq Analyzer info for the '{dataset}' semantic model in the '{workspace}' workspace has been saved to the 'Vertipaq Analyzer/{zipFileName}' in the default lakehouse attached to this notebook." + f"{icons.green_dot} The Vertipaq Analyzer info for the '{dataset}' semantic model in the '{workspace}' workspace has been saved to the 'Vertipaq Analyzer/{zipFileName}' in the default lakehouse attached to this notebook." ) diff --git a/src/sempy_labs/directlake/_directlake_schema_compare.py b/src/sempy_labs/directlake/_directlake_schema_compare.py index d34ef558..31d3cdac 100644 --- a/src/sempy_labs/directlake/_directlake_schema_compare.py +++ b/src/sempy_labs/directlake/_directlake_schema_compare.py @@ -10,7 +10,7 @@ from sempy_labs.lakehouse._get_lakehouse_columns import get_lakehouse_columns from sempy_labs._list_functions import list_tables from typing import Optional - +import sempy_labs._icons as icons def direct_lake_schema_compare( dataset: str, @@ -56,12 +56,12 @@ def direct_lake_schema_compare( if len(dfI_filt) == 0: print( - f"The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified." + f"{icons.red_dot} The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified." ) return if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()): - print(f"The '{dataset}' semantic model is not in Direct Lake mode.") + print(f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake mode.") return dfT = list_tables(dataset, workspace) @@ -92,19 +92,19 @@ def direct_lake_schema_compare( if len(missingtbls) == 0: print( - f"All tables exist in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." + f"{icons.green_dot} All tables exist in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." ) else: print( - f"The following tables exist in the '{dataset}' semantic model within the '{workspace}' workspace but do not exist in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." + f"{icons.yellow_dot} The following tables exist in the '{dataset}' semantic model within the '{workspace}' workspace but do not exist in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." ) display(missingtbls) if len(missingcols) == 0: print( - f"All columns exist in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." + f"{icons.green_dot} All columns exist in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." ) else: print( - f"The following columns exist in the '{dataset}' semantic model within the '{workspace}' workspace but do not exist in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." + f"{icons.yellow_dot} The following columns exist in the '{dataset}' semantic model within the '{workspace}' workspace but do not exist in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." 
) display(missingcols) diff --git a/src/sempy_labs/directlake/_directlake_schema_sync.py b/src/sempy_labs/directlake/_directlake_schema_sync.py index e81961a6..788743d2 100644 --- a/src/sempy_labs/directlake/_directlake_schema_sync.py +++ b/src/sempy_labs/directlake/_directlake_schema_sync.py @@ -9,7 +9,7 @@ ) from typing import Optional from sempy._utils._log import log - +import sempy_labs._icons as icons @log def direct_lake_schema_sync( @@ -63,7 +63,7 @@ def direct_lake_schema_sync( if len(dfI_filt) == 0: print( - f"The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified." + f"{icons.red_dot} The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified." ) return @@ -113,16 +113,16 @@ def direct_lake_schema_sync( col.DataType = System.Enum.Parse(TOM.DataType, dt) except: print( - f"ERROR: '{dType}' data type is not mapped properly to the semantic model data types." + f"{icons.red_dot} '{dType}' data type is not mapped properly to the semantic model data types." ) return m.Tables[tName].Columns.Add(col) print( - f"The '{lakeCName}' column has been added to the '{tName}' table as a '{dt}' data type within the '{dataset}' semantic model within the '{workspace}' workspace." + f"{icons.green_dot} The '{lakeCName}' column has been added to the '{tName}' table as a '{dt}' data type within the '{dataset}' semantic model within the '{workspace}' workspace." ) else: print( - f"The {fullColName} column exists in the lakehouse but not in the '{tName}' table in the '{dataset}' semantic model within the '{workspace}' workspace." + f"{icons.yellow_dot} The {fullColName} column exists in the lakehouse but not in the '{tName}' table in the '{dataset}' semantic model within the '{workspace}' workspace." ) m.SaveChanges() diff --git a/src/sempy_labs/directlake/_fallback.py b/src/sempy_labs/directlake/_fallback.py index 38886b6a..022a416a 100644 --- a/src/sempy_labs/directlake/_fallback.py +++ b/src/sempy_labs/directlake/_fallback.py @@ -2,7 +2,7 @@ import sempy.fabric as fabric import numpy as np from typing import List, Optional, Union - +import sempy_labs._icons as icons def check_fallback_reason(dataset: str, workspace: Optional[str] = None): """ @@ -32,7 +32,7 @@ def check_fallback_reason(dataset: str, workspace: Optional[str] = None): if len(dfP_filt) == 0: print( - f"The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models." + f"{icons.yellow_dot} The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models." ) else: df = fabric.evaluate_dax( diff --git a/src/sempy_labs/directlake/_get_directlake_lakehouse.py b/src/sempy_labs/directlake/_get_directlake_lakehouse.py index 2ba51cb3..92d4ff7b 100644 --- a/src/sempy_labs/directlake/_get_directlake_lakehouse.py +++ b/src/sempy_labs/directlake/_get_directlake_lakehouse.py @@ -8,7 +8,7 @@ from typing import Optional, Tuple from uuid import UUID from sempy_labs._helper_functions import resolve_workspace_name_and_id - +import sempy_labs._icons as icons def get_direct_lake_lakehouse( dataset: str, @@ -55,7 +55,7 @@ def get_direct_lake_lakehouse( if len(dfP_filt) == 0: raise ValueError( - f"ERROR: The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode." 
+ f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode." ) sqlEndpointId = get_direct_lake_sql_endpoint(dataset, workspace) diff --git a/src/sempy_labs/directlake/_get_shared_expression.py b/src/sempy_labs/directlake/_get_shared_expression.py index fd1119bf..0f836ffb 100644 --- a/src/sempy_labs/directlake/_get_shared_expression.py +++ b/src/sempy_labs/directlake/_get_shared_expression.py @@ -6,7 +6,7 @@ ) from sempy_labs._list_functions import list_lakehouses from typing import Optional - +import sempy_labs._icons as icons def get_shared_expression( lakehouse: Optional[str] = None, workspace: Optional[str] = None @@ -44,7 +44,7 @@ def get_shared_expression( if provStatus == "InProgress": print( - f"The SQL Endpoint for the '{lakehouse}' lakehouse within the '{workspace}' workspace has not yet been provisioned. Please wait until it has been provisioned." + f"{icons.red_dot} The SQL Endpoint for the '{lakehouse}' lakehouse within the '{workspace}' workspace has not yet been provisioned. Please wait until it has been provisioned." ) return diff --git a/src/sempy_labs/directlake/_guardrails.py b/src/sempy_labs/directlake/_guardrails.py index 1849289b..32937b57 100644 --- a/src/sempy_labs/directlake/_guardrails.py +++ b/src/sempy_labs/directlake/_guardrails.py @@ -6,8 +6,8 @@ def get_direct_lake_guardrails(): """ - Shows the guardrails for when Direct Lake semantic models will fallback to Direct Query based on Microsoft's online documentation. - + Shows the guardrails for when Direct Lake semantic models will fallback to Direct Query based on Microsoft's `online documentation `_. + Parameters ---------- @@ -65,7 +65,7 @@ def get_sku_size(workspace: Optional[str] = None): def get_directlake_guardrails_for_sku(sku_size: str): """ Shows the guardrails for Direct Lake based on the SKU used by your workspace's capacity. - *Use the result of the 'get_sku_size' function as an input for this function's skuSize parameter.* + * Use the result of the 'get_sku_size' function as an input for this function's sku_size parameter.* Parameters ---------- diff --git a/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py b/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py index 77a0463e..392e3aca 100644 --- a/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py +++ b/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py @@ -4,12 +4,12 @@ from sempy_labs._list_functions import list_tables, list_annotations from typing import Optional from sempy._utils._log import log - +import sempy_labs._icons as icons @log def list_direct_lake_model_calc_tables(dataset: str, workspace: Optional[str] = None): """ - Shows the calculated tables and their respective DAX expression for a Direct Lake model (which has been migrated from import/DirectQuery. + Shows the calculated tables and their respective DAX expression for a Direct Lake model (which has been migrated from import/DirectQuery). 
Parameters ---------- @@ -36,7 +36,7 @@ def list_direct_lake_model_calc_tables(dataset: str, workspace: Optional[str] = dfP_filt = dfP[dfP["Mode"] == "DirectLake"] if len(dfP_filt) == 0: - print(f"The '{dataset}' semantic model is not in Direct Lake mode.") + print(f"{icons.yellow_dot} The '{dataset}' semantic model is not in Direct Lake mode.") else: dfA = list_annotations(dataset, workspace) dfT = list_tables(dataset, workspace) diff --git a/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py b/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py index 98391029..2140e183 100644 --- a/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +++ b/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py @@ -7,7 +7,7 @@ ) from sempy_labs._tom import connect_semantic_model from typing import List, Optional, Union - +import sempy_labs._icons as icons def update_direct_lake_model_lakehouse_connection( dataset: str, @@ -54,7 +54,7 @@ def update_direct_lake_model_lakehouse_connection( if len(dfI_filt) == 0: print( - f"The '{lakehouse}' lakehouse does not exist within the '{lakehouse_workspace}' workspace. Therefore it cannot be used to support the '{dataset}' semantic model within the '{workspace}' workspace." + f"{icons.red_dot} The '{lakehouse}' lakehouse does not exist within the '{lakehouse_workspace}' workspace. Therefore it cannot be used to support the '{dataset}' semantic model within the '{workspace}' workspace." ) dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) @@ -62,7 +62,7 @@ def update_direct_lake_model_lakehouse_connection( if len(dfP_filt) == 0: print( - f"The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models." + f"{icons.yellow_dot} The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models." ) else: with connect_semantic_model( @@ -71,11 +71,11 @@ def update_direct_lake_model_lakehouse_connection( shEx = get_shared_expression(lakehouse, lakehouse_workspace) try: - tom.model.Expressions["DatabaseQuery"].Expression = shEx + tom._model.Expressions["DatabaseQuery"].Expression = shEx print( - f"The expression in the '{dataset}' semantic model has been updated to point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace." + f"{icons.green_dot} The expression in the '{dataset}' semantic model has been updated to point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace." ) except: print( - f"ERROR: The expression in the '{dataset}' semantic model was not updated." + f"{icons.red_dot} The expression in the '{dataset}' semantic model was not updated." 
) diff --git a/src/sempy_labs/directlake/_update_directlake_partition_entity.py b/src/sempy_labs/directlake/_update_directlake_partition_entity.py index d710b146..471d3c73 100644 --- a/src/sempy_labs/directlake/_update_directlake_partition_entity.py +++ b/src/sempy_labs/directlake/_update_directlake_partition_entity.py @@ -1,13 +1,16 @@ import sempy.fabric as fabric from sempy_labs._tom import connect_semantic_model +from sempy_labs._helper_functions import resolve_lakehouse_name from typing import List, Optional, Union - +import sempy_labs._icons as icons def update_direct_lake_partition_entity( dataset: str, table_name: Union[str, List[str]], entity_name: Union[str, List[str]], workspace: Optional[str] = None, + lakehouse: Optional[str] = None, + lakehouse_workspace: Optional[str] = None ): """ Remaps a table (or tables) in a Direct Lake semantic model to a table in a lakehouse. @@ -24,10 +27,24 @@ def update_direct_lake_partition_entity( The Fabric workspace name in which the semantic model exists. Defaults to None which resolves to the workspace of the attached lakehouse or if no lakehouse attached, resolves to the workspace of the notebook. + lakehouse : str, default=None + The Fabric lakehouse used by the Direct Lake semantic model. + Defaults to None which resolves to the lakehouse attached to the notebook. + lakehouse_workspace : str, default=None + The Fabric workspace used by the lakehouse. + Defaults to None which resolves to the workspace of the attached lakehouse + or if no lakehouse attached, resolves to the workspace of the notebook. """ workspace = fabric.resolve_workspace_name(workspace) + if lakehouse_workspace == None: + lakehouse_workspace = workspace + + if lakehouse == None: + lakehouse_id = fabric.get_lakehouse_id() + lakehouse = resolve_lakehouse_name(lakehouse_id, lakehouse_workspace) + # Support both str & list types if isinstance(table_name, str): table_name = [table_name] @@ -36,7 +53,7 @@ def update_direct_lake_partition_entity( if len(table_name) != len(entity_name): print( - f"ERROR: The 'table_name' and 'entity_name' arrays must be of equal length." + f"{icons.red_dot} The 'table_name' and 'entity_name' arrays must be of equal length." ) return @@ -46,7 +63,7 @@ def update_direct_lake_partition_entity( if not tom.is_direct_lake(): print( - f"The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode." + f"{icons.yellow_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode." ) return @@ -54,11 +71,11 @@ def update_direct_lake_partition_entity( i = table_name.index(tName) eName = entity_name[i] try: - tom.model.Tables[tName].Partitions[0].EntityName = eName + tom._model.Tables[tName].Partitions[0].EntityName = eName print( - f"The '{tName}' table in the '{dataset}' semantic model has been updated to point to the '{eName}' table in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." + f"{icons.green_dot} The '{tName}' table in the '{dataset}' semantic model has been updated to point to the '{eName}' table in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." ) except: print( - f"ERROR: The '{tName}' table in the '{dataset}' semantic model has not been updated." + f"{icons.red_dot} The '{tName}' table in the '{dataset}' semantic model has not been updated." 
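# --- Illustrative usage (editor's note, not part of this patch) ----------------------
# Sketch of the updated signature above: remap two Direct Lake tables to lakehouse tables,
# naming the lakehouse explicitly via the new lakehouse/lakehouse_workspace parameters
# instead of relying on the notebook-attached lakehouse. All names are placeholders.
from sempy_labs.directlake._update_directlake_partition_entity import (
    update_direct_lake_partition_entity,
)

update_direct_lake_partition_entity(
    dataset="AdventureWorks",
    table_name=["DimDate", "FactSales"],
    entity_name=["dim_date", "fact_sales"],
    workspace="Sales Workspace",
    lakehouse="SalesLakehouse",
    lakehouse_workspace="Sales Workspace",
)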
) diff --git a/src/sempy_labs/directlake/_warm_cache.py b/src/sempy_labs/directlake/_warm_cache.py index d995b437..2f8b92f7 100644 --- a/src/sempy_labs/directlake/_warm_cache.py +++ b/src/sempy_labs/directlake/_warm_cache.py @@ -169,7 +169,7 @@ def warm_direct_lake_cache_isresident( dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()): print( - f"The '{dataset}' semantic model in the '{workspace}' workspace is not in Direct Lake mode. This function is specifically for semantic models in Direct Lake mode." + f"{icons.red_dot} The '{dataset}' semantic model in the '{workspace}' workspace is not in Direct Lake mode. This function is specifically for semantic models in Direct Lake mode." ) return diff --git a/src/sempy_labs/lakehouse/_get_lakehouse_tables.py b/src/sempy_labs/lakehouse/_get_lakehouse_tables.py index f80b3e18..319191fb 100644 --- a/src/sempy_labs/lakehouse/_get_lakehouse_tables.py +++ b/src/sempy_labs/lakehouse/_get_lakehouse_tables.py @@ -14,7 +14,7 @@ ) from sempy_labs.lakehouse._lakehouse import lakehouse_attached from typing import Optional - +import sempy_labs._icons as icons def get_lakehouse_tables( lakehouse: Optional[str] = None, @@ -172,7 +172,7 @@ def get_lakehouse_tables( lakeAttach = lakehouse_attached() if lakeAttach == False: print( - f"In order to save the report.json file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." + f"{icons.red_dot} In order to save the report.json file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." ) return spark = SparkSession.builder.getOrCreate() @@ -234,7 +234,7 @@ def get_lakehouse_tables( export_df[c] = export_df[c].astype(bool) print( - f"Saving Lakehouse table properties to the '{lakeTName}' table in the lakehouse...\n" + f"{icons.in_progress} Saving Lakehouse table properties to the '{lakeTName}' table in the lakehouse...\n" ) now = datetime.datetime.now() export_df["Timestamp"] = now @@ -244,7 +244,7 @@ def get_lakehouse_tables( spark_df = spark.createDataFrame(export_df) spark_df.write.mode("append").format("delta").saveAsTable(lakeTName) print( - f"\u2022 Lakehouse table properties have been saved to the '{lakeTName}' delta table." + f"{icons.bullet} Lakehouse table properties have been saved to the '{lakeTName}' delta table." ) return df diff --git a/src/sempy_labs/lakehouse/_lakehouse.py b/src/sempy_labs/lakehouse/_lakehouse.py index 50e55c40..a8164e5e 100644 --- a/src/sempy_labs/lakehouse/_lakehouse.py +++ b/src/sempy_labs/lakehouse/_lakehouse.py @@ -4,7 +4,7 @@ from pyspark.sql import SparkSession from sempy_labs._helper_functions import resolve_lakehouse_name from typing import List, Optional, Union - +import sempy_labs._icons as icons def lakehouse_attached() -> bool: """ @@ -80,6 +80,6 @@ def optimize_lakehouse_tables( deltaTable = DeltaTable.forPath(spark, tablePath) deltaTable.optimize().executeCompaction() print( - f"The '{tableName}' table has been optimized. ({str(i)}/{str(tableCount)})" + f"{icons.green_dot} The '{tableName}' table has been optimized. 
({str(i)}/{str(tableCount)})" ) i += 1 diff --git a/src/sempy_labs/migration/_migrate_calctables_to_lakehouse.py b/src/sempy_labs/migration/_migrate_calctables_to_lakehouse.py index 854b42a2..a78dd80b 100644 --- a/src/sempy_labs/migration/_migrate_calctables_to_lakehouse.py +++ b/src/sempy_labs/migration/_migrate_calctables_to_lakehouse.py @@ -110,7 +110,7 @@ def migrate_calc_tables_to_lakehouse( dataset=dataset, workspace=workspace, readonly=True ) as tom: success = True - for t in tom.model.Tables: + for t in tom._model.Tables: if tom.is_auto_date_table(table_name=t.Name): print( f"{icons.yellow_dot} The '{t.Name}' table is an auto-datetime table and is not supported in the Direct Lake migration process. Please create a proper Date/Calendar table in your lakehoues and use it in your Direct Lake model." @@ -175,7 +175,7 @@ def migrate_calc_tables_to_lakehouse( try: dataType = next( str(c.DataType) - for c in tom.model.Tables[ + for c in tom._model.Tables[ t.Name ].Columns if str(c.Type) @@ -185,7 +185,7 @@ def migrate_calc_tables_to_lakehouse( except: dataType = next( str(c.DataType) - for c in tom.model.Tables[ + for c in tom._model.Tables[ t.Name ].Columns if str(c.Type) == "Calculated" @@ -236,7 +236,7 @@ def migrate_calc_tables_to_lakehouse( ) as tom2: success2 = True tom2.set_annotation( - object=tom2.model, + object=tom2._model, name=t.Name, value=daxQuery, ) @@ -369,7 +369,7 @@ def migrate_field_parameters( tbl.Columns.Add(col) - tom.model.Tables.Add(tbl) + tom._model.Tables.Add(tbl) ep = TOM.JsonExtendedProperty() ep.Name = "ParameterMetadata" @@ -377,25 +377,25 @@ def migrate_field_parameters( rcd = TOM.RelatedColumnDetails() gpc = TOM.GroupByColumn() - gpc.GroupingColumn = tom.model.Tables[tName].Columns["Value2"] + gpc.GroupingColumn = tom._model.Tables[tName].Columns["Value2"] rcd.GroupByColumns.Add(gpc) # Update column properties - tom.model.Tables[tName].Columns["Value2"].IsHidden = True - tom.model.Tables[tName].Columns["Value3"].IsHidden = True - tom.model.Tables[tName].Columns[ + tom._model.Tables[tName].Columns["Value2"].IsHidden = True + tom._model.Tables[tName].Columns["Value3"].IsHidden = True + tom._model.Tables[tName].Columns[ "Value3" ].DataType = TOM.DataType.Int64 - tom.model.Tables[tName].Columns["Value1"].SortByColumn = ( - tom.model.Tables[tName].Columns["Value3"] + tom._model.Tables[tName].Columns["Value1"].SortByColumn = ( + tom._model.Tables[tName].Columns["Value3"] ) - tom.model.Tables[tName].Columns["Value2"].SortByColumn = ( - tom.model.Tables[tName].Columns["Value3"] + tom._model.Tables[tName].Columns["Value2"].SortByColumn = ( + tom._model.Tables[tName].Columns["Value3"] ) - tom.model.Tables[tName].Columns[ + tom._model.Tables[tName].Columns[ "Value2" ].ExtendedProperties.Add(ep) - tom.model.Tables[tName].Columns[ + tom._model.Tables[tName].Columns[ "Value1" ].RelatedColumnDetails = rcd @@ -412,9 +412,9 @@ def migrate_field_parameters( ] col3 = dfC_filt3["Column Name"].iloc[0] - tom.model.Tables[tName].Columns["Value1"].Name = col1 - tom.model.Tables[tName].Columns["Value2"].Name = col2 - tom.model.Tables[tName].Columns["Value3"].Name = col3 + tom._model.Tables[tName].Columns["Value1"].Name = col1 + tom._model.Tables[tName].Columns["Value2"].Name = col2 + tom._model.Tables[tName].Columns["Value3"].Name = col3 print( f"{icons.green_dot} The '{tName}' table has been added as a field parameter to the '{new_dataset}' semantic model in the '{new_dataset_workspace}' workspace." 
diff --git a/src/sempy_labs/migration/_migrate_calctables_to_semantic_model.py b/src/sempy_labs/migration/_migrate_calctables_to_semantic_model.py index 1eb4cc68..d3cf288c 100644 --- a/src/sempy_labs/migration/_migrate_calctables_to_semantic_model.py +++ b/src/sempy_labs/migration/_migrate_calctables_to_semantic_model.py @@ -89,7 +89,7 @@ def migrate_calc_tables_to_semantic_model( if tName.lower() in lc["Table Name"].values: try: - tom.model.Tables[tName] + tom._model.Tables[tName] except: tom.add_table(name=tName) tom.add_entity_partition( @@ -128,7 +128,7 @@ def migrate_calc_tables_to_semantic_model( matches = re.findall(pattern, scName) lakeColumn = matches[0].replace(" ", "") try: - tom.model.Tables[tName].Columns[cName] + tom._model.Tables[tName].Columns[cName] except: tom.add_data_column( table_name=tName, diff --git a/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py b/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py index a24cf3c0..645a3ceb 100644 --- a/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +++ b/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py @@ -80,12 +80,12 @@ def migrate_model_objects_to_semantic_model( isDirectLake = any( str(p.Mode) == "DirectLake" - for t in tom.model.Tables + for t in tom._model.Tables for p in t.Partitions ) print(f"\n{icons.in_progress} Updating table properties...") - for t in tom.model.Tables: + for t in tom._model.Tables: t.IsHidden = bool(dfT.loc[dfT["Name"] == t.Name, "Hidden"].iloc[0]) t.Description = dfT.loc[dfT["Name"] == t.Name, "Description"].iloc[ 0 @@ -99,7 +99,7 @@ def migrate_model_objects_to_semantic_model( ) print(f"\n{icons.in_progress} Updating column properties...") - for t in tom.model.Tables: + for t in tom._model.Tables: if ( t.Name not in dfP_fp["Table Name"].values ): # do not include field parameters @@ -131,7 +131,7 @@ def migrate_model_objects_to_semantic_model( if sbc != None: try: - c.SortByColumn = tom.model.Tables[ + c.SortByColumn = tom._model.Tables[ t.Name ].Columns[sbc] except: @@ -165,7 +165,7 @@ def migrate_model_objects_to_semantic_model( lvls = r["Level Name"] try: - tom.model.Tables[tName].Hierarchies[hName] + tom._model.Tables[tName].Hierarchies[hName] except: tom.add_hierarchy( table_name=tName, @@ -190,7 +190,7 @@ def migrate_model_objects_to_semantic_model( mFS = r["Format String"] try: - tom.model.Tables[tName].Measures[mName] + tom._model.Tables[tName].Measures[mName] except: tom.add_measure( table_name=tName, @@ -222,7 +222,7 @@ def migrate_model_objects_to_semantic_model( ].iloc[0] try: - tom.model.Tables[cgName] + tom._model.Tables[cgName] except: tom.add_calculation_group( name=cgName, @@ -233,7 +233,7 @@ def migrate_model_objects_to_semantic_model( print( f"{icons.green_dot} The '{cgName}' calculation group has been added." ) - tom.model.DiscourageImplicitMeasures = True + tom._model.DiscourageImplicitMeasures = True print( f"\n{icons.in_progress} Updating calculation group column name..." 
@@ -242,7 +242,7 @@ def migrate_model_objects_to_semantic_model( (dfC["Table Name"] == cgName) & (dfC["Hidden"] == False) ] colName = dfC_filt["Column Name"].iloc[0] - tom.model.Tables[cgName].Columns["Name"].Name = colName + tom._model.Tables[cgName].Columns["Name"].Name = colName calcItems = dfCI.loc[ dfCI["Calculation Group Name"] == cgName, @@ -269,7 +269,7 @@ def migrate_model_objects_to_semantic_model( "Format String Expression", ].iloc[0] try: - tom.model.Tables[cgName].CalculationGroup.CalculationItems[ + tom._model.Tables[cgName].CalculationGroup.CalculationItems[ calcItem ] except: @@ -310,7 +310,7 @@ def migrate_model_objects_to_semantic_model( and r.FromColumn.Name == fromColumn and r.ToTable.Name == toTable and r.ToColumn.Name == toColumn - for r in tom.model.Relationships + for r in tom._model.Relationships ): print( f"{icons.yellow_dot} {relName} already exists as a relationship in the semantic model." @@ -324,7 +324,7 @@ def migrate_model_objects_to_semantic_model( r.FromColumn.DataType == "DateTime" or r.ToColumn.DataType == "DateTime" ) - for r in tom.model.Relationships + for r in tom._model.Relationships ): print( f"{icons.yellow_dot} {relName} was not created since relationships based on DateTime columns are not supported." @@ -335,7 +335,7 @@ def migrate_model_objects_to_semantic_model( and r.ToTable.Name == toTable and r.ToColumn.Name == toColumn and (r.FromColumn.DataType != r.ToColumn.DataType) - for r in tom.model.Relationships + for r in tom._model.Relationships ): print( f"{icons.yellow_dot} {relName} was not created since columns used in a relationship must have the same data type." @@ -370,7 +370,7 @@ def migrate_model_objects_to_semantic_model( modPerm = row["Model Permission"] try: - tom.model.Roles[roleName] + tom._model.Roles[roleName] except: tom.add_role( role_name=roleName, @@ -403,7 +403,7 @@ def migrate_model_objects_to_semantic_model( for pName in dfP["Perspective Name"].unique(): try: - tom.model.Perspectives[pName] + tom._model.Perspectives[pName] except: tom.add_perspective(perspective_name=pName) print( @@ -421,21 +421,21 @@ def migrate_model_objects_to_semantic_model( try: if oType == "Table": tom.add_to_perspective( - object=tom.model.Tables[tName], perspective_name=pName + object=tom._model.Tables[tName], perspective_name=pName ) elif oType == "Column": tom.add_to_perspective( - object=tom.model.Tables[tName].Columns[oName], + object=tom._model.Tables[tName].Columns[oName], perspective_name=pName, ) elif oType == "Measure": tom.add_to_perspective( - object=tom.model.Tables[tName].Measures[oName], + object=tom._model.Tables[tName].Measures[oName], perspective_name=pName, ) elif oType == "Hierarchy": tom.add_to_perspective( - object=tom.model.Tables[tName].Hierarchies[oName], + object=tom._model.Tables[tName].Hierarchies[oName], perspective_name=pName, ) except: @@ -444,7 +444,7 @@ def migrate_model_objects_to_semantic_model( print(f"\n{icons.in_progress} Creating translation languages...") for trName in dfTranslation["Culture Name"].unique(): try: - tom.model.Cultures[trName] + tom._model.Cultures[trName] except: tom.add_translation(trName) print( @@ -468,28 +468,28 @@ def migrate_model_objects_to_semantic_model( try: if oType == "Table": tom.set_translation( - object=tom.model.Tables[tName], + object=tom._model.Tables[tName], language=trName, property=prop, value=translation, ) elif oType == "Column": tom.set_translation( - object=tom.model.Tables[tName].Columns[oName], + object=tom._model.Tables[tName].Columns[oName], language=trName, 
property=prop, value=translation, ) elif oType == "Measure": tom.set_translation( - object=tom.model.Tables[tName].Measures[oName], + object=tom._model.Tables[tName].Measures[oName], language=trName, property=prop, value=translation, ) elif oType == "Hierarchy": tom.set_translation( - object=tom.model.Tables[tName].Hierarchies[oName], + object=tom._model.Tables[tName].Hierarchies[oName], language=trName, property=prop, value=translation, @@ -504,7 +504,7 @@ def migrate_model_objects_to_semantic_model( matches = re.findall(pattern, oName) hName = matches[0] tom.set_translation( - object=tom.model.Tables[tName] + object=tom._model.Tables[tName] .Hierarchies[hName] .Levels[lName], language=trName, diff --git a/src/sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py b/src/sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py index 5a9721d2..a7ce514e 100644 --- a/src/sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +++ b/src/sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py @@ -96,7 +96,7 @@ def migrate_tables_columns_to_semantic_model( ) as tom: success = True try: - tom.model.Expressions["DatabaseQuery"] + tom._model.Expressions["DatabaseQuery"] except: tom.add_expression("DatabaseQuery", expression=shEx) print( @@ -110,7 +110,7 @@ def migrate_tables_columns_to_semantic_model( tDesc = r["Description"] try: - tom.model.Tables[tName] + tom._model.Tables[tName] except: tom.add_table( name=tName, @@ -133,7 +133,7 @@ def migrate_tables_columns_to_semantic_model( cDataType = r["Data Type"] try: - tom.model.Tables[tName].Columns[cName] + tom._model.Tables[tName].Columns[cName] except: tom.add_data_column( table_name=tName, diff --git a/src/sempy_labs/migration/_migration_validation.py b/src/sempy_labs/migration/_migration_validation.py index 42935d6d..e2eb2abd 100644 --- a/src/sempy_labs/migration/_migration_validation.py +++ b/src/sempy_labs/migration/_migration_validation.py @@ -31,7 +31,7 @@ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None): with connect_semantic_model( dataset=dataset, workspace=workspace, readonly=True ) as tom: - for t in tom.model.Tables: + for t in tom._model.Tables: if t.CalculationGroup is not None: new_data = { "Parent Name": t.Parent.Name, @@ -124,7 +124,7 @@ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None): df = pd.concat( [df, pd.DataFrame(new_data, index=[0])], ignore_index=True ) - for r in tom.model.Relationships: + for r in tom._model.Relationships: rName = create_relationship_name( r.FromTable.Name, r.FromColumn.Name, r.ToTable.Name, r.ToColumn.Name ) @@ -134,7 +134,7 @@ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None): "Object Type": str(r.ObjectType), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - for role in tom.model.Roles: + for role in tom._model.Roles: new_data = { "Parent Name": role.Parent.Name, "Object Name": role.Name, @@ -150,14 +150,14 @@ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None): df = pd.concat( [df, pd.DataFrame(new_data, index=[0])], ignore_index=True ) - for tr in tom.model.Cultures: + for tr in tom._model.Cultures: new_data = { "Parent Name": tr.Parent.Name, "Object Name": tr.Name, "Object Type": str(tr.ObjectType), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - for per in tom.model.Perspectives: + for per in tom._model.Perspectives: new_data = { "Parent Name": per.Parent.Name, "Object Name": per.Name, 
diff --git a/src/sempy_labs/migration/_refresh_calc_tables.py b/src/sempy_labs/migration/_refresh_calc_tables.py index 467b223a..99a714e1 100644 --- a/src/sempy_labs/migration/_refresh_calc_tables.py +++ b/src/sempy_labs/migration/_refresh_calc_tables.py @@ -36,8 +36,8 @@ def refresh_calc_tables(dataset: str, workspace: Optional[str] = None): dataset=dataset, readonly=True, workspace=workspace ) as tom: success = True - for a in tom.model.Annotations: - if any(a.Name == t.Name for t in tom.model.Tables): + for a in tom._model.Annotations: + if any(a.Name == t.Name for t in tom._model.Tables): tName = a.Name query = a.Value diff --git a/src/sempy_labs/report/_generate_report.py b/src/sempy_labs/report/_generate_report.py index a9b560bf..a362e6f4 100644 --- a/src/sempy_labs/report/_generate_report.py +++ b/src/sempy_labs/report/_generate_report.py @@ -4,7 +4,7 @@ import json, base64, time from typing import Optional from sempy_labs._helper_functions import resolve_workspace_name_and_id - +import sempy_labs._icons as icons def create_report_from_reportjson( report: str, @@ -41,7 +41,7 @@ def create_report_from_reportjson( if len(dfI_model) == 0: print( - f"ERROR: The '{dataset}' semantic model does not exist in the '{workspace}' workspace." + f"{icons.red_dot} The '{dataset}' semantic model does not exist in the '{workspace}' workspace." ) return @@ -52,7 +52,7 @@ def create_report_from_reportjson( if len(dfI_rpt) > 0: print( - f"WARNING: '{report}' already exists as a report in the '{workspace}' workspace." + f"{icons.yellow_dot} '{report}' already exists as a report in the '{workspace}' workspace." ) return @@ -132,7 +132,7 @@ def conv_b64(file): response = client.post(f"/v1/workspaces/{workspace_id}/items", json=request_body) if response.status_code == 201: - print("Report creation succeeded") + print(f"{icons.green_dot} Report creation succeeded") print(response.json()) elif response.status_code == 202: operationId = response.headers["x-ms-operation-id"] @@ -143,7 +143,7 @@ def conv_b64(file): response = client.get(f"/v1/operations/{operationId}") response_body = json.loads(response.content) response = client.get(f"/v1/operations/{operationId}/result") - print("Report creation succeeded") + print(f"{icons.green_dot} Report creation succeeded") print(response.json()) @@ -167,13 +167,11 @@ def update_report_from_reportjson( (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) - objectType = "Report" - dfR = fabric.list_reports(workspace=workspace) dfR_filt = dfR[(dfR["Name"] == report) & (dfR["Report Type"] == "PowerBIReport")] if len(dfR_filt) == 0: - print(f"The '{report}' report does not exist in the '{workspace}' workspace.") + print(f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace.") return reportId = dfR_filt["Id"].iloc[0] @@ -215,7 +213,7 @@ def conv_b64(file): request_body = { "displayName": report, - "type": objectType, + "type": 'Report', "definition": { "parts": [ { @@ -238,7 +236,7 @@ def conv_b64(file): ) if response.status_code == 201: - print(f"The '{report}' report has been successfully updated.") + print(f"{icons.green_dot} The '{report}' report has been successfully updated.") # print(response.json()) elif response.status_code == 202: operationId = response.headers["x-ms-operation-id"] @@ -249,5 +247,5 @@ def conv_b64(file): response = client.get(f"/v1/operations/{operationId}") response_body = json.loads(response.content) response = client.get(f"/v1/operations/{operationId}/result") - print(f"The '{report}' report has 
been successfully updated.") + print(f"{icons.green_dot} The '{report}' report has been successfully updated.") # print(response.json())
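# --- Illustrative usage (editor's note, not part of this patch) ----------------------
# Both report functions above poll the same long-running-operation endpoints when the
# service answers 202. A standalone sketch of that pattern as a hypothetical helper; the
# fixed sleep interval and the "Running" status check are assumptions, not library code.
import json
import time

import sempy.fabric as fabric


def poll_lro_result(client: "fabric.FabricRestClient", response):
    """Poll a 202 (accepted) Fabric REST response until the operation finishes, then return the result."""
    operation_id = response.headers["x-ms-operation-id"]
    while True:
        status = json.loads(client.get(f"/v1/operations/{operation_id}").content).get("status")
        if status != "Running":
            break
        time.sleep(3)
    return client.get(f"/v1/operations/{operation_id}/result")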