fixed list_dataflows #364

Merged · 6 commits · Dec 18, 2024

72 changes: 37 additions & 35 deletions src/sempy_labs/_clear_cache.py
@@ -1,39 +1,41 @@
import sempy.fabric as fabric
from sempy_labs._helper_functions import (
resolve_dataset_id,
is_default_semantic_model,
_get_adls_client,
resolve_workspace_name_and_id,
resolve_dataset_name_and_id,
)
from typing import Optional
import sempy_labs._icons as icons
from sempy._utils._log import log
import pandas as pd
from sempy.fabric.exceptions import FabricHTTPException
from uuid import UUID


def clear_cache(dataset: str, workspace: Optional[str] = None):
def clear_cache(dataset: str | UUID, workspace: Optional[str | UUID] = None):
"""
Clears the cache of a semantic model.
See `here <https://learn.microsoft.com/analysis-services/instances/clear-the-analysis-services-caches?view=asallproducts-allversions>`_ for documentation.

Parameters
----------
dataset : str
Name of the semantic model.
workspace : str, default=None
The Fabric workspace name.
dataset : str | UUID
Name or ID of the semantic model.
workspace : str | UUID, default=None
The Fabric workspace name or ID.
Defaults to None which resolves to the workspace of the attached lakehouse
or if no lakehouse attached, resolves to the workspace of the notebook.
"""

workspace = fabric.resolve_workspace_name(workspace)
(workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
if is_default_semantic_model(dataset=dataset, workspace=workspace):
raise ValueError(
f"{icons.red_dot} Cannot run XMLA operations against a default semantic model. Please choose a different semantic model. "
"See here for more information: https://learn.microsoft.com/fabric/data-warehouse/semantic-models"
)

dataset_id = resolve_dataset_id(dataset=dataset, workspace=workspace)
(dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

xmla = f"""
<ClearCache xmlns="http://schemas.microsoft.com/analysisservices/2003/engine">
@@ -42,27 +44,27 @@ def clear_cache(dataset: str, workspace: Optional[str] = None):
</Object>
</ClearCache>
"""
fabric.execute_xmla(dataset=dataset, xmla_command=xmla, workspace=workspace)
fabric.execute_xmla(dataset=dataset_id, xmla_command=xmla, workspace=workspace_id)
print(
f"{icons.green_dot} Cache cleared for the '{dataset}' semantic model within the '{workspace}' workspace."
f"{icons.green_dot} Cache cleared for the '{dataset_name}' semantic model within the '{workspace_name}' workspace."
)


@log
def backup_semantic_model(
dataset: str,
dataset: str | UUID,
file_path: str,
allow_overwrite: bool = True,
apply_compression: bool = True,
workspace: Optional[str] = None,
workspace: Optional[str | UUID] = None,
):
"""
`Backs up <https://learn.microsoft.com/azure/analysis-services/analysis-services-backup>`_ a semantic model to the ADLS Gen2 storage account connected to the workspace.

Parameters
----------
dataset : str
Name of the semantic model.
dataset : str | UUID
Name or ID of the semantic model.
file_path : str
The ADLS Gen2 storage account location in which to backup the semantic model. Always saves within the 'power-bi-backup/<workspace name>' folder.
Must end in '.abf'.
@@ -72,8 +74,8 @@ def backup_semantic_model(
If True, overwrites backup files of the same name. If False, the file you are saving cannot have the same name as a file that already exists in the same location.
apply_compression : bool, default=True
If True, compresses the backup file. Compressed backup files save disk space, but require slightly higher CPU utilization.
workspace : str, default=None
The Fabric workspace name.
workspace : str | UUID, default=None
The Fabric workspace name or ID.
Defaults to None which resolves to the workspace of the attached lakehouse
or if no lakehouse attached, resolves to the workspace of the notebook.
"""
@@ -83,40 +85,41 @@ def backup_semantic_model(
f"{icons.red_dot} The backup file for restoring must be in the .abf format."
)

workspace = fabric.resolve_workspace_name(workspace)
(workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
(dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

tmsl = {
"backup": {
"database": dataset,
"database": dataset_name,
"file": file_path,
"allowOverwrite": allow_overwrite,
"applyCompression": apply_compression,
}
}

fabric.execute_tmsl(script=tmsl, workspace=workspace)
fabric.execute_tmsl(script=tmsl, workspace=workspace_id)
print(
f"{icons.green_dot} The '{dataset}' semantic model within the '{workspace}' workspace has been backed up to the '{file_path}' location."
f"{icons.green_dot} The '{dataset_name}' semantic model within the '{workspace_name}' workspace has been backed up to the '{file_path}' location."
)


@log
def restore_semantic_model(
dataset: str,
dataset: str | UUID,
file_path: str,
allow_overwrite: bool = True,
ignore_incompatibilities: bool = True,
force_restore: bool = False,
workspace: Optional[str] = None,
workspace: Optional[str | UUID] = None,
):
"""
`Restores <https://learn.microsoft.com/power-bi/enterprise/service-premium-backup-restore-dataset>`_ a semantic model based on a backup (.abf) file
within the ADLS Gen2 storage account connected to the workspace.

Parameters
----------
dataset : str
Name of the semantic model.
dataset : str | UUID
Name or ID of the semantic model.
file_path : str
The location in which to backup the semantic model. Must end in '.abf'.
Example 1: file_path = 'MyModel.abf'
@@ -127,23 +130,23 @@ def restore_semantic_model(
If True, ignores incompatibilities between Azure Analysis Services and Power BI Premium.
force_restore: bool, default=False
If True, restores the semantic model with the existing semantic model unloaded and offline.
workspace : str, default=None
The Fabric workspace name.
workspace : str | UUID, default=None
The Fabric workspace name or ID.
Defaults to None which resolves to the workspace of the attached lakehouse
or if no lakehouse attached, resolves to the workspace of the notebook.
"""
# https://learn.microsoft.com/en-us/power-bi/enterprise/service-premium-backup-restore-dataset

if not file_path.endswith(".abf"):
raise ValueError(
f"{icons.red_dot} The backup file for restoring must be in the .abf format."
)

workspace = fabric.resolve_workspace_name(workspace)
(workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
(dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

tmsl = {
"restore": {
"database": dataset,
"database": dataset_name,
"file": file_path,
"allowOverwrite": allow_overwrite,
"security": "copyAll",
@@ -154,10 +157,10 @@ def restore_semantic_model(
if force_restore:
tmsl["restore"]["forceRestore"] = force_restore

fabric.execute_tmsl(script=tmsl, workspace=workspace)
fabric.execute_tmsl(script=tmsl, workspace=workspace_id)

print(
f"{icons.green_dot} The '{dataset}' semantic model has been restored to the '{workspace}' workspace based on teh '{file_path}' backup file."
f"{icons.green_dot} The '{dataset_name}' semantic model has been restored to the '{workspace_name}' workspace based on the '{file_path}' backup file."
)


@@ -243,7 +246,7 @@ def copy_semantic_model_backup_file(


@log
def list_backups(workspace: Optional[str] = None) -> pd.DataFrame:
def list_backups(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
"""
Shows a list of backup files contained within a workspace's ADLS Gen2 storage account.
Requirement: An ADLS Gen2 storage account must be `connected to the workspace <https://learn.microsoft.com/power-bi/transform-model/dataflows/dataflows-azure-data-lake-storage-integration#connect-to-an-azure-data-lake-gen-2-at-a-workspace-level>`_.
@@ -262,8 +265,7 @@ def list_backups(workspace: Optional[str] = None) -> pd.DataFrame:
"""

client = fabric.PowerBIRestClient()
workspace = fabric.resolve_workspace_name(workspace)
workspace_id = fabric.resolve_workspace_id(workspace)
(workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
response = client.get(
f"/v1.0/myorg/resources?resourceType=StorageAccount&folderObjectId={workspace_id}"
)
@@ -274,7 +276,7 @@ def list_backups(workspace: Optional[str] = None) -> pd.DataFrame:
v = response.json().get("value", [])
if not v:
raise ValueError(
f"{icons.red_dot} A storage account is not associated with the '{workspace}' workspace."
f"{icons.red_dot} A storage account is not associated with the '{workspace_name}' workspace."
)
storage_account = v[0]["resourceName"]

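For reviewers trying the new signatures in `_clear_cache.py`: a minimal usage sketch (not part of this diff), assuming these functions are re-exported at the `sempy_labs` package level as with the other modules and the notebook runs inside a Fabric workspace. The model names, workspace name, and UUIDs below are hypothetical placeholders.

```python
# Minimal sketch of the new str | UUID signatures; all names and IDs are placeholders.
from uuid import UUID
import sempy_labs as labs

# clear_cache now accepts either a name or an ID for both parameters.
labs.clear_cache(dataset="Sales Model", workspace="Finance")
labs.clear_cache(
    dataset=UUID("00000000-0000-0000-0000-000000000000"),    # placeholder dataset ID
    workspace=UUID("11111111-1111-1111-1111-111111111111"),  # placeholder workspace ID
)

# Backup to, and restore from, the ADLS Gen2 account connected to the workspace.
labs.backup_semantic_model(dataset="Sales Model", file_path="SalesModel.abf")
labs.restore_semantic_model(
    dataset="Sales Model",
    file_path="SalesModel.abf",
    force_restore=True,
)
```
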
12 changes: 6 additions & 6 deletions src/sempy_labs/_connections.py
@@ -5,6 +5,7 @@
from sempy_labs._helper_functions import (
pagination,
_is_valid_uuid,
resolve_workspace_name_and_id,
)
from uuid import UUID
import sempy_labs._icons as icons
@@ -205,7 +206,7 @@ def list_connections() -> pd.DataFrame:


def list_item_connections(
item_name: str, item_type: str, workspace: Optional[str] = None
item_name: str, item_type: str, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
"""
Shows the list of connections that the specified item is connected to.
@@ -218,8 +219,8 @@ def list_item_connections(
The item name.
item_type : str
The `item type <https://learn.microsoft.com/rest/api/fabric/core/items/update-item?tabs=HTTP#itemtype>`_.
workspace : str, default=None
The Fabric workspace name.
workspace : str | UUID, default=None
The Fabric workspace name or ID.
Defaults to None which resolves to the workspace of the attached lakehouse
or if no lakehouse attached, resolves to the workspace of the notebook.

@@ -229,11 +230,10 @@ def list_item_connections(
A pandas dataframe showing the list of connections that the specified item is connected to.
"""

workspace = fabric.resolve_workspace_name(workspace)
workspace_id = fabric.resolve_workspace_id(workspace)
(workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
item_type = item_type[0].upper() + item_type[1:]
item_id = fabric.resolve_item_id(
item_name=item_name, type=item_type, workspace=workspace
item_name=item_name, type=item_type, workspace=workspace_id
)

client = fabric.FabricRestClient()
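As a quick check of the `list_item_connections` change in `_connections.py`, a short sketch under the same assumption that the function is exported at the package level; the item name is a placeholder.

```python
# Sketch: workspace may now be a name, a UUID, or None (resolved from the
# attached lakehouse or, failing that, the notebook's workspace).
import sempy_labs as labs

df = labs.list_item_connections(
    item_name="Sales Model",    # placeholder item name
    item_type="SemanticModel",  # Fabric item type, e.g. Lakehouse, Notebook, SemanticModel
    workspace=None,
)
print(df.head())
```
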
40 changes: 20 additions & 20 deletions src/sempy_labs/_data_pipelines.py
@@ -9,18 +9,19 @@
_decode_b64,
)
from sempy.fabric.exceptions import FabricHTTPException
from uuid import UUID


def list_data_pipelines(workspace: Optional[str] = None) -> pd.DataFrame:
def list_data_pipelines(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
"""
Shows the data pipelines within a workspace.

This is a wrapper function for the following API: `Items - List Data Pipelines <https://learn.microsoft.com/rest/api/fabric/datapipeline/items/list-data-pipelines>`_.

Parameters
----------
workspace : str, default=None
The Fabric workspace name.
workspace : str | UUID, default=None
The Fabric workspace name or ID.
Defaults to None which resolves to the workspace of the attached lakehouse
or if no lakehouse attached, resolves to the workspace of the notebook.

@@ -32,7 +33,7 @@ def list_data_pipelines(workspace: Optional[str] = None) -> pd.DataFrame:

df = pd.DataFrame(columns=["Data Pipeline Name", "Data Pipeline ID", "Description"])

(workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
(workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)

client = fabric.FabricRestClient()
response = client.get(f"/v1/workspaces/{workspace_id}/dataPipelines")
@@ -54,7 +55,7 @@ def list_data_pipelines(workspace: Optional[str] = None) -> pd.DataFrame:


def create_data_pipeline(
name: str, description: Optional[str] = None, workspace: Optional[str] = None
name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
):
"""
Creates a Fabric data pipeline.
@@ -67,13 +68,13 @@ def create_data_pipeline(
Name of the data pipeline.
description : str, default=None
A description of the environment.
workspace : str, default=None
The Fabric workspace name.
workspace : str | UUID, default=None
The Fabric workspace name or ID.
Defaults to None which resolves to the workspace of the attached lakehouse
or if no lakehouse attached, resolves to the workspace of the notebook.
"""

(workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
(workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)

request_body = {"displayName": name}

@@ -88,11 +89,11 @@ def create_data_pipeline(
lro(client, response, status_codes=[201, 202])

print(
f"{icons.green_dot} The '{name}' data pipeline has been created within the '{workspace}' workspace."
f"{icons.green_dot} The '{name}' data pipeline has been created within the '{workspace_name}' workspace."
)


def delete_data_pipeline(name: str, workspace: Optional[str] = None):
def delete_data_pipeline(name: str, workspace: Optional[str | UUID] = None):
"""
Deletes a Fabric data pipeline.

@@ -102,16 +103,16 @@ def delete_data_pipeline(name: str, workspace: Optional[str] = None):
----------
name: str
Name of the data pipeline.
workspace : str, default=None
workspace : str | UUID, default=None
The Fabric workspace name.
Defaults to None which resolves to the workspace of the attached lakehouse
or if no lakehouse attached, resolves to the workspace of the notebook.
"""

(workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
(workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)

item_id = fabric.resolve_item_id(
item_name=name, type="DataPipeline", workspace=workspace
item_name=name, type="DataPipeline", workspace=workspace_id
)

client = fabric.FabricRestClient()
@@ -121,12 +122,12 @@ def delete_data_pipeline(name: str, workspace: Optional[str] = None):
raise FabricHTTPException(response)

print(
f"{icons.green_dot} The '{name}' data pipeline within the '{workspace}' workspace has been deleted."
f"{icons.green_dot} The '{name}' data pipeline within the '{workspace_name}' workspace has been deleted."
)


def get_data_pipeline_definition(
name: str, workspace: Optional[str] = None, decode: bool = True
name: str, workspace: Optional[str | UUID] = None, decode: bool = True
) -> dict | pd.DataFrame:
"""
Obtains the definition of a data pipeline.
@@ -135,8 +136,8 @@ def get_data_pipeline_definition(
----------
name : str
The name of the data pipeline.
workspace : str, default=None
The Fabric workspace name.
workspace : str | UUID, default=None
The Fabric workspace name or ID.
Defaults to None which resolves to the workspace of the attached lakehouse
or if no lakehouse attached, resolves to the workspace of the notebook.
decode : bool, default=True
@@ -150,10 +151,9 @@ def get_data_pipeline_definition(
A pandas dataframe showing the data pipelines within a workspace.
"""

workspace = fabric.resolve_workspace_name(workspace)
workspace_id = fabric.resolve_workspace_id(workspace)
(workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
item_id = fabric.resolve_item_id(
item_name=name, type="DataPipeline", workspace=workspace
item_name=name, type="DataPipeline", workspace=workspace_id
)

client = fabric.FabricRestClient()
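Finally, a sketch of the updated data-pipeline helpers in `_data_pipelines.py`, again assuming package-level exports; the pipeline name and description are placeholders.

```python
# Sketch of the str | UUID workspace parameter on the pipeline helpers.
import sempy_labs as labs

labs.create_data_pipeline(name="Nightly Load", description="Loads staging tables")
pipelines = labs.list_data_pipelines()          # pandas DataFrame of pipelines in the workspace
definition = labs.get_data_pipeline_definition("Nightly Load", decode=True)
labs.delete_data_pipeline(name="Nightly Load", workspace=None)
```
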