added list_item_job_instances, ran black
m-kovalsky committed Dec 18, 2024
1 parent 54df7aa commit fd24369
Showing 26 changed files with 242 additions and 49 deletions.
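Aside from the new src/sempy_labs/_job_scheduler.py module, which adds the list_item_job_instances wrapper, the changes below are mechanical: running black reflows long function signatures and call sites onto multiple lines without changing behavior.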
5 changes: 4 additions & 1 deletion src/sempy_labs/_dax.py
@@ -260,7 +260,10 @@ def get_dax_query_memory_size(
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
     df = get_dax_query_dependencies(
-        dataset=dataset_id, workspace=workspace_id, dax_string=dax_string, put_in_memory=True
+        dataset=dataset_id,
+        workspace=workspace_id,
+        dax_string=dax_string,
+        put_in_memory=True,
     )
 
     return df["Total Size"].sum()
5 changes: 4 additions & 1 deletion src/sempy_labs/_environments.py
@@ -10,8 +10,11 @@
 from sempy.fabric.exceptions import FabricHTTPException
 from uuid import UUID
 
+
 def create_environment(
-    environment: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
+    environment: str,
+    description: Optional[str] = None,
+    workspace: Optional[str | UUID] = None,
 ):
     """
     Creates a Fabric environment.
22 changes: 17 additions & 5 deletions src/sempy_labs/_generate_semantic_model.py
@@ -273,18 +273,25 @@ def deploy_semantic_model(
         If set to True, overwrites the existing semantic model in the workspace if it exists.
     """
 
-    (source_workspace_name, source_workspace_id) = resolve_workspace_name_and_id(source_workspace)
+    (source_workspace_name, source_workspace_id) = resolve_workspace_name_and_id(
+        source_workspace
+    )
 
     if target_workspace is None:
         target_workspace_name = source_workspace_name
         target_workspace_id = fabric.resolve_workspace_id(target_workspace_name)
     else:
-        (target_workspace_name, target_workspace_id) = resolve_workspace_name_and_id(target_workspace)
+        (target_workspace_name, target_workspace_id) = resolve_workspace_name_and_id(
+            target_workspace
+        )
 
     if target_dataset is None:
         target_dataset = source_dataset
 
-    if target_dataset == source_dataset and target_workspace_name == source_workspace_name:
+    if (
+        target_dataset == source_dataset
+        and target_workspace_name == source_workspace_name
+    ):
         raise ValueError(
             f"{icons.red_dot} The 'dataset' and 'new_dataset' parameters have the same value. And, the 'workspace' and 'new_dataset_workspace' "
             f"parameters have the same value. At least one of these must be different. Please update the parameters."
@@ -350,7 +357,10 @@ def get_semantic_model_bim(
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
     bimJson = get_semantic_model_definition(
-        dataset=dataset_id, workspace=workspace_id, format="TMSL", return_dataframe=False
+        dataset=dataset_id,
+        workspace=workspace_id,
+        format="TMSL",
+        return_dataframe=False,
     )
 
     if save_to_file_name is not None:
@@ -444,7 +454,9 @@ def get_semantic_model_definition(
     return decoded_parts
 
 
-def get_semantic_model_size(dataset: str | UUID, workspace: Optional[str | UUID] = None):
+def get_semantic_model_size(
+    dataset: str | UUID, workspace: Optional[str | UUID] = None
+):
     """
     Gets size of the semantic model in bytes.
4 changes: 3 additions & 1 deletion src/sempy_labs/_git.py
@@ -255,7 +255,9 @@ def initialize_git_connection(workspace: Optional[str | UUID] = None) -> str:
 
 
 def commit_to_git(
-    comment: str, item_ids: str | List[str] = None, workspace: Optional[str | UUID] = None
+    comment: str,
+    item_ids: str | List[str] = None,
+    workspace: Optional[str | UUID] = None,
 ):
     """
     Commits all or a selection of items within a workspace to Git.
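As the docstring notes, commit_to_git commits either the whole workspace or a selection of items. A minimal usage sketch (the workspace name and item ID are hypothetical; the import path assumes the function is exported at the package root, as with other sempy_labs functions):

from sempy_labs import commit_to_git

# Commit every changed item in the workspace.
commit_to_git(comment="Nightly sync", workspace="Analytics")

# Commit only selected items by their IDs (hypothetical UUID).
commit_to_git(
    comment="Update the ETL notebook only",
    item_ids=["5b1b6e4a-0d8f-4f0a-9b5e-3c2d1a7e9f00"],
    workspace="Analytics",
)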
40 changes: 30 additions & 10 deletions src/sempy_labs/_helper_functions.py
@@ -148,7 +148,9 @@ def resolve_report_name(report_id: UUID, workspace: Optional[str | UUID] = None)
     )
 
 
-def resolve_item_name_and_id(item: str | UUID, type: Optional[str] = None, workspace: Optional[str | UUID] = None) -> Tuple[str, UUID]:
+def resolve_item_name_and_id(
+    item: str | UUID, type: Optional[str] = None, workspace: Optional[str | UUID] = None
+) -> Tuple[str, UUID]:
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
@@ -159,7 +161,9 @@ def resolve_item_name_and_id(
         )
     else:
         if type is None:
-            raise ValueError(f"{icons.warning} Must specify a 'type' if specifying a name as the 'item'.")
+            raise ValueError(
+                f"{icons.warning} Must specify a 'type' if specifying a name as the 'item'."
+            )
         item_name = item
         item_id = fabric.resolve_item_id(
             item_name=item, type=type, workspace=workspace_id
@@ -188,7 +192,9 @@ def resolve_dataset_name_and_id(
     return dataset_name, dataset_id
 
 
-def resolve_dataset_id(dataset: str | UUID, workspace: Optional[str | UUID] = None) -> UUID:
+def resolve_dataset_id(
+    dataset: str | UUID, workspace: Optional[str | UUID] = None
+) -> UUID:
     """
     Obtains the ID of the semantic model.
@@ -217,7 +223,9 @@ def resolve_dataset_id(
     return dataset_id
 
 
-def resolve_dataset_name(dataset_id: UUID, workspace: Optional[str | UUID] = None) -> str:
+def resolve_dataset_name(
+    dataset_id: UUID, workspace: Optional[str | UUID] = None
+) -> str:
     """
     Obtains the name of the semantic model.
@@ -271,7 +279,9 @@ def resolve_lakehouse_name(
     )
 
 
-def resolve_lakehouse_id(lakehouse: str, workspace: Optional[str | UUID] = None) -> UUID:
+def resolve_lakehouse_id(
+    lakehouse: str, workspace: Optional[str | UUID] = None
+) -> UUID:
     """
     Obtains the ID of the Fabric lakehouse.
@@ -559,7 +569,9 @@ def language_validate(language: str):
     return lang
 
 
-def resolve_workspace_name_and_id(workspace: Optional[str | UUID] = None) -> Tuple[str, str]:
+def resolve_workspace_name_and_id(
+    workspace: Optional[str | UUID] = None,
+) -> Tuple[str, str]:
     """
     Obtains the name and ID of the Fabric workspace.
@@ -609,7 +621,9 @@ def _decode_b64(file, format: Optional[str] = "utf-8"):
     return result
 
 
-def is_default_semantic_model(dataset: str, workspace: Optional[str | UUID] = None) -> bool:
+def is_default_semantic_model(
+    dataset: str, workspace: Optional[str | UUID] = None
+) -> bool:
     """
     Identifies whether a semantic model is a default semantic model.
@@ -716,7 +730,9 @@ def _add_part(target_dict, path, payload):
     target_dict["definition"]["parts"].append(part)
 
 
-def resolve_workspace_capacity(workspace: Optional[str | UUID] = None) -> Tuple[UUID, str]:
+def resolve_workspace_capacity(
+    workspace: Optional[str | UUID] = None,
+) -> Tuple[UUID, str]:
     """
     Obtains the capacity Id and capacity name for a given workspace.
@@ -1083,7 +1099,9 @@ def convert_to_alphanumeric_lowercase(input_string):
     return cleaned_string
 
 
-def resolve_environment_id(environment: str, workspace: Optional[str | UUID] = None) -> UUID:
+def resolve_environment_id(
+    environment: str, workspace: Optional[str | UUID] = None
+) -> UUID:
     """
     Obtains the environment Id for a given environment.
@@ -1182,7 +1200,9 @@ def _make_list_unique(my_list):
     return list(set(my_list))
 
 
-def _get_partition_map(dataset: str, workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+def _get_partition_map(
+    dataset: str, workspace: Optional[str | UUID] = None
+) -> pd.DataFrame:
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
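The reflowed resolve_item_name_and_id accepts either an ID or a name, and as the multi-line ValueError makes explicit, a name must be accompanied by a type. A minimal sketch (identifiers are hypothetical):

from sempy_labs._helper_functions import resolve_item_name_and_id

# By ID: no 'type' needed.
name, item_id = resolve_item_name_and_id(item="5b1b6e4a-0d8f-4f0a-9b5e-3c2d1a7e9f00")

# By name: 'type' is required, otherwise the ValueError above is raised.
name, item_id = resolve_item_name_and_id(
    item="Daily ETL", type="Notebook", workspace="Analytics"
)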
88 changes: 88 additions & 0 deletions src/sempy_labs/_job_scheduler.py
@@ -0,0 +1,88 @@
import sempy.fabric as fabric
import pandas as pd
from typing import Optional
from sempy_labs._helper_functions import (
    resolve_workspace_name_and_id,
    resolve_item_name_and_id,
    pagination,
)
from sempy.fabric.exceptions import FabricHTTPException
from uuid import UUID


def list_item_job_instances(
    item: str | UUID, type: Optional[str] = None, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
    """
    Returns a list of job instances for the specified item.

    This is a wrapper function for the following API: `Job Scheduler - List Item Job Instances <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/list-item-job-instances>`_.

    Parameters
    ----------
    item : str | UUID
        The item name or ID.
    type : str, default=None
        The item type. If specifying the item name as the item, the item type is required.
    workspace : str | UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        Shows a list of job instances for the specified item.
    """

    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
    (item_name, item_id) = resolve_item_name_and_id(
        item=item, type=type, workspace=workspace
    )

    client = fabric.FabricRestClient()
    response = client.get(
        f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/instances"
    )

    if response.status_code != 200:
        raise FabricHTTPException(response)

    df = pd.DataFrame(
        columns=[
            "Job Instance Id",
            "Item Id",
            "Job Type",
            "Invoke Type",
            "Status",
            "Root Activity Id",
            "Start Time UTC",
            "End Time UTC",
            "Failure Reason",
        ]
    )

    responses = pagination(client, response)

    if not responses[0].get("value"):
        return df

    dfs = []
    for r in responses:
        for v in r.get("value", []):
            new_data = {
                "Job Instance Id": v.get("id"),
                "Item Id": v.get("itemId"),
                "Job Type": v.get("jobType"),
                "Invoke Type": v.get("invokeType"),
                "Status": v.get("status"),
                "Root Activity Id": v.get("rootActivityId"),
                "Start Time UTC": v.get("startTimeUtc"),
                "End Time UTC": v.get("endTimeUtc"),
                "Failure Reason": v.get("failureReason"),
            }
            dfs.append(pd.DataFrame(new_data, index=[0]))

    if dfs:
        df = pd.concat(dfs, ignore_index=True)

    return df
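A minimal usage sketch for the new function (the item and workspace names are hypothetical; importing from the private module, since the package-root export is not shown in this diff):

from sempy_labs._job_scheduler import list_item_job_instances

# List job instances for a notebook; 'type' is required because the item is given by name.
df = list_item_job_instances(item="Daily ETL", type="Notebook", workspace="Analytics")
print(df[["Job Type", "Invoke Type", "Status", "Start Time UTC"]])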
12 changes: 9 additions & 3 deletions src/sempy_labs/_list_functions.py
@@ -258,7 +258,9 @@ def list_tables(
     return df
 
 
-def list_annotations(dataset: str | UUID, workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+def list_annotations(
+    dataset: str | UUID, workspace: Optional[str | UUID] = None
+) -> pd.DataFrame:
     """
     Shows a semantic model's annotations and their properties.
@@ -939,7 +941,9 @@ def parse_value(text):
     return dfR
 
 
-def list_kpis(dataset: str | UUID, workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+def list_kpis(
+    dataset: str | UUID, workspace: Optional[str | UUID] = None
+) -> pd.DataFrame:
     """
     Shows a semantic model's KPIs and their properties.
@@ -1554,7 +1558,9 @@ def list_semantic_model_object_report_usage(
     final_df["Object"] = format_dax_object_name(
         final_df["Table Name"], final_df["Object Name"]
     )
-    dfC = fabric.list_columns(dataset=dataset_id, workspace=workspace_id, extended=True)
+    dfC = fabric.list_columns(
+        dataset=dataset_id, workspace=workspace_id, extended=True
+    )
     dfC["Object"] = format_dax_object_name(dfC["Table Name"], dfC["Column Name"])
     final_df = pd.merge(
         final_df,
4 changes: 3 additions & 1 deletion src/sempy_labs/_managed_private_endpoints.py
@@ -66,7 +66,9 @@ def create_managed_private_endpoint(
     )
 
 
-def list_managed_private_endpoints(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+def list_managed_private_endpoints(
+    workspace: Optional[str | UUID] = None,
+) -> pd.DataFrame:
     """
     Shows the managed private endpoints within a workspace.
4 changes: 3 additions & 1 deletion src/sempy_labs/_mirrored_databases.py
@@ -112,7 +112,9 @@ def create_mirrored_database(
     )
 
 
-def delete_mirrored_database(mirrored_database: str, workspace: Optional[str | UUID] = None):
+def delete_mirrored_database(
+    mirrored_database: str, workspace: Optional[str | UUID] = None
+):
     """
     Deletes a mirrored database.
4 changes: 3 additions & 1 deletion src/sempy_labs/_notebooks.py
@@ -34,7 +34,9 @@ def _get_notebook_definition_base(
     return pd.json_normalize(result["definition"]["parts"])
 
 
-def _get_notebook_type(notebook_name: str, workspace: Optional[str | UUID] = None) -> str:
+def _get_notebook_type(
+    notebook_name: str, workspace: Optional[str | UUID] = None
+) -> str:
 
     df_items = _get_notebook_definition_base(
         notebook_name=notebook_name, workspace=workspace
4 changes: 3 additions & 1 deletion src/sempy_labs/_query_scale_out.py
@@ -141,7 +141,9 @@ def qso_sync_status(
     return df, dfRep
 
 
-def disable_qso(dataset: str | UUID, workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+def disable_qso(
+    dataset: str | UUID, workspace: Optional[str | UUID] = None
+) -> pd.DataFrame:
     """
     Sets the max read-only replicas to 0, disabling query scale out.
8 changes: 6 additions & 2 deletions src/sempy_labs/_refresh_semantic_model.py
@@ -277,7 +277,9 @@ def display_trace_logs(trace, partition_map, widget, title, stop=False):
 
 @log
 def cancel_dataset_refresh(
-    dataset: str | UUID, request_id: Optional[str] = None, workspace: Optional[str | UUID] = None
+    dataset: str | UUID,
+    request_id: Optional[str] = None,
+    workspace: Optional[str | UUID] = None,
 ):
     """
     Cancels the refresh of a semantic model which was executed via the `Enhanced Refresh API <https://learn.microsoft.com/power-bi/connect-data/asynchronous-refresh>`_
@@ -323,7 +325,9 @@ def cancel_dataset_refresh(
 
 
 def get_semantic_model_refresh_history(
-    dataset: str | UUID, request_id: Optional[str] = None, workspace: Optional[str | UUID] = None
+    dataset: str | UUID,
+    request_id: Optional[str] = None,
+    workspace: Optional[str | UUID] = None,
 ) -> pd.DataFrame:
     """
     Obtains the semantic model refresh history (refreshes executed via the Enhanced Refresh API).
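These two functions pair naturally: the history call surfaces the request IDs that the cancel call accepts. A sketch with hypothetical names (import paths assume package-root exports):

from sempy_labs import cancel_dataset_refresh, get_semantic_model_refresh_history

# Inspect refreshes issued via the Enhanced Refresh API.
history = get_semantic_model_refresh_history(dataset="Sales Model", workspace="Analytics")

# Cancel a specific refresh by its request ID (hypothetical value).
cancel_dataset_refresh(
    dataset="Sales Model",
    request_id="9d3e4a2b-1c5f-4e6d-8a7b-0f1e2d3c4b5a",
    workspace="Analytics",
)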
1 change: 1 addition & 0 deletions src/sempy_labs/_sql.py
@@ -8,6 +8,7 @@
 from sempy_labs._helper_functions import resolve_warehouse_id, resolve_lakehouse_id
 from uuid import UUID
 
+
 def _bytes2mswin_bstr(value: bytes) -> bytes:
     """Convert a sequence of bytes into a (MS-Windows) BSTR (as bytes).
(13 more changed files not shown.)
