diff --git a/.ci/build_wheel.py b/.ci/build_wheel.py index c3d43099ee..6acac20032 100644 --- a/.ci/build_wheel.py +++ b/.ci/build_wheel.py @@ -3,6 +3,7 @@ import argparse import subprocess +from pathlib import Path import os import sys import shutil @@ -39,15 +40,13 @@ print("Created temporary directory: ", tmpdirname) # Create the temporary build-opts.cfg - build_opts_path = os.path.join(tmpdirname, "build-opts.cfg") - with open(build_opts_path, "w") as build_opts_file: - build_opts_file.write(f"[bdist_wheel]\nplat-name={requested_platform}") - os.environ["DIST_EXTRA_CONFIG"] = build_opts_path + build_opts_path = Path(tmpdirname) / "build-opts.cfg" + + build_opts_path.write_text(f"[bdist_wheel]\nplat-name={requested_platform}", encoding="utf-8") + os.environ["DIST_EXTRA_CONFIG"] = str(build_opts_path) # Move the binaries - gatebin_folder_path = os.path.join( - os.path.curdir, os.path.join("src", "ansys", "dpf", "gatebin") - ) + gatebin_folder_path = Path.cwd() / "src" / "ansys" / "dpf" / "gatebin" binaries_to_move = [] moved = [] if "win" in requested_platform or "any" == requested_platform: @@ -60,15 +59,15 @@ binaries_to_move.extend(["_version.py"]) for binary_name in binaries_to_move: - src = os.path.join(gatebin_folder_path, binary_name) - dst = os.path.join(tmpdirname, binary_name) + src = gatebin_folder_path / binary_name + dst = Path(tmpdirname) / binary_name print(f"Moving {src} to {dst}") shutil.move(src=src, dst=dst) moved.append([dst, src]) if "any" == requested_platform: # Also remove the gatebin folder - os.rmdir(gatebin_folder_path) + gatebin_folder_path.rmdir() # Call the build if not args.wheelhouse: @@ -83,7 +82,7 @@ if "any" == requested_platform: # Recreate the gatebin folder - os.mkdir(gatebin_folder_path) + gatebin_folder_path.mkdir() # Move binaries back for move_back in moved: diff --git a/.ci/code_generation.py b/.ci/code_generation.py index cb372e324a..e073929848 100644 --- a/.ci/code_generation.py +++ b/.ci/code_generation.py @@ -8,21 +8,21 @@ import shutil -local_dir = os.path.dirname(os.path.abspath(__file__)) -TARGET_PATH = os.path.join(local_dir, os.pardir, "src", "ansys", "dpf", "core", "operators") -files = glob.glob(os.path.join(TARGET_PATH, "*")) -for f in files: - if Path(f).stem == "specification": +local_dir = Path(__file__).parent +TARGET_PATH = local_dir.parent / "src" / "ansys" / "dpf" / "core" / "operators" +files = TARGET_PATH.glob("*") +for file_path in files: + if file_path.stem == "specification": continue - if Path(f).name == "build.py": + if file_path.name == "build.py": continue - if Path(f).name == "operator.mustache": + if file_path.name == "operator.mustache": continue try: - if os.path.isdir(f): - shutil.rmtree(f) + if file_path.is_dir(): + shutil.rmtree(file_path) else: - os.remove(f) + file_path.unlink() except: pass diff --git a/.ci/run_examples.py b/.ci/run_examples.py index 5a3da2b7bd..91d9d56628 100644 --- a/.ci/run_examples.py +++ b/.ci/run_examples.py @@ -1,6 +1,6 @@ import os import glob -import pathlib +from pathlib import Path import subprocess import sys @@ -11,8 +11,9 @@ os.environ["PYVISTA_OFF_SCREEN"] = "true" os.environ["MPLBACKEND"] = "Agg" -actual_path = pathlib.Path(__file__).parent.absolute() -print(os.path.join(actual_path, os.path.pardir, "examples")) +actual_path = Path(__file__).parent.absolute() +examples_path = actual_path.parent / "examples" +print(examples_path) # Get the DPF server version server = dpf.server.get_or_create_server(None) @@ -20,13 +21,13 @@ server.shutdown() print(f"Server version: 
{server_version}") -for root, subdirectories, files in os.walk(os.path.join(actual_path, os.path.pardir, "examples")): +for root, subdirectories, files in os.walk(examples_path): for subdirectory in subdirectories: - subdir = os.path.join(root, subdirectory) - for file in glob.iglob(os.path.join(subdir, "*.py")): - if sys.platform == "linux" and "08-python-operators" in file: + subdir = Path(root) / subdirectory + for file in subdir.glob("*.py"): + if sys.platform == "linux" and "08-python-operators" in str(file): continue - elif "win" in sys.platform and "06-distributed_stress_averaging" in file: + elif "win" in sys.platform and "06-distributed_stress_averaging" in str(file): # Currently very unstable in the GH CI continue print("\n--------------------------------------------------") @@ -36,7 +37,7 @@ print(f"Example skipped as it requires DPF {minimum_version_str}.", flush=True) continue try: - out = subprocess.check_output([sys.executable, file]) + out = subprocess.check_output([sys.executable, str(file)]) except subprocess.CalledProcessError as e: sys.stderr.write(str(e.args)) if e.returncode != 3221225477: diff --git a/.ci/run_non_regression_examples.py b/.ci/run_non_regression_examples.py index 247e074531..fc3dc8fedc 100644 --- a/.ci/run_non_regression_examples.py +++ b/.ci/run_non_regression_examples.py @@ -9,49 +9,30 @@ os.environ["MPLBACKEND"] = "Agg" actual_path = pathlib.Path(__file__).parent.absolute() -print(os.path.join(actual_path, os.path.pardir, "examples")) +examples_path = actual_path.parent / "examples" +print(examples_path) list_tests = [ - os.path.join(actual_path, os.path.pardir, "examples", "00-basic"), - os.path.join(actual_path, os.path.pardir, "examples", "01-transient_analyses"), - os.path.join(actual_path, os.path.pardir, "examples", "02-modal_analyses"), - os.path.join(actual_path, os.path.pardir, "examples", "03-harmonic_analyses"), - os.path.join(actual_path, os.path.pardir, "examples", "06-plotting", "00-basic_plotting.py"), - os.path.join( - actual_path, - os.path.pardir, - "examples", - "06-plotting", - "05-plot_on_warped_mesh.py", - ), - os.path.join( - actual_path, - os.path.pardir, - "examples", - "07-distributed-post", - "00-distributed_total_disp.py", - ), + examples_path / "00-basic", + examples_path / "01-transient_analyses", + examples_path / "02-modal_analyses", + examples_path / "03-harmonic_analyses", + examples_path / "06-plotting" / "00-basic_plotting.py", + examples_path / "06-plotting" / "05-plot_on_warped_mesh.py", + examples_path / "07-distributed-post" / "00-distributed_total_disp.py", ] if core.SERVER_CONFIGURATION != core.AvailableServerConfigs.InProcessServer: - list_tests.append( - os.path.join( - actual_path, - os.path.pardir, - "examples", - "08-python-operators", - "00-wrapping_numpy_capabilities.py", - ) - ) + list_tests.append(examples_path / "08-python-operators" / "00-wrapping_numpy_capabilities.py") for path in list_tests: - if os.path.isdir(path): - for file in glob.iglob(os.path.join(path, "*.py")): + if path.is_dir(): + for file in path.glob("*.py"): print("\n--------------------------------------------------") print(file) try: - subprocess.check_call([sys.executable, file]) + subprocess.check_call([sys.executable, str(file)]) except subprocess.CalledProcessError as e: sys.stderr.write(str(e.args)) if e.returncode != 3221225477: @@ -61,7 +42,7 @@ print("\n--------------------------------------------------") print(path) try: - subprocess.check_call([sys.executable, file]) + subprocess.check_call([sys.executable, str(file)]) 
except subprocess.CalledProcessError as e: sys.stderr.write(str(e.args)) if e.returncode != 3221225477: diff --git a/.ci/update_dpf_dependencies.py b/.ci/update_dpf_dependencies.py index a6f9d72d13..c3426d8a9d 100644 --- a/.ci/update_dpf_dependencies.py +++ b/.ci/update_dpf_dependencies.py @@ -15,7 +15,7 @@ import os import glob -import pathlib +from pathlib import Path import platform import shutil import zipfile @@ -23,21 +23,21 @@ grpc_path_key = "DPFDV_ROOT" gate_path_key = "ANSYSDPFPYGATE_ROOT" -core_path = pathlib.Path(__file__).parent.parent.resolve() +core_path = Path(__file__).parent.parent if "ANSYSDPFCORE_ROOT" in os.environ: core_path = os.environ["ANSYSDPFCORE_ROOT"] grpc_path = os.getenv(grpc_path_key, None) gate_path = os.getenv(gate_path_key, None) -if grpc_path is not None: +if grpc_path: # Update ansys-grpc-dpf with latest in proto/dist print("Updating ansys.grpc.dpf") - dist_path = os.path.join(grpc_path, "proto", "dist", "*") + dist_path = Path(grpc_path) / "proto" / "dist" print(f"from {dist_path}") - destination = os.path.join(core_path, "src") + destination = Path(core_path) / "src" print(f"into {destination}") - latest_wheel = max(glob.glob(dist_path), key=os.path.getctime) + latest_wheel = max(dist_path.glob("*"), key=os.path.getctime) with zipfile.ZipFile(latest_wheel, "r") as wheel: for file in wheel.namelist(): # print(file) @@ -50,40 +50,34 @@ else: print(f"{grpc_path_key} environment variable is not defined. " "Cannot update ansys-grpc-dpf.") -if gate_path is not None: +if gate_path: # Update ansys-dpf-gate print("Updating ansys.dpf.gate generated code") - dist_path = os.path.join(gate_path, "ansys-dpf-gate", "ansys", "dpf", "gate", "generated") + dist_path = Path(gate_path) / "ansys-dpf-gate" / "ansys" / "dpf" / "gate" / "generated" print(f"from {dist_path}") - destination = os.path.join(core_path, "src", "ansys", "dpf", "gate", "generated") + destination = Path(core_path) / "src" / "ansys" / "dpf" / "gate" / "generated" print(f"into {destination}") shutil.copytree( src=dist_path, dst=destination, dirs_exist_ok=True, - ignore=lambda directory, contents: ["__pycache__"] if directory[-5:] == "gate" else [], + ignore=lambda directory, contents: ["__pycache__"] if str(directory)[-5:] == "gate" else [], ) - dist_path = os.path.join(gate_path, "ansys-dpf-gate", "ansys", "dpf", "gate", "__init__.py") + + dist_path = Path(gate_path) / "ansys-dpf-gate" / "ansys" / "dpf" / "gate" / "__init__.py" print(f"from {dist_path}") - destination = os.path.join(core_path, "src", "ansys", "dpf", "gate", "__init__.py") + destination = Path(core_path) / "src" / "ansys" / "dpf" / "gate" / "__init__.py" print(f"into {destination}") - shutil.copy( - src=dist_path, - dst=destination, - ) + shutil.copy(src=dist_path, dst=destination) print("Done updating ansys.dpf.gate generated code") # Update ansys-dpf-gatebin print("Updating ansys.dpf.gatebin") - dist_path = os.path.join(gate_path, "ansys-dpf-gatebin", "ansys") + dist_path = Path(gate_path) / "ansys-dpf-gatebin" / "ansys" print(f"from {dist_path}") - destination = os.path.join(core_path, "src", "ansys") + destination = Path(core_path) / "src" / "ansys" print(f"into {destination}") - shutil.copytree( - src=dist_path, - dst=destination, - dirs_exist_ok=True, - ) + shutil.copytree(src=dist_path, dst=destination, dirs_exist_ok=True) print(f"Done updating ansys.dpf.gatebin for {platform.system()}") else: print( diff --git a/.github/workflows/update_operators.yml b/.github/workflows/update_operators.yml index 0236e94efc..fc1737e20c 100644 
--- a/.github/workflows/update_operators.yml +++ b/.github/workflows/update_operators.yml @@ -123,7 +123,7 @@ jobs: git status - name: "Create Pull Request" - uses: peter-evans/create-pull-request@v4 + uses: peter-evans/create-pull-request@v7 with: delete-branch: true add-paths: | diff --git a/doc/source/conf.py b/doc/source/conf.py index ffd92d8e88..e1b7a34961 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -62,7 +62,6 @@ ignored_pattern += f"|{example_name}" ignored_pattern += "|11-server_types.py" ignored_pattern += "|06-distributed_stress_averaging.py" -ignored_pattern += "|02-python_operators_with_dependencies.py" ignored_pattern += r")" # Autoapi ignore pattern diff --git a/examples/05-file-IO/00-hdf5_double_float_comparison.py b/examples/05-file-IO/00-hdf5_double_float_comparison.py index 730b00a72a..43cde7f7a5 100644 --- a/examples/05-file-IO/00-hdf5_double_float_comparison.py +++ b/examples/05-file-IO/00-hdf5_double_float_comparison.py @@ -36,7 +36,7 @@ # Import the ``dpf-core`` module and its examples files, and then create a # temporary directory. -import os +from pathlib import Path from ansys.dpf import core as dpf from ansys.dpf.core import examples @@ -78,8 +78,8 @@ # Define a temporary folder for outputs tmpdir = dpf.core.make_tmp_dir_server(dpf.SERVER) files = [ - dpf.path_utilities.join(tmpdir, "dpf_float.h5"), - dpf.path_utilities.join(tmpdir, "dpf_double.h5"), + Path(dpf.path_utilities.join(tmpdir, "dpf_float.h5")), + Path(dpf.path_utilities.join(tmpdir, "dpf_double.h5")), ] ############################################################################### # Export with simple precision. @@ -98,8 +98,8 @@ # Download the resulting .h5 files if necessary if not dpf.SERVER.local_server: - float_file_path = os.path.join(os.getcwd(), "dpf_float.h5") - double_file_path = os.path.join(os.getcwd(), "dpf_double.h5") + float_file_path = Path.cwd() / "dpf_float.h5" + double_file_path = Path.cwd() / "dpf_double.h5" dpf.download_file(files[0], float_file_path) dpf.download_file(files[1], double_file_path) else: @@ -109,8 +109,8 @@ ############################################################################### # Compare simple precision versus double precision. 
-float_precision = os.stat(float_file_path).st_size -double_precision = os.stat(double_file_path).st_size +float_precision = float_file_path.stat().st_size +double_precision = double_file_path.stat().st_size print( f"size with float precision: {float_precision}\n" f"size with double precision: {double_precision}" diff --git a/examples/05-file-IO/04-basic-load-file.py b/examples/05-file-IO/04-basic-load-file.py index 47b8fe14ea..adc3467780 100644 --- a/examples/05-file-IO/04-basic-load-file.py +++ b/examples/05-file-IO/04-basic-load-file.py @@ -58,16 +58,16 @@ # ~~~~~~~~~~~~~ # Export the fields container in the CSV format: -import os +from pathlib import Path csv_file_name = "simple_bar_fc.csv" # Define an output path for the resulting .csv file if not dpf.SERVER.local_server: # Define it server-side if using a remote server tmp_dir_path = dpf.core.make_tmp_dir_server(dpf.SERVER) - server_file_path = dpf.path_utilities.join(tmp_dir_path, csv_file_name) + server_file_path = Path(dpf.path_utilities.join(tmp_dir_path, csv_file_name)) else: - server_file_path = os.path.join(os.getcwd(), csv_file_name) + server_file_path = Path.cwd() / csv_file_name # Perform the export to csv on the server side export_csv_operator = dpf.operators.serialization.field_to_csv() @@ -81,7 +81,7 @@ # Download the file ``simple_bar_fc.csv``: if not dpf.SERVER.local_server: - downloaded_client_file_path = os.path.join(os.getcwd(), "simple_bar_fc_downloaded.csv") + downloaded_client_file_path = Path.cwd() / "simple_bar_fc_downloaded.csv" dpf.download_file(server_file_path, downloaded_client_file_path) else: downloaded_client_file_path = server_file_path @@ -98,7 +98,7 @@ mesh.plot(server_fc_out) # Remove file to avoid polluting. -os.remove(downloaded_client_file_path) +downloaded_client_file_path.unlink() ############################################################################### # Make operations over the fields container diff --git a/examples/08-python-operators/00-wrapping_numpy_capabilities.py b/examples/08-python-operators/00-wrapping_numpy_capabilities.py index 63d242ec39..90e0828b92 100644 --- a/examples/08-python-operators/00-wrapping_numpy_capabilities.py +++ b/examples/08-python-operators/00-wrapping_numpy_capabilities.py @@ -57,11 +57,12 @@ # Download and display the Python script. from ansys.dpf.core.examples import download_easy_statistics +from pathlib import Path -operator_file_path = download_easy_statistics() +operator_file_path = Path(download_easy_statistics()) -with open(operator_file_path, "r") as f: - for line in f.readlines(): +with operator_file_path.open() as file: + for line in file.readlines(): print("\t\t\t" + line) ############################################################################### @@ -76,15 +77,14 @@ # - The third argument is the name of the function used to record operators. # -import os from ansys.dpf import core as dpf from ansys.dpf.core import examples # Python plugins are not supported in process. dpf.start_local_server(config=dpf.AvailableServerConfigs.GrpcServer) -operator_server_file_path = dpf.upload_file_in_tmp_folder(operator_file_path) -dpf.load_library(os.path.dirname(operator_server_file_path), "py_easy_statistics", "load_operators") +operator_server_file_path = Path(dpf.upload_file_in_tmp_folder(operator_file_path)) +dpf.load_library(operator_server_file_path.parent, "py_easy_statistics", "load_operators") ############################################################################### # Instantiate the operator. 
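For readability, the net result of the hunk above (after the removed and added lines are applied) is the plugin-loading pattern below. This is a minimal sketch, assuming a local gRPC DPF server can be started and that ``download_easy_statistics()`` succeeds; it mirrors the example file, it is not additional API:

    from pathlib import Path

    from ansys.dpf import core as dpf
    from ansys.dpf.core.examples import download_easy_statistics

    # Python plugins are not supported in-process, so start a gRPC server.
    dpf.start_local_server(config=dpf.AvailableServerConfigs.GrpcServer)

    operator_file_path = Path(download_easy_statistics())
    server_file_path = Path(dpf.upload_file_in_tmp_folder(operator_file_path))

    # load_library now receives the parent folder of the uploaded script as a Path,
    # replacing os.path.dirname(...) on a plain string.
    dpf.load_library(server_file_path.parent, "py_easy_statistics", "load_operators")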
diff --git a/examples/08-python-operators/01-package_python_operators.py b/examples/08-python-operators/01-package_python_operators.py index f6fd6cfc39..b2de42266c 100644 --- a/examples/08-python-operators/01-package_python_operators.py +++ b/examples/08-python-operators/01-package_python_operators.py @@ -72,8 +72,6 @@ # for the plug-in package that is used to record operators. # -import os - from ansys.dpf import core as dpf from ansys.dpf.core import examples @@ -83,7 +81,7 @@ tmp = dpf.make_tmp_dir_server() dpf.upload_files_in_folder(dpf.path_utilities.join(tmp, "average_filter_plugin"), plugin_folder) dpf.load_library( - os.path.join(dpf.path_utilities.join(tmp, "average_filter_plugin")), + dpf.path_utilities.join(tmp, "average_filter_plugin"), "py_average_filter", "load_operators", ) diff --git a/examples/08-python-operators/02-python_operators_with_dependencies.py b/examples/08-python-operators/02-python_operators_with_dependencies.py index d4f80e3199..e46147e7bd 100644 --- a/examples/08-python-operators/02-python_operators_with_dependencies.py +++ b/examples/08-python-operators/02-python_operators_with_dependencies.py @@ -58,11 +58,12 @@ # created for you. import os +from pathlib import Path from ansys.dpf.core import examples -plugin_path = examples.download_gltf_plugin() -folder_root = os.path.join(os.getcwd().rsplit("pydpf-core", 1)[0], "pydpf-core") +plugin_path = Path(examples.download_gltf_plugin()) +folder_root = Path(str(Path.cwd()).rsplit("pydpf-core", 1)[0]) / "pydpf-core" # %% # To add third-party modules as dependencies to a plug-in package, you must @@ -83,8 +84,9 @@ # To simplify this step, you can add a requirements file in the plug-in package: # print("\033[1m gltf_plugin/requirements.txt: \n \033[0m") -with open(os.path.join(plugin_path, "requirements.txt"), "r") as f: - for line in f.readlines(): +requirements_path = plugin_path / "requirements.txt" +with requirements_path.open("r") as file: + for line in file.readlines(): print("\t\t\t" + line) @@ -117,26 +119,21 @@ # # create_sites_for_python_operators.sh -pluginpath /path/to/plugin -zippath /path/to/plugin/assets/linx64.zip # noqa: E501 - -if os.name == "nt" and not os.path.exists( - os.path.join(plugin_path, "assets", "gltf_sites_winx64.zip") -): - cmd_file = os.path.join( - folder_root, - "doc", - "source", - "user_guide", - "create_sites_for_python_operators.ps1", +site_path = plugin_path / "assets" / "gltf_sites_winx64.zip" +if os.name == "nt" and not site_path.exists(): + cmd_file = ( + folder_root / "doc" / "source" / "user_guide" / "create_sites_for_python_operators.ps1" ) args = [ "powershell", - cmd_file, + str(cmd_file), "-pluginpath", - plugin_path, + str(plugin_path), "-zippath", - os.path.join(plugin_path, "assets", "gltf_sites_winx64.zip"), + str(plugin_path / "assets" / "gltf_sites_winx64.zip"), ] print(args) + import subprocess process = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -150,20 +147,15 @@ ) else: print("Installing pygltf in a virtual environment succeeded") -elif os.name == "posix" and not os.path.exists( - os.path.join(plugin_path, "assets", "gltf_sites_linx64.zip") -): - cmd_file = os.path.join( - folder_root, - "doc", - "source", - "user_guide", - "create_sites_for_python_operators.sh", + +elif os.name == "posix" and not site_path.exists(): + cmd_file = ( + folder_root / "doc" / "source" / "user_guide" / "create_sites_for_python_operators.sh" ) run_cmd = f"{cmd_file}" args = ( f' -pluginpath "{plugin_path}" ' - f"-zippath \"{os.path.join(plugin_path, 
'assets', 'gltf_sites_linx64.zip')}\"" + f'-zippath "{plugin_path / "assets" / "gltf_sites_winx64.zip"}"' ) print(run_cmd + args) os.system(f"chmod u=rwx,o=x {cmd_file}") @@ -189,12 +181,14 @@ # Python plugins are not supported in process. dpf.start_local_server(config=dpf.AvailableServerConfigs.GrpcServer) -tmp = dpf.make_tmp_dir_server() -dpf.upload_files_in_folder(dpf.path_utilities.join(tmp, "plugins", "gltf_plugin"), plugin_path) -dpf.upload_file(plugin_path + ".xml", dpf.path_utilities.join(tmp, "plugins", "gltf_plugin.xml")) +tmp = Path(dpf.make_tmp_dir_server()) +dpf.upload_files_in_folder(dpf.path_utilities.join(str(tmp), "plugins", "gltf_plugin"), plugin_path) +dpf.upload_file( + str(plugin_path) + ".xml", dpf.path_utilities.join(str(tmp), "plugins", "gltf_plugin.xml") +) dpf.load_library( - dpf.path_utilities.join(tmp, "plugins", "gltf_plugin"), + dpf.path_utilities.join(str(tmp), "plugins", "gltf_plugin"), "py_dpf_gltf", "load_operators", ) @@ -235,8 +229,6 @@ # Use the custom operator # ----------------------- -import os - model = dpf.Model(dpf.upload_file_in_tmp_folder(examples.find_static_rst())) mesh = model.metadata.meshed_region @@ -245,14 +237,14 @@ displacement = model.results.displacement() displacement.inputs.mesh_scoping(skin_mesh) displacement.inputs.mesh(skin_mesh) -new_operator.inputs.path(os.path.join(tmp, "out")) +new_operator.inputs.path(str(tmp / "out")) new_operator.inputs.mesh(skin_mesh) new_operator.inputs.field(displacement.outputs.fields_container()[0]) new_operator.run() print("operator ran successfully") -dpf.download_file(os.path.join(tmp, "out.glb"), os.path.join(os.getcwd(), "out.glb")) +dpf.download_file(tmp / "out.glb", Path.cwd() / "out.glb") # %% # You can download :download:`output ` from the ``gltf`` operator. 
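The pyproject.toml change that follows enables Ruff's flake8-use-pathlib (PTH) rule, which flags the ``os.path``/``os`` call patterns removed throughout this patch. As a quick reference, the equivalences applied are (illustrative paths only, not taken from the code base):

    from pathlib import Path

    p = Path("some/dir") / "file.txt"             # os.path.join("some/dir", "file.txt")
    p.parent                                      # os.path.dirname(p)
    p.name                                        # os.path.basename(p)
    p.suffix                                      # os.path.splitext(p)[1]
    p.stat().st_size                              # os.stat(p).st_size
    p.exists(), p.is_dir(), p.is_file()           # os.path.exists / os.path.isdir / os.path.isfile
    p.unlink()                                    # os.remove(p)
    p.parent.mkdir(parents=True, exist_ok=True)   # os.makedirs(os.path.dirname(p), exist_ok=True)
    Path.cwd() / "out.glb"                        # os.path.join(os.getcwd(), "out.glb")
    Path("some/dir").glob("*.py")                 # glob.iglob(os.path.join("some/dir", "*.py")), lazily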
diff --git a/pyproject.toml b/pyproject.toml index 02b4cb90c2..c0443473a7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -70,7 +70,7 @@ select = [ # "F", # pyflakes, see https://beta.ruff.rs/docs/rules/#pyflakes-f # "I", # isort, see https://beta.ruff.rs/docs/rules/#isort-i # "N", # pep8-naming, see https://beta.ruff.rs/docs/rules/#pep8-naming-n -# "PTH", # flake9-use-pathlib, https://beta.ruff.rs/docs/rules/#flake8-use-pathlib-pth + "PTH", # flake9-use-pathlib, https://beta.ruff.rs/docs/rules/#flake8-use-pathlib-pth # "TD", # flake8-todos, https://docs.astral.sh/ruff/rules/#flake8-todos-td ] ignore = [ diff --git a/src/ansys/dpf/core/__init__.py b/src/ansys/dpf/core/__init__.py index 1c885d1d70..89711fcf5f 100644 --- a/src/ansys/dpf/core/__init__.py +++ b/src/ansys/dpf/core/__init__.py @@ -118,6 +118,7 @@ CustomTypeFieldsCollection:type = _CollectionFactory(CustomTypeField) GenericDataContainersCollection:type = _CollectionFactory(GenericDataContainer) StringFieldsCollection:type = _CollectionFactory(StringField) +OperatorsCollection: type = _CollectionFactory(Operator) AnyCollection:type = _Collection # for matplotlib diff --git a/src/ansys/dpf/core/common.py b/src/ansys/dpf/core/common.py index 1c13185b64..7e07167a20 100644 --- a/src/ansys/dpf/core/common.py +++ b/src/ansys/dpf/core/common.py @@ -32,6 +32,7 @@ import re import sys from enum import Enum +from typing import Dict from ansys.dpf.core.misc import module_exists from ansys.dpf.gate.common import locations, ProgressBarBase # noqa: F401 @@ -430,6 +431,51 @@ def type_to_special_dpf_constructors(): return _type_to_special_dpf_constructors +_derived_class_name_to_type = None + + +def derived_class_name_to_type() -> Dict[str, type]: + """ + Returns a mapping of derived class names to their corresponding Python classes. + + Returns + ------- + dict[str, type] + A dictionary mapping derived class names (str) to their corresponding + Python class objects. + """ + global _derived_class_name_to_type + if _derived_class_name_to_type is None: + from ansys.dpf.core.workflow_topology import WorkflowTopology + + _derived_class_name_to_type = {"WorkflowTopology": WorkflowTopology} + return _derived_class_name_to_type + + +def record_derived_class(class_name: str, py_class: type, overwrite: bool = False): + """ + Records a new derived class in the mapping of class names to their corresponding Python classes. + + This function updates the global dictionary that maps derived class names (str) to their corresponding + Python class objects (type). If the provided class name already exists in the dictionary, it will either + overwrite the existing mapping or leave it unchanged based on the `overwrite` flag. + + Parameters + ---------- + class_name : str + The name of the derived class to be recorded. + py_class : type + The Python class type corresponding to the derived class. + overwrite : bool, optional + A flag indicating whether to overwrite an existing entry for the `class_name`. + If `True`, the entry will be overwritten. If `False` (default), the entry will + not be overwritten if it already exists. 
+ """ + recorded_classes = derived_class_name_to_type() + if overwrite or class_name not in recorded_classes: + recorded_classes[class_name] = py_class + + def create_dpf_instance(type, internal_obj, server): spe_constructors = type_to_special_dpf_constructors() if type in spe_constructors: diff --git a/src/ansys/dpf/core/core.py b/src/ansys/dpf/core/core.py index 199f5295c4..e3b4cb7948 100644 --- a/src/ansys/dpf/core/core.py +++ b/src/ansys/dpf/core/core.py @@ -29,6 +29,7 @@ import logging import warnings import weakref +from pathlib import Path from ansys.dpf.core import errors, misc from ansys.dpf.core import server as server_module @@ -429,7 +430,6 @@ def load_library(self, file_path, name="", symbol="LoadOperators", generate_oper ) if generate_operators: # TODO: fix code generation upload posix - import os def __generate_code(TARGET_PATH, filename, name, symbol): from ansys.dpf.core.dpf_operator import Operator @@ -444,8 +444,8 @@ def __generate_code(TARGET_PATH, filename, name, symbol): except Exception as e: warnings.warn("Unable to generate the python code with error: " + str(e.args)) - local_dir = os.path.dirname(os.path.abspath(__file__)) - LOCAL_PATH = os.path.join(local_dir, "operators") + local_dir = Path(__file__).parent + LOCAL_PATH = local_dir / "operators" if not self._server().local_server: if self._server().os != "posix" or (not self._server().os and os.name != "posix"): # send local generated code @@ -762,23 +762,24 @@ def upload_files_in_folder( """ server_paths = [] for root, subdirectories, files in os.walk(client_folder_path): + root = Path(root) for subdirectory in subdirectories: - subdir = os.path.join(root, subdirectory) - for filename in os.listdir(subdir): - f = os.path.join(subdir, filename) + subdir = root / subdirectory + for filename in subdir.iterdir(): + f = subdir / filename server_paths = self._upload_and_get_server_path( specific_extension, - f, - filename, + str(f), + filename.name, server_paths, str(to_server_folder_path), subdirectory, ) for file in files: - f = os.path.join(root, file) + f = root / file server_paths = self._upload_and_get_server_path( specific_extension, - f, + str(f), file, server_paths, str(to_server_folder_path), @@ -836,7 +837,8 @@ def upload_file(self, file_path, to_server_file_path): server_file_path : str path generated server side """ - if os.stat(file_path).st_size == 0: + file_path = Path(file_path) + if file_path.stat().st_size == 0: raise ValueError(file_path + " is empty") if not self._server().has_client(): txt = """ @@ -868,11 +870,12 @@ def upload_file_in_tmp_folder(self, file_path, new_file_name=None): server_file_path : str path generated server side """ + file_path = Path(file_path) if new_file_name: file_name = new_file_name else: - file_name = os.path.basename(file_path) - if os.stat(file_path).st_size == 0: + file_name = Path(file_path).name + if file_path.stat().st_size == 0: raise ValueError(file_path + " is empty") if not self._server().has_client(): txt = """ diff --git a/src/ansys/dpf/core/custom_container_base.py b/src/ansys/dpf/core/custom_container_base.py new file mode 100644 index 0000000000..44df013144 --- /dev/null +++ b/src/ansys/dpf/core/custom_container_base.py @@ -0,0 +1,54 @@ +# Copyright (C) 2020 - 2024 ANSYS, Inc. and/or its affiliates. 
+# SPDX-License-Identifier: MIT +# +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +""" +CustomContainerBase +=================== +This module contains the `CustomContainerBase` class, which serves as a base +for creating wrappers around `GenericDataContainer` objects. + +These wrappers provide an interface for accessing and managing data in +generic containers, enabling more intuitive usage and the addition of custom +behaviors tailored to specific use cases. +""" + +from ansys.dpf.core.generic_data_container import GenericDataContainer + + +class CustomContainerBase: + """ + Base class for custom container wrappers. + + This class provides a common interface for managing an underlying + `GenericDataContainer` object. + """ + + def __init__(self, container: GenericDataContainer) -> None: + """ + Initialize the base container with a `GenericDataContainer`. + + Parameters + ---------- + container : GenericDataContainer + The underlying data container to be wrapped by this class. + """ + self._container = container diff --git a/src/ansys/dpf/core/custom_operator.py b/src/ansys/dpf/core/custom_operator.py index f5e8941951..582cd047d0 100644 --- a/src/ansys/dpf/core/custom_operator.py +++ b/src/ansys/dpf/core/custom_operator.py @@ -30,8 +30,7 @@ import abc import ctypes -import os -import pathlib +from pathlib import Path import re import shutil import tempfile @@ -85,23 +84,23 @@ def update_virtual_environment_for_custom_operators( raise NotImplementedError( "Updating the dpf-site.zip of a DPF Server is only available when InProcess." ) - current_dpf_site_zip_path = os.path.join(server.ansys_path, "dpf", "python", "dpf-site.zip") + current_dpf_site_zip_path = Path(server.ansys_path) / "dpf" / "python" / "dpf-site.zip" # Get the path to where we store the original dpf-site.zip - original_dpf_site_zip_path = os.path.join( - server.ansys_path, "dpf", "python", "original", "dpf-site.zip" + original_dpf_site_zip_path = ( + Path(server.ansys_path) / "dpf" / "python" / "original" / "dpf-site.zip" ) # Restore the original dpf-site.zip if restore_original: - if os.path.exists(original_dpf_site_zip_path): + if original_dpf_site_zip_path.exists(): shutil.move(src=original_dpf_site_zip_path, dst=current_dpf_site_zip_path) - os.rmdir(os.path.dirname(original_dpf_site_zip_path)) + original_dpf_site_zip_path.parent.rmdir() else: warnings.warn("No original dpf-site.zip found. 
Current is most likely the original.") else: # Store original dpf-site.zip for this DPF Server if no original is stored - if not os.path.exists(os.path.dirname(original_dpf_site_zip_path)): - os.mkdir(os.path.dirname(original_dpf_site_zip_path)) - if not os.path.exists(original_dpf_site_zip_path): + if not original_dpf_site_zip_path.parent.exists(): + original_dpf_site_zip_path.parent.mkdir() + if not original_dpf_site_zip_path.exists(): shutil.move(src=current_dpf_site_zip_path, dst=original_dpf_site_zip_path) # Get the current paths to site_packages import site @@ -111,46 +110,47 @@ def update_virtual_environment_for_custom_operators( # Get the first one targeting an actual site-packages folder for path_to_site_packages in paths_to_current_site_packages: if path_to_site_packages[-13:] == "site-packages": - current_site_packages_path = pathlib.Path(path_to_site_packages) + current_site_packages_path = Path(path_to_site_packages) break if current_site_packages_path is None: warnings.warn("Could not find a currently loaded site-packages folder to update from.") return # If an ansys.dpf.core.path file exists, then the installation is editable - search_path = pathlib.Path(current_site_packages_path) + search_path = current_site_packages_path potential_editable = list(search_path.rglob("__editable__.ansys_dpf_core-*.pth")) if potential_editable: path_file = potential_editable[0] else: # Keep for older setuptools versions - path_file = os.path.join(current_site_packages_path, "ansys.dpf.core.pth") - if os.path.exists(path_file): + path_file = current_site_packages_path / "ansys.dpf.core.pth" + if path_file.exists(): # Treat editable installation of ansys-dpf-core - with open(path_file, "r") as f: - current_site_packages_path = f.readline().strip() + with path_file.open("r") as f: + current_site_packages_path = Path(f.readline().strip()) with tempfile.TemporaryDirectory() as tmpdir: - os.mkdir(os.path.join(tmpdir, "ansys_dpf_core")) - ansys_dir = os.path.join(tmpdir, "ansys_dpf_core") - os.mkdir(os.path.join(ansys_dir, "ansys")) - os.mkdir(os.path.join(ansys_dir, "ansys", "dpf")) - os.mkdir(os.path.join(ansys_dir, "ansys", "grpc")) + tmpdir = Path(tmpdir) + ansys_dir = tmpdir / "ansys_dpf_core" + ansys_dir.mkdir() + ansys_dir.joinpath("ansys").mkdir() + ansys_dir.joinpath("ansys", "dpf").mkdir() + ansys_dir.joinpath("ansys", "grpc").mkdir() shutil.copytree( - src=os.path.join(current_site_packages_path, "ansys", "dpf", "core"), - dst=os.path.join(ansys_dir, "ansys", "dpf", "core"), + src=current_site_packages_path / "ansys" / "dpf" / "core", + dst=ansys_dir / "ansys" / "dpf" / "core", ignore=lambda directory, contents: ["__pycache__", "result_files"], ) shutil.copytree( - src=os.path.join(current_site_packages_path, "ansys", "dpf", "gate"), - dst=os.path.join(ansys_dir, "ansys", "dpf", "gate"), + src=current_site_packages_path / "ansys" / "dpf" / "gate", + dst=ansys_dir / "ansys" / "dpf" / "gate", ignore=lambda directory, contents: ["__pycache__"], ) shutil.copytree( - src=os.path.join(current_site_packages_path, "ansys", "grpc", "dpf"), - dst=os.path.join(ansys_dir, "ansys", "grpc", "dpf"), + src=current_site_packages_path / "ansys" / "grpc" / "dpf", + dst=ansys_dir / "ansys" / "grpc" / "dpf", ignore=lambda directory, contents: ["__pycache__"], ) # Find the .dist_info folder pattern = re.compile(r"^ansys_dpf_core\S*") - for p in pathlib.Path(current_site_packages_path).iterdir(): + for p in current_site_packages_path.iterdir(): if p.is_dir(): # print(p.stem) if re.search(pattern, p.stem): @@ 
-158,12 +158,12 @@ def update_virtual_environment_for_custom_operators( break shutil.copytree( src=dist_info_path, - dst=os.path.join(ansys_dir, dist_info_path.name), + dst=ansys_dir / dist_info_path.name, ) # Zip the files as dpf-site.zip - base_name = os.path.join(tmpdir, "ansys_dpf_core_zip") + base_name = tmpdir / "ansys_dpf_core_zip" base_dir = "." - root_dir = os.path.join(tmpdir, "ansys_dpf_core") # OK + root_dir = tmpdir / "ansys_dpf_core" # OK shutil.make_archive( base_name=base_name, root_dir=root_dir, base_dir=base_dir, format="zip" ) @@ -173,7 +173,7 @@ def update_virtual_environment_for_custom_operators( for item in original.infolist(): if "ansys" not in item.filename: archive.writestr(item, original.read(item)) - with zipfile.ZipFile(base_name + ".zip", mode="r") as original: + with zipfile.ZipFile(str(base_name) + ".zip", mode="r") as original: for item in original.infolist(): archive.writestr(item, original.read(item)) diff --git a/src/ansys/dpf/core/data_sources.py b/src/ansys/dpf/core/data_sources.py index d694f54b5a..f547f9e700 100644 --- a/src/ansys/dpf/core/data_sources.py +++ b/src/ansys/dpf/core/data_sources.py @@ -28,6 +28,7 @@ """ import os +from pathlib import Path import warnings import traceback from typing import Union @@ -142,7 +143,7 @@ def set_result_file_path(self, filepath, key=""): ['/tmp/file.rst'] """ - extension = os.path.splitext(filepath)[1] + extension = Path(filepath).suffix # Handle .res files from CFX if key == "" and extension == ".res": key = "cas" @@ -162,7 +163,7 @@ def set_result_file_path(self, filepath, key=""): def guess_result_key(filepath: str) -> str: """Guess result key for files without a file extension.""" result_keys = ["d3plot", "binout"] - base_name = os.path.basename(filepath) + base_name = Path(filepath).name # Handle files without extension for result_key in result_keys: if result_key in base_name: @@ -172,14 +173,13 @@ def guess_result_key(filepath: str) -> str: @staticmethod def guess_second_key(filepath: str) -> str: """For files with an h5 or cff extension, look for another extension.""" + + # These files usually end with .cas.h5 or .dat.h5 accepted = ["cas", "dat"] - without_ext = os.path.splitext(filepath)[0] - new_split = os.path.splitext(without_ext) + new_split = Path(filepath).suffixes new_key = "" - if len(new_split) > 1: - key = new_split[1][1:] - if key in accepted: - new_key = key + if new_split[0].strip(".") in accepted: + new_key = new_split[0].strip(".") return new_key def set_domain_result_file_path( @@ -241,9 +241,12 @@ def add_file_path(self, filepath, key="", is_domain: bool = False, domain_id=0): """ # The filename needs to be a fully qualified file name - if not os.path.dirname(filepath): + # if not os.path.dirname(filepath) + + filepath = Path(filepath) + if not filepath.parent.name: # append local path - filepath = os.path.join(os.getcwd(), os.path.basename(filepath)) + filepath = Path.cwd() / filepath.name if is_domain: if key == "": raise NotImplementedError("A key must be given when using is_domain=True.") @@ -280,9 +283,10 @@ def add_domain_file_path(self, filepath, key, domain_id): """ # The filename needs to be a fully qualified file name - if not os.path.dirname(filepath): + filepath = Path(filepath) + if not filepath.parent.name: # append local path - filepath = os.path.join(os.getcwd(), os.path.basename(filepath)) + filepath = Path.cwd() / filepath.name self._api.data_sources_add_domain_file_path_with_key_utf8( self, str(filepath), key, domain_id ) @@ -307,9 +311,10 @@ def 
add_file_path_for_specified_result(self, filepath, key="", result_key=""): The default is ``""``, in which case the key is found directly. """ # The filename needs to be a fully qualified file name - if not os.path.dirname(filepath): + filepath = Path(filepath) + if not filepath.parent.name: # append local path - filepath = os.path.join(os.getcwd(), os.path.basename(filepath)) + filepath = Path.cwd() / filepath.name self._api.data_sources_add_file_path_for_specified_result_utf8( self, str(filepath), key, result_key diff --git a/src/ansys/dpf/core/dpf_operator.py b/src/ansys/dpf/core/dpf_operator.py index b0e2add66b..c20274848d 100644 --- a/src/ansys/dpf/core/dpf_operator.py +++ b/src/ansys/dpf/core/dpf_operator.py @@ -127,6 +127,7 @@ def __init__(self, name=None, config=None, server=None, operator=None): self._internal_obj = None self._description = None self._inputs = None + self._id = None # step 1: get server self._server = server_module.get_or_create_server( @@ -384,6 +385,7 @@ def _type_to_output_method(self): mesh_info, collection_base, any, + custom_container_base, ) out = [ @@ -481,6 +483,15 @@ def _type_to_output_method(self): self._api.operator_getoutput_as_any, lambda obj, type: any.Any(server=self._server, any_dpf=obj).cast(type), ), + ( + custom_container_base.CustomContainerBase, + self._api.operator_getoutput_generic_data_container, + lambda obj, type: type( + container=generic_data_container.GenericDataContainer( + generic_data_container=obj, server=self._server + ) + ), + ), ] if hasattr(self._api, "operator_getoutput_generic_data_container"): out.append( @@ -651,6 +662,30 @@ def config(self, value): """ self._api.operator_set_config(self, value) + @property + @version_requires("10.0") + def id(self) -> int: + """Retrieve the unique identifier of the operator. + + This property returns the unique ID associated with the operator. + This property is lazily initialized. + + Returns + ------- + int + The unique identifier of the operator. + + Notes + ----- + Property available with server's version starting at 10.0. + """ + if self._id is None: + operator_id_op = Operator("operator_id", server=self._server) + operator_id_op.connect_operator_as_input(0, self) + self._id = operator_id_op.outputs.id() + + return self._id + @property def inputs(self): """Inputs connected to the operator. 
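# Hedged usage sketch for the new ``Operator.id`` property added in the hunk above
# (requires a DPF server of version 10.0 or later; the "forward" operator is only
# used as a convenient stand-in, any operator instance works the same way):
#
#     from ansys.dpf import core as dpf
#
#     server = dpf.start_local_server()
#     op = dpf.Operator("forward", server=server)
#     # First access runs the "operator_id" operator server-side; the value is then cached.
#     print(op.id)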
@@ -726,8 +761,10 @@ def default_config(name, server=None): def __del__(self): try: - if self._internal_obj is not None: - self._deleter_func[0](self._deleter_func[1](self)) + if hasattr(self, "_deleter_func"): + obj = self._deleter_func[1](self) + if obj is not None: + self._deleter_func[0](obj) except: warnings.warn(traceback.format_exc()) diff --git a/src/ansys/dpf/core/examples/downloads.py b/src/ansys/dpf/core/examples/downloads.py index 95fdb30fd0..53b168d145 100644 --- a/src/ansys/dpf/core/examples/downloads.py +++ b/src/ansys/dpf/core/examples/downloads.py @@ -26,6 +26,7 @@ Download example datasets from https://github.com/ansys/example-data""" import os +from pathlib import Path import urllib.request import warnings from typing import Union @@ -45,7 +46,7 @@ def delete_downloads(verbose=True): from ansys.dpf.core import LOCAL_DOWNLOADED_EXAMPLES_PATH, examples not_to_remove = [ - getattr(examples.examples, item) + Path(getattr(examples.examples, item)) for item in dir(examples.examples) if not item.startswith("_") and not item.endswith("_") @@ -53,27 +54,28 @@ def delete_downloads(verbose=True): ] not_to_remove.extend( [ - os.path.join(os.path.dirname(examples.__file__), "__init__.py"), - os.path.join(os.path.dirname(examples.__file__), "downloads.py"), - os.path.join(os.path.dirname(examples.__file__), "examples.py"), + Path(examples.__file__).parent / "__init__.py", + Path(examples.__file__).parent / "downloads.py", + Path(examples.__file__).parent / "examples.py", ] ) for root, dirs, files in os.walk(LOCAL_DOWNLOADED_EXAMPLES_PATH, topdown=False): + root = Path(root) if root not in not_to_remove: for name in files: - if not os.path.join(root, name) in not_to_remove: + file_path = root / name + if not file_path in not_to_remove: try: - os.remove(os.path.join(root, name)) + file_path.unlink() if verbose: - print(f"deleting {os.path.join(root, name)}") + print(f"deleting {file_path}") except Exception as e: - warnings.warn( - f"couldn't delete {os.path.join(root, name)} with error:\n {e.args}" - ) + warnings.warn(f"couldn't delete {file_path} with error:\n {e.args}") for root, dirs, files in os.walk(LOCAL_DOWNLOADED_EXAMPLES_PATH, topdown=False): if len(dirs) == 0 and len(files) == 0: try: - os.rmdir(root) + root = Path(root) + root.rmdir() if verbose: print(f"deleting {root}") except Exception as e: @@ -89,21 +91,22 @@ def _retrieve_file(url, filename, directory): from ansys.dpf.core import LOCAL_DOWNLOADED_EXAMPLES_PATH # First check if file has already been downloaded - local_path = os.path.join(LOCAL_DOWNLOADED_EXAMPLES_PATH, directory, filename) - local_path_no_zip = local_path.replace(".zip", "") - if os.path.isfile(local_path_no_zip) or os.path.isdir(local_path_no_zip): - return local_path_no_zip + local_examples_download_path = Path(LOCAL_DOWNLOADED_EXAMPLES_PATH) + local_path = local_examples_download_path / directory / filename + local_path_no_zip = Path(str(local_path).replace(".zip", "")) + if local_path_no_zip.is_file() or local_path_no_zip.is_dir(): + return str(local_path_no_zip) # grab the correct url retriever urlretrieve = urllib.request.urlretrieve - dirpath = os.path.dirname(local_path) - if not os.path.isdir(dirpath): - os.makedirs(dirpath, exist_ok=True) + dirpath = local_path.parent + if not dirpath.is_dir(): + dirpath.mkdir(parents=True, exist_ok=True) # Perform download _, resp = urlretrieve(url, local_path) - return local_path + return str(local_path) def _download_file(directory, filename, should_upload: bool, server, return_local_path): @@ -1999,7 
+2002,7 @@ def find_distributed_msup_folder( server, return_local_path, ) - return os.path.dirname(path) + return str(Path(path).parent) def download_average_filter_plugin( @@ -2135,7 +2138,7 @@ def _retrieve_plugin( for file in file_list: EXAMPLE_FILE = GITHUB_SOURCE_URL + file operator_file_path = _retrieve_file(EXAMPLE_FILE, file, directory="python_plugins") - path = os.path.dirname( - find_files(operator_file_path, should_upload, server, return_local_path) + path = str( + Path(find_files(operator_file_path, should_upload, server, return_local_path)).parent ) return path diff --git a/src/ansys/dpf/core/examples/examples.py b/src/ansys/dpf/core/examples/examples.py index 31f1587a77..b6285df9bf 100644 --- a/src/ansys/dpf/core/examples/examples.py +++ b/src/ansys/dpf/core/examples/examples.py @@ -29,6 +29,7 @@ """ import os +from pathlib import Path from ansys.dpf.core import server as server_module from ansys.dpf.core.core import upload_file_in_tmp_folder @@ -55,7 +56,8 @@ def get_example_required_minimum_dpf_version(file: os.PathLike) -> str: in_header = False previous_line_is_note = False minimum_version_str = "0.0" - with open(file, "r") as f: + file = Path(file) + with file.open("r") as f: for line in f: if line[:3] == header_flag: if not in_header: diff --git a/src/ansys/dpf/core/helpers/utils.py b/src/ansys/dpf/core/helpers/utils.py index 395f4b4ea5..b43986b884 100644 --- a/src/ansys/dpf/core/helpers/utils.py +++ b/src/ansys/dpf/core/helpers/utils.py @@ -22,6 +22,7 @@ import inspect import sys +from typing import Any, Optional def _sort_supported_kwargs(bound_method, **kwargs): @@ -48,3 +49,42 @@ def _sort_supported_kwargs(bound_method, **kwargs): warnings.warn(txt) # Return the accepted arguments return kwargs_in + + +def indent(text: Any, subsequent_indent: str = "", initial_indent: Optional[str] = None) -> str: + """Indents each line of a given text. + + Parameters + ---------- + text : Any + The input text to be indented. If it is not already a string, it will be converted to one. + subsequent_indent : str, optional + The string to prefix all lines of the text after the first line. Default is an empty string. + initial_indent : Optional[str], optional + The string to prefix the first line of the text. If not provided, `subsequent_indent` will be used. + + Returns + ------- + str + The indented text with specified prefixes applied to each line. 
+ + Examples + -------- + >>> text = "Hello\\nWorld" + >>> print(indent(text, subsequent_indent=" ", initial_indent="--> ")) + --> Hello + World + """ + if initial_indent is None: + initial_indent = subsequent_indent + + if not isinstance(text, str): + text = str(text) + + lines = text.rstrip().splitlines() + indented_lines = [ + f"{initial_indent if index == 0 else subsequent_indent}{line}" + for (index, line) in enumerate(lines) + ] + + return "\n".join(indented_lines) diff --git a/src/ansys/dpf/core/misc.py b/src/ansys/dpf/core/misc.py index 2eafb2045a..c10c42a41b 100644 --- a/src/ansys/dpf/core/misc.py +++ b/src/ansys/dpf/core/misc.py @@ -26,6 +26,7 @@ import glob import os import re +from pathlib import Path from pkgutil import iter_modules from ansys.dpf.core import errors @@ -120,7 +121,7 @@ def get_ansys_path(ansys_path=None): '- or by setting it by default with the environment variable "ANSYS_DPF_PATH"' ) # parse the version to an int and check for supported - ansys_folder_name = str(ansys_path).split(os.sep)[-1] + ansys_folder_name = Path(ansys_path).parts[-1] reobj_vXYZ = re.compile("^v[0123456789]{3}$") if reobj_vXYZ.match(ansys_folder_name): # vXYZ Unified Install folder @@ -171,10 +172,10 @@ def find_ansys(): base_path = None if os.name == "nt": - base_path = os.path.join(os.environ["PROGRAMFILES"], "ANSYS INC") + base_path = Path(os.environ["PROGRAMFILES"]) / "ANSYS INC" elif os.name == "posix": - for path in ["/usr/ansys_inc", "/ansys_inc"]: - if os.path.isdir(path): + for path in [Path("/usr/ansys_inc"), Path("/ansys_inc")]: + if path.is_dir(): base_path = path else: raise OSError(f"Unsupported OS {os.name}") @@ -182,16 +183,16 @@ def find_ansys(): if base_path is None: return base_path - paths = glob.glob(os.path.join(base_path, "v*")) + paths = base_path.glob("v*") - if not paths: + if not list(paths): return None versions = {} for path in paths: - ver_str = path[-3:] + ver_str = str(path)[-3:] if is_float(ver_str): - versions[int(ver_str)] = path + versions[int(ver_str)] = str(path) return versions[max(versions.keys())] diff --git a/src/ansys/dpf/core/outputs.py b/src/ansys/dpf/core/outputs.py index e7dc9c3da6..4109a669bc 100644 --- a/src/ansys/dpf/core/outputs.py +++ b/src/ansys/dpf/core/outputs.py @@ -81,17 +81,24 @@ def get_data(self): elif type_output == "int32": type_output = types.int + output = self._operator.get_output(self._pin, type_output) + type_output_derive_class = self._spec.name_derived_class + if type_output_derive_class == "": + return output + + from ansys.dpf.core.common import derived_class_name_to_type + + derived_type = derived_class_name_to_type().get(type_output_derive_class) + if derived_type is not None: + return derived_type(output) - if type_output_derive_class != "": - out_type = [ - type_tuple - for type_tuple in self._operator._type_to_output_method - if type_output_derive_class in type_tuple - ] - return out_type[0][0](self._operator.get_output(self._pin, type_output)) - else: - return self._operator.get_output(self._pin, type_output) + derived_types = [ + type_tuple + for type_tuple in self._operator._type_to_output_method + if type_output_derive_class in type_tuple + ] + return derived_types[0][0](output) def __call__(self): return self.get_data() diff --git a/src/ansys/dpf/core/path_utilities.py b/src/ansys/dpf/core/path_utilities.py index 70d00ad450..d4b8f235e6 100644 --- a/src/ansys/dpf/core/path_utilities.py +++ b/src/ansys/dpf/core/path_utilities.py @@ -61,7 +61,7 @@ def join(*args, **kwargs): server = None parts = [] for a in 
args: - if isinstance(a, (str, Path)) and len(a) > 0: + if isinstance(a, (str, Path)) and len(str(a)) > 0: parts.append(str(a)) elif isinstance(a, ansys.dpf.core.server_types.LegacyGrpcServer): server = a @@ -73,7 +73,7 @@ def join(*args, **kwargs): if ansys.dpf.core.server_types.RUNNING_DOCKER.use_docker: current_os = "posix" else: - return os.path.join(*args) + return str(Path(args[0]).joinpath(*args[1:])) else: current_os = server.os diff --git a/src/ansys/dpf/core/plotter.py b/src/ansys/dpf/core/plotter.py index cdf4ad5457..7992358291 100644 --- a/src/ansys/dpf/core/plotter.py +++ b/src/ansys/dpf/core/plotter.py @@ -35,6 +35,7 @@ import sys import numpy as np import warnings +from pathlib import Path from typing import TYPE_CHECKING, List, Union from ansys import dpf @@ -1019,7 +1020,7 @@ def _plot_contour_using_vtk_file(self, fields_container, notebook=None): # mesh_provider.inputs.data_sources.connect(self._evaluator._model.metadata.data_sources) # create a temporary file at the default temp directory - path = os.path.join(tempfile.gettempdir(), "dpf_temp_hokflb2j9s.vtk") + path = Path(tempfile.gettempdir()) / "dpf_temp_hokflb2j9s.vtk" vtk_export = dpf.core.Operator("vtk_export") vtk_export.inputs.mesh.connect(self._mesh) @@ -1028,8 +1029,8 @@ def _plot_contour_using_vtk_file(self, fields_container, notebook=None): vtk_export.run() grid = pv.read(path) - if os.path.exists(path): - os.remove(path) + if path.exists(): + path.unlink() names = grid.array_names field_name = fields_container[0].name diff --git a/src/ansys/dpf/core/plugins.py b/src/ansys/dpf/core/plugins.py index e7d728b948..4c2a125c63 100644 --- a/src/ansys/dpf/core/plugins.py +++ b/src/ansys/dpf/core/plugins.py @@ -28,6 +28,7 @@ """ import os.path +from pathlib import Path try: import importlib.metadata as importlib_metadata @@ -60,14 +61,14 @@ def load_plugin_on_server(plugin, server=None, symbol="load_operators", generate # Get the path to the plugin from the package installation if len([p for p in importlib_metadata.files(plugin) if "__init__.py" in str(p)]) > 0: file_path = [p for p in importlib_metadata.files(plugin) if "__init__.py" in str(p)][0] - plugin_path = str(os.path.dirname(file_path.locate())) + plugin_path = str(file_path.locate().parent) # For some reason the "locate()" function returns a path with src doubled - plugin_path = plugin_path.replace("src" + os.path.sep + "src", "src") + plugin_path = Path(plugin_path.replace("src" + os.path.sep + "src", "src")) elif len([p for p in importlib_metadata.files(plugin) if ".pth" in str(p)]) > 0: path_file = [p for p in importlib_metadata.files(plugin) if ".pth" in str(p)][0].locate() - with open(path_file, "r") as file: - plugin_path = file.readline()[:-1] - plugin_path = os.path.join(plugin_path, "ansys", "dpf", "plugins", plugin_name) + with path_file.open("r") as file: + plugin_path = Path(file.readline()[:-1]) + plugin_path = plugin_path / "ansys" / "dpf" / "plugins" / plugin_name else: raise ModuleNotFoundError(f"Could not locate files for plugin {plugin}") @@ -93,7 +94,7 @@ def load_plugin_on_server(plugin, server=None, symbol="load_operators", generate # Upload xml file for the plugin _ = dpf.upload_files_in_folder( target_xml_path, - os.path.join(plugin_path, os.pardir), + plugin_path.parent, specific_extension=".xml", server=server, ) diff --git a/src/ansys/dpf/core/server_types.py b/src/ansys/dpf/core/server_types.py index a3d1f7d904..c1901f7f45 100644 --- a/src/ansys/dpf/core/server_types.py +++ b/src/ansys/dpf/core/server_types.py @@ -41,6 +41,7 @@ 
from abc import ABC import ctypes from typing import TYPE_CHECKING, Union +from pathlib import Path import psutil @@ -68,14 +69,14 @@ def _get_dll_path(name, ansys_path=None): """Helper function to get the right dll path for Linux or Windows""" ISPOSIX = os.name == "posix" - ANSYS_INSTALL = core.misc.get_ansys_path(ansys_path) + ANSYS_INSTALL = Path(core.misc.get_ansys_path(ansys_path)) api_path = load_api._get_path_in_install() if api_path is None: raise ImportError(f"Could not find API path in install.") - SUB_FOLDERS = os.path.join(ANSYS_INSTALL, api_path) + SUB_FOLDERS = ANSYS_INSTALL / api_path if ISPOSIX: name = "lib" + name - return os.path.join(SUB_FOLDERS, name) + return SUB_FOLDERS / name def check_valid_ip(ip): @@ -92,18 +93,19 @@ def check_valid_ip(ip): def _verify_ansys_path_is_valid(ansys_path, executable, path_in_install=None): if path_in_install is None: path_in_install = load_api._get_path_in_install() - if os.path.isdir(f"{ansys_path}/{path_in_install}"): - dpf_run_dir = f"{ansys_path}/{path_in_install}" + ansys_path = Path(ansys_path) + if ansys_path.joinpath(path_in_install).is_dir(): + dpf_run_dir = ansys_path / path_in_install else: - dpf_run_dir = f"{ansys_path}" - if not os.path.isdir(dpf_run_dir): + dpf_run_dir = ansys_path + if not dpf_run_dir.is_dir(): raise NotADirectoryError( f'Invalid ansys path at "{ansys_path}". ' "Unable to locate the directory containing DPF at " f'"{dpf_run_dir}"' ) else: - if not os.path.exists(os.path.join(dpf_run_dir, executable)): + if not dpf_run_dir.joinpath(executable).exists(): raise FileNotFoundError( f'DPF executable not found at "{dpf_run_dir}". ' f'Unable to locate the executable "{executable}"' @@ -117,7 +119,7 @@ def _run_launch_server_process( bShell = False if docker_config.use_docker: docker_server_port = int(os.environ.get("DOCKER_SERVER_PORT", port)) - dpf_run_dir = os.getcwd() + dpf_run_dir = Path.cwd() if os.name == "posix": bShell = True run_cmd = docker_config.docker_run_cmd_command(docker_server_port, port) @@ -135,7 +137,7 @@ def _run_launch_server_process( path_in_install = load_api._get_path_in_install(internal_folder="bin") dpf_run_dir = _verify_ansys_path_is_valid(ansys_path, executable, path_in_install) - old_dir = os.getcwd() + old_dir = Path.cwd() os.chdir(dpf_run_dir) if not bShell: process = subprocess.Popen(run_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -940,11 +942,11 @@ def __init__( name = "DataProcessingCore" path = _get_dll_path(name, ansys_path) try: - data_processing_core_load_api(path, "common") + data_processing_core_load_api(str(path), "common") except Exception as e: - if not os.path.isdir(os.path.dirname(path)): + if not path.parent.is_dir(): raise NotADirectoryError( - f"DPF directory not found at {os.path.dirname(path)}" + f"DPF directory not found at {path.parent}" f"Unable to locate the following file: {path}" ) raise e diff --git a/src/ansys/dpf/core/workflow.py b/src/ansys/dpf/core/workflow.py index d582f5c176..b0227362a4 100644 --- a/src/ansys/dpf/core/workflow.py +++ b/src/ansys/dpf/core/workflow.py @@ -31,6 +31,7 @@ import os import traceback import warnings +from pathlib import Path from enum import Enum from typing import Union @@ -333,6 +334,7 @@ def _type_to_output_method(self): collection_base, streams_container, ) + from ansys.dpf.core.custom_container_base import CustomContainerBase out = [ (streams_container.StreamsContainer, self._api.work_flow_getoutput_streams), @@ -421,6 +423,15 @@ def _type_to_output_method(self): self._api.work_flow_getoutput_as_any, lambda 
obj, type: any.Any(server=self._server, any_dpf=obj).cast(type), ), + ( + CustomContainerBase, + self._api.work_flow_getoutput_generic_data_container, + lambda obj, type: type( + container=generic_data_container.GenericDataContainer( + generic_data_container=obj, server=self._server + ) + ), + ), + ] if hasattr(self._api, "work_flow_connect_generic_data_container"): out.append(
@@ -932,11 +943,11 @@ def view( name = title if save_as: - dot_path = os.path.splitext(str(save_as))[0] + ".dot" - image_path = save_as + image_path = Path(save_as) + dot_path = image_path.with_suffix(".dot") else: - dot_path = os.path.join(os.getcwd(), f"{name}.dot") - image_path = os.path.join(os.getcwd(), f"{name}.png") + image_path = Path.cwd() / f"{name}.png" + dot_path = image_path.with_suffix(".dot") # Create graphviz file of workflow self.to_graphviz(dot_path)
@@ -946,13 +957,33 @@ # View workflow graphviz.view(filepath=image_path) if not keep_dot_file: - os.remove(dot_path) + dot_path.unlink() return image_path def to_graphviz(self, path: Union[os.PathLike, str]): """Saves the workflow to a GraphViz file.""" return self._api.work_flow_export_graphviz(self, str(path)) + @version_requires("10.0") + def get_topology(self): + """Get the topology of the workflow. + + Returns + ------- + workflow_topology : workflow_topology.WorkflowTopology + + Notes + ----- + Available with server version 10.0 and later. + """ + workflow_to_workflow_topology_op = dpf_operator.Operator( + "workflow_to_workflow_topology", server=self._server + ) + workflow_to_workflow_topology_op.inputs.workflow.connect(self) + workflow_topology = workflow_to_workflow_topology_op.outputs.workflow_topology() + + return workflow_topology + def __del__(self): try: if hasattr(self, "_internal_obj"):
diff --git a/src/ansys/dpf/core/workflow_topology/__init__.py b/src/ansys/dpf/core/workflow_topology/__init__.py new file mode 100644 index 0000000000..1b670cd721 --- /dev/null +++ b/src/ansys/dpf/core/workflow_topology/__init__.py @@ -0,0 +1,26 @@ +# Copyright (C) 2020 - 2024 ANSYS, Inc. and/or its affiliates. +# SPDX-License-Identifier: MIT +# +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE.
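The new `Workflow.get_topology` method above returns a `WorkflowTopology` object whose collections print and iterate like regular Python containers. A minimal usage sketch, assuming a DPF server of version 10.0 or later and the usual `Workflow.add_operator` API (the workflow content itself is arbitrary):

    from ansys.dpf import core as dpf
    from ansys.dpf.core import operators as ops

    # Any workflow works; even an empty one has a valid (empty) topology.
    wf = dpf.Workflow()
    wf.add_operator(ops.utility.forward())

    # Requires a server >= 10.0, where "workflow_to_workflow_topology" is available.
    topology = wf.get_topology()
    print(topology)  # summary of operators, connections, and exposed pins

    for connection in topology.operator_connections:
        print(connection.source_operator.name, "->", connection.target_operator.name)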
+ +from .workflow_topology import WorkflowTopology +from .operator_connection import OperatorConnection +from .data_connection import DataConnection +from .exposed_pin import ExposedPin diff --git a/src/ansys/dpf/core/workflow_topology/data_connection.py b/src/ansys/dpf/core/workflow_topology/data_connection.py new file mode 100644 index 0000000000..5ccf6e9246 --- /dev/null +++ b/src/ansys/dpf/core/workflow_topology/data_connection.py @@ -0,0 +1,200 @@ +# Copyright (C) 2020 - 2024 ANSYS, Inc. and/or its affiliates. +# SPDX-License-Identifier: MIT +# +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +""" +DataConnection +============== +This module contains the `DataConnection` and `DataConnectionsCollection` +classes, which represent individual connections between data and operator, +and a collection of such connections within a workflow, respectively. +""" + +from typing import Any, Iterator, Optional +from ansys.dpf.core import GenericDataContainersCollection +from ansys.dpf.core.custom_container_base import CustomContainerBase +from ansys.dpf.core.dpf_operator import Operator +from ansys.dpf.core.generic_data_container import GenericDataContainer + + +class DataConnection(CustomContainerBase): + """ + Represents a connection between a data and an operator in a workflow. + + This class provides access to the source data and target operator, as well as its pin ID. + """ + + def __init__(self, container: GenericDataContainer) -> None: + """ + Initialize an DataConnection object. + + Parameters + ---------- + container : GenericDataContainer + The underlying data container that holds the connection's information. + """ + super().__init__(container) + + self._source_data: Optional[Any] = None + self._target_operator: Optional[Operator] = None + self._target_pin_id: Optional[int] = None + + @property + def source_data(self) -> Any: + """ + Retrieve the source data of the connection. + + Returns + ------- + Any + The data serving as the source of this connection. + """ + if self._source_data is None: + self._source_data = self._container.get_property("source_data") + + return self._source_data + + @property + def target_operator(self) -> Operator: + """ + Retrieve the target operator of the connection. + + Returns + ------- + Operator + The operator serving as the target of this connection. 
+ """ + if self._target_operator is None: + self._target_operator = self._container.get_property("target_operator", Operator) + + return self._target_operator + + @property + def target_pin_id(self) -> int: + """ + Retrieve the pin ID of the target operator. + + Returns + ------- + int + The pin ID of the target operator. + """ + if self._target_pin_id is None: + self._target_pin_id = self._container.get_property("target_pin_id", int) + + return self._target_pin_id + + def __str__(self) -> str: + """ + Return a string representation of the data connection. + + This includes the source data and target operator, with its pin ID. + + Returns + ------- + str + String representation of the data connection. + """ + from ansys.dpf.core.helpers.utils import indent + + indents = " " + return ( + "DataConnection with properties:\n" + " - source_data:\n" + f"{indent(self.source_data, indents)}\n" + " - target_operator:\n" + f"{indent(self.target_operator.name, indents)}\n" + " - target_pin_id:\n" + f"{indent(self.target_pin_id, indents)}" + ) + + +class DataConnectionsCollection: + """ + Represents a collection of data connections in a workflow. + + This class provides iterable access to all data connections, allowing retrieval + of individual connections or iteration through the entire collection. + """ + + def __init__(self, collection: GenericDataContainersCollection) -> None: + """ + Initialize an DataConnectionsCollection object. + + Parameters + ---------- + collection : GenericDataContainersCollection + The underlying collection of data connections. + """ + self._collection = collection + + def __len__(self) -> int: + """ + Return the number of data connections in the collection. + + Returns + ------- + int + The number of data connections. + """ + return len(self._collection) + + def __getitem__(self, index: int) -> DataConnection: + """ + Retrieve a data connection by its index. + + Parameters + ---------- + index : int + The index of the data connection to retrieve. + + Returns + ------- + DataConnection + The data connection at the specified index. + """ + return DataConnection(self._collection[index]) + + def __iter__(self) -> Iterator[DataConnection]: + """ + Iterate over the data connections in the collection. + + Yields + ------ + DataConnection + The next data connection in the collection. + """ + for i in range(len(self)): + yield self[i] + + def __str__(self) -> str: + """ + Return a string representation of the data connections collection. + + Returns + ------- + str + String representation of the collection. + """ + from ansys.dpf.core.helpers.utils import indent + + indents = (" ", " - ") + return "\n".join([indent(data_connection, *indents) for data_connection in self]) diff --git a/src/ansys/dpf/core/workflow_topology/exposed_pin.py b/src/ansys/dpf/core/workflow_topology/exposed_pin.py new file mode 100644 index 0000000000..761730a657 --- /dev/null +++ b/src/ansys/dpf/core/workflow_topology/exposed_pin.py @@ -0,0 +1,201 @@ +# Copyright (C) 2020 - 2024 ANSYS, Inc. and/or its affiliates. 
+# SPDX-License-Identifier: MIT +# +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +""" +ExposedPin +========== +This module contains the `ExposedPin` and `ExposedPinsCollection` classes, +which represent individual exposed pins and a collection of exposed pins in a workflow, +respectively. These classes enable easy access to the pins that serve as input/output points +for the workflow. +""" + +from typing import Iterator, Optional +from ansys.dpf.core import GenericDataContainersCollection +from ansys.dpf.core.custom_container_base import CustomContainerBase +from ansys.dpf.core.dpf_operator import Operator +from ansys.dpf.core.generic_data_container import GenericDataContainer + + +class ExposedPin(CustomContainerBase): + """ + Represents an exposed input or output pin in a workflow. + + This class provides access to the name and the associated operator, as well as its pin ID. + """ + + def __init__(self, container: GenericDataContainer) -> None: + """ + Initialize an ExposedPin object. + + Parameters + ---------- + container : GenericDataContainer + The underlying data container that holds the exposed pin's information. + """ + super().__init__(container) + + self._name: Optional[str] = None + self._operator: Optional[Operator] = None + self._pin_id: Optional[int] = None + + @property + def name(self) -> str: + """ + Retrieve the name of the exposed pin. + + Returns + ------- + str + The name of the exposed pin. + """ + if self._name is None: + self._name = self._container.get_property("name", str) + + return self._name + + @property + def operator(self) -> Operator: + """ + Retrieve the operator associated with the exposed pin. + + Returns + ------- + Operator + The operator associated with this exposed pin. + """ + if self._operator is None: + self._operator = self._container.get_property("operator", Operator) + + return self._operator + + @property + def pin_id(self) -> int: + """ + Retrieve the pin ID of the operator. + + Returns + ------- + int + The pin ID of the operator. + """ + if self._pin_id is None: + self._pin_id = self._container.get_property("pin_id", int) + + return self._pin_id + + def __str__(self) -> str: + """ + Return a string representation of the exposed pin. + + This includes the name and associated operator, with its pin ID. + + Returns + ------- + str + String representation of the exposed pin. 
+ """ + from ansys.dpf.core.helpers.utils import indent + + indents = " " + return ( + "ExposedPin with properties:\n" + " - name:\n" + f"{indent(self.name, indents)}\n" + " - operator:\n" + f"{indent(self.operator.name, indents)}\n" + " - pin_id:\n" + f"{indent(self.pin_id, indents)}" + ) + + +class ExposedPinsCollection: + """ + Represents a collection of exposed pins in a workflow. + + This class provides iterable access to all exposed pins, allowing retrieval + of individual exposed pins or iteration through the entire collection. + """ + + def __init__(self, collection: GenericDataContainersCollection) -> None: + """ + Initialize an ExposedPinsCollection object. + + Parameters + ---------- + collection : GenericDataContainersCollection + The underlying collection of exposed pins. + """ + self._collection = collection + + def __len__(self) -> int: + """ + Return the number of exposed pins in the collection. + + Returns + ------- + int + The number of exposed pins. + """ + return len(self._collection) + + def __getitem__(self, index: int) -> ExposedPin: + """ + Retrieve an exposed pin by its index. + + Parameters + ---------- + index : int + The index of the exposed pin to retrieve. + + Returns + ------- + ExposedPin + The exposed pin at the specified index. + """ + return ExposedPin(self._collection[index]) + + def __iter__(self) -> Iterator[ExposedPin]: + """ + Iterate over the exposed pins in the collection. + + Yields + ------ + ExposedPin + The next exposed pin in the collection. + """ + for i in range(len(self)): + yield self[i] + + def __str__(self) -> str: + """ + Return a string representation of the exposed pins collection. + + Returns + ------- + str + String representation of the collection. + """ + from ansys.dpf.core.helpers.utils import indent + + indents = (" ", " - ") + return "\n".join([indent(exposed_pin, *indents) for exposed_pin in self]) diff --git a/src/ansys/dpf/core/workflow_topology/operator_connection.py b/src/ansys/dpf/core/workflow_topology/operator_connection.py new file mode 100644 index 0000000000..09891ff8da --- /dev/null +++ b/src/ansys/dpf/core/workflow_topology/operator_connection.py @@ -0,0 +1,218 @@ +# Copyright (C) 2020 - 2024 ANSYS, Inc. and/or its affiliates. +# SPDX-License-Identifier: MIT +# +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
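The `DataConnection` and `ExposedPin` wrappers above are read-only views over generic data containers, so walking a topology needs nothing beyond iteration and the properties they expose. A short sketch, assuming a `WorkflowTopology` obtained from `Workflow.get_topology()`:

    def describe_topology_pins(topology):
        """Print exposed pins and direct data inputs of a WorkflowTopology (sketch)."""
        for pin in topology.exposed_inputs:
            print(f"input '{pin.name}' -> {pin.operator.name} (pin {pin.pin_id})")
        for pin in topology.exposed_outputs:
            print(f"output '{pin.name}' <- {pin.operator.name} (pin {pin.pin_id})")
        for connection in topology.data_connections:
            print(
                f"{connection.source_data!r} -> "
                f"{connection.target_operator.name} (pin {connection.target_pin_id})"
            )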
+ +""" +OperatorConnection +================== +This module contains the `OperatorConnection` and `OperatorConnectionsCollection` +classes, which represent individual connections between operators and a +collection of such connections within a workflow, respectively. +""" + +from typing import Iterator, Optional +from ansys.dpf.core import GenericDataContainersCollection +from ansys.dpf.core.custom_container_base import CustomContainerBase +from ansys.dpf.core.dpf_operator import Operator +from ansys.dpf.core.generic_data_container import GenericDataContainer + + +class OperatorConnection(CustomContainerBase): + """ + Represents a connection between two operators in a workflow. + + This class provides access to the source and target operators, as well as their respective pin IDs. + """ + + def __init__(self, container: GenericDataContainer) -> None: + """ + Initialize an OperatorConnection object. + + Parameters + ---------- + container : GenericDataContainer + The underlying data container that holds the connection's information. + """ + super().__init__(container) + + self._source_operator: Optional[Operator] = None + self._source_pin_id: Optional[int] = None + self._target_operator: Optional[Operator] = None + self._target_pin_id: Optional[int] = None + + @property + def source_operator(self) -> Operator: + """ + Retrieve the source operator of the connection. + + Returns + ------- + Operator + The operator serving as the source of this connection. + """ + if self._source_operator is None: + self._source_operator = self._container.get_property("source_operator", Operator) + + return self._source_operator + + @property + def source_pin_id(self) -> int: + """ + Retrieve the pin ID of the source operator. + + Returns + ------- + int + The pin ID of the source operator. + """ + if self._source_pin_id is None: + self._source_pin_id = self._container.get_property("source_pin_id", int) + + return self._source_pin_id + + @property + def target_operator(self) -> Operator: + """ + Retrieve the target operator of the connection. + + Returns + ------- + Operator + The operator serving as the target of this connection. + """ + if self._target_operator is None: + self._target_operator = self._container.get_property("target_operator", Operator) + + return self._target_operator + + @property + def target_pin_id(self) -> int: + """ + Retrieve the pin ID of the target operator. + + Returns + ------- + int + The pin ID of the target operator. + """ + if self._target_pin_id is None: + self._target_pin_id = self._container.get_property("target_pin_id", int) + + return self._target_pin_id + + def __str__(self) -> str: + """ + Return a string representation of the operator connection. + + This includes the source and target operators and their respective pin IDs. + + Returns + ------- + str + String representation of the operator connection. + """ + from ansys.dpf.core.helpers.utils import indent + + indents = " " + return ( + "OperatorConnection with properties:\n" + " - source_operator:\n" + f"{indent(self.source_operator.name, indents)}\n" + " - source_pin_id:\n" + f"{indent(self.source_pin_id, indents)}\n" + " - target_operator:\n" + f"{indent(self.target_operator.name, indents)}\n" + " - target_pin_id:\n" + f"{indent(self.target_pin_id, indents)}" + ) + + +class OperatorConnectionsCollection: + """ + Represents a collection of operator connections in a workflow. 
+ + This class provides iterable access to all operator connections, allowing retrieval + of individual connections or iteration through the entire collection. + """ + + def __init__(self, collection: GenericDataContainersCollection) -> None: + """ + Initialize an OperatorConnectionsCollection object. + + Parameters + ---------- + collection : GenericDataContainersCollection + The underlying collection of operator connections. + """ + self._collection = collection + + def __len__(self) -> int: + """ + Return the number of operator connections in the collection. + + Returns + ------- + int + The number of operator connections. + """ + return len(self._collection) + + def __getitem__(self, index: int) -> OperatorConnection: + """ + Retrieve an operator connection by its index. + + Parameters + ---------- + index : int + The index of the operator connection to retrieve. + + Returns + ------- + OperatorConnection + The operator connection at the specified index. + """ + return OperatorConnection(self._collection[index]) + + def __iter__(self) -> Iterator[OperatorConnection]: + """ + Iterate over the operator connections in the collection. + + Yields + ------ + OperatorConnection + The next operator connection in the collection. + """ + for i in range(len(self)): + yield self[i] + + def __str__(self) -> str: + """ + Return a string representation of the operator connections collection. + + Returns + ------- + str + String representation of the collection. + """ + from ansys.dpf.core.helpers.utils import indent + + indents = (" ", " - ") + return "\n".join([indent(operator_connection, *indents) for operator_connection in self]) diff --git a/src/ansys/dpf/core/workflow_topology/workflow_topology.py b/src/ansys/dpf/core/workflow_topology/workflow_topology.py new file mode 100644 index 0000000000..8728afb31a --- /dev/null +++ b/src/ansys/dpf/core/workflow_topology/workflow_topology.py @@ -0,0 +1,185 @@ +# Copyright (C) 2020 - 2024 ANSYS, Inc. and/or its affiliates. +# SPDX-License-Identifier: MIT +# +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +""" +WorkflowTopology +================ +This module contains the `WorkflowTopology` class, which represents +the structure and relationships within a workflow, including its operators, +connections, and exposed input/output pins. 
+""" + +from typing import Optional +from ansys.dpf.core import OperatorsCollection +from ansys.dpf.core.custom_container_base import CustomContainerBase +from ansys.dpf.core.generic_data_container import GenericDataContainer +from ansys.dpf.core.workflow_topology.data_connection import DataConnectionsCollection +from ansys.dpf.core.workflow_topology.exposed_pin import ExposedPinsCollection +from ansys.dpf.core.workflow_topology.operator_connection import OperatorConnectionsCollection + + +class WorkflowTopology(CustomContainerBase): + """ + Represents the topology of a workflow, including its operators, connections, and exposed input/output pins. + """ + + def __init__(self, container: GenericDataContainer) -> None: + """ + Initialize a WorkflowTopology object. + + Parameters + ---------- + container : GenericDataContainer + The underlying data container that holds the workflow topology information. + """ + super().__init__(container) + + self._operators: Optional[OperatorsCollection] = None + self._operator_connections: Optional[OperatorConnectionsCollection] = None + self._data_connections: Optional[DataConnectionsCollection] = None + self._exposed_inputs: Optional[ExposedPinsCollection] = None + self._exposed_outputs: Optional[ExposedPinsCollection] = None + + @property + def operators(self) -> OperatorsCollection: + """ + Retrieve the operators in the workflow. + + Returns + ------- + OperatorsCollection + A collection of all the operators in the workflow. + """ + if self._operators is None: + self._operators = self._container.get_property("operators", OperatorsCollection) + + return self._operators + + @property + def operator_connections(self) -> OperatorConnectionsCollection: + """ + Retrieve the operator connections in the workflow. + + Returns + ------- + OperatorConnectionsCollection + A collection of all the operator connections in the workflow. + """ + from ansys.dpf.core import GenericDataContainersCollection + + if self._operator_connections is None: + self._operator_connections = OperatorConnectionsCollection( + self._container.get_property( + "operator_connections", GenericDataContainersCollection + ) + ) + + return self._operator_connections + + @property + def data_connections(self) -> DataConnectionsCollection: + """ + Retrieve the data connections in the workflow. + + Returns + ------- + OperatorConnectionsCollection + A collection of all the data connections in the workflow. + """ + from ansys.dpf.core import GenericDataContainersCollection + + if self._data_connections is None: + self._data_connections = DataConnectionsCollection( + self._container.get_property("data_connections", GenericDataContainersCollection) + ) + + return self._data_connections + + @property + def exposed_inputs(self) -> ExposedPinsCollection: + """ + Retrieve the exposed inputs in the workflow. + + Returns + ------- + ExposedPinsCollection + A collection of all the exposed inputs in the workflow. + """ + from ansys.dpf.core import GenericDataContainersCollection + + if self._exposed_inputs is None: + self._exposed_inputs = ExposedPinsCollection( + self._container.get_property("exposed_inputs", GenericDataContainersCollection) + ) + + return self._exposed_inputs + + @property + def exposed_outputs(self) -> ExposedPinsCollection: + """ + Retrieve the exposed outputs in the workflow. + + Returns + ------- + ExposedPinsCollection + A collection of all the exposed outputs in the workflow. 
+ """ + from ansys.dpf.core import GenericDataContainersCollection + + if self._exposed_outputs is None: + self._exposed_outputs = ExposedPinsCollection( + self._container.get_property("exposed_outputs", GenericDataContainersCollection) + ) + + return self._exposed_outputs + + def __str__(self) -> str: + """ + Return a string representation of the workflow topology. + + The string provides details about the workflow's operators, connections, and exposed pins. + + Returns + ------- + str + String representation of the workflow topology. + """ + from ansys.dpf.core.helpers.utils import indent + + def indent_operators(operators): + indents = (" ", " - ") + return "\n".join([indent(operator.name, *indents) for operator in operators]) + + indents = " " + return ( + "WorkflowTopology with properties:\n" + f" - operators (len: {len(self.operators)}):\n" + f"{indent_operators(self.operators)}\n" + f" - operator_connections (len: {len(self.operator_connections)}):\n" + f"{indent(self.operator_connections, indents)}\n" + f" - data_connections (len: {len(self.data_connections)}):\n" + f"{indent(self.data_connections, indents)}\n" + f" - exposed_inputs (len: {len(self.exposed_inputs)}):\n" + f"{indent(self.exposed_inputs, indents)}\n" + f" - exposed_outputs (len: {len(self.exposed_outputs)}):\n" + f"{indent(self.exposed_outputs, indents)}" + ) diff --git a/tests/conftest.py b/tests/conftest.py index 73c3f59ce9..6dc7353e4d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -28,6 +28,7 @@ import os import functools +from pathlib import Path import psutil import pytest @@ -54,10 +55,10 @@ def _get_test_files_directory(): if local_test_repo is False: - test_path = os.path.join(os.path.dirname(os.path.abspath(__file__))) - return os.path.join(test_path, os.pardir, "tests", "testfiles") + test_path = Path(__file__).parent + return str(test_path.parent / "tests" / "testfiles") else: - return os.path.join(os.environ["AWP_UNIT_TEST_FILES"], "python") + return str(Path(os.environ["AWP_UNIT_TEST_FILES"]).joinpath("python")) if os.name == "posix": @@ -94,11 +95,11 @@ def resolve_test_file(basename, additional_path="", is_in_examples=None): if is_in_examples: return examples.find_files(getattr(examples, is_in_examples)) else: - test_files_path = _get_test_files_directory() - filename = os.path.join(test_files_path, additional_path, basename) - if not os.path.isfile(filename): + test_files_path = Path(_get_test_files_directory()) + filename = test_files_path.joinpath(additional_path, basename) + if not filename.is_file(): raise FileNotFoundError(f"Unable to locate {basename} at {test_files_path}") - return examples.find_files(filename) + return examples.find_files(str(filename)) @pytest.fixture() @@ -391,6 +392,8 @@ def decorator(func): if version == "5.0" else not SERVERS_VERSION_GREATER_THAN_OR_EQUAL_TO_6_0 if version == "6.0" + else not SERVERS_VERSION_GREATER_THAN_OR_EQUAL_TO_10_0 + if version == "10.0" else True, reason=f"Requires server version greater than or equal to {version}", raises=core.errors.DpfVersionNotSupported, diff --git a/tests/entry/conftest.py b/tests/entry/conftest.py index 027c1ca64e..4ae3a6041f 100644 --- a/tests/entry/conftest.py +++ b/tests/entry/conftest.py @@ -29,6 +29,7 @@ """ import os +from pathlib import Path import functools import pytest @@ -54,10 +55,10 @@ def _get_test_files_directory(): if local_test_repo is False: - test_path = os.path.join(os.path.dirname(os.path.abspath(__file__))) - return os.path.join(test_path, os.pardir, "tests", "testfiles") + test_path = 
Path(__file__).parent + return str(test_path.parent / "testfiles") else: - return os.path.join(os.environ["AWP_UNIT_TEST_FILES"], "python") + return str(Path(os.environ["AWP_UNIT_TEST_FILES"]).joinpath("python")) if os.name == "posix": diff --git a/tests/test_animation.py b/tests/test_animation.py index d1f7185b3e..4161bb9845 100644 --- a/tests/test_animation.py +++ b/tests/test_animation.py @@ -21,6 +21,7 @@ # SOFTWARE. import os +from pathlib import Path import pytest @@ -43,8 +44,8 @@ def remove_gifs(request): """Remove GIF once finished.""" def remove_gif(): - if os.path.exists(os.path.join(os.getcwd(), gif_name)): - os.remove(os.path.join(os.getcwd(), gif_name)) + if Path.cwd().joinpath(gif_name).exists(): + Path.cwd().joinpath(gif_name).unlink() request.addfinalizer(remove_gif) diff --git a/tests/test_animator.py b/tests/test_animator.py index d0fada4cd8..c5ef1f5fab 100644 --- a/tests/test_animator.py +++ b/tests/test_animator.py @@ -21,6 +21,7 @@ # SOFTWARE. import os +from pathlib import Path import pytest @@ -42,8 +43,8 @@ def remove_gifs(request): """Remove GIF once finished.""" def remove_gif(): - if os.path.exists(os.path.join(os.getcwd(), gif_name)): - os.remove(os.path.join(os.getcwd(), gif_name)) + if Path.cwd().joinpath(gif_name).exists(): + Path.cwd().joinpath(gif_name).unlink() request.addfinalizer(remove_gif) @@ -250,5 +251,5 @@ def test_animator_animate_fields_container_cpos(remove_gifs, displacement_fields off_screen=True, show_axes=True, ) - assert os.path.isfile(gif_name) - assert os.path.getsize(gif_name) > 6000 + assert Path(gif_name).is_file() + assert Path(gif_name).stat().st_size > 6000 diff --git a/tests/test_any.py b/tests/test_any.py index d91c8042b7..468cccd301 100644 --- a/tests/test_any.py +++ b/tests/test_any.py @@ -136,7 +136,7 @@ def test_cast_workflow_any(server_type): @pytest.mark.skipif( not conftest.SERVERS_VERSION_GREATER_THAN_OR_EQUAL_TO_10_0, - reason="any does not support operator below 8.0", + reason="any does not support operator below 10.0", ) def test_cast_operator_any(server_type): entity = dpf.Operator(server=server_type, name="U") diff --git a/tests/test_code_docstrings.py b/tests/test_code_docstrings.py index 5b518a0d34..af3287e795 100644 --- a/tests/test_code_docstrings.py +++ b/tests/test_code_docstrings.py @@ -28,7 +28,7 @@ import doctest import os -import pathlib +from pathlib import Path import pytest @@ -36,14 +36,13 @@ @pytest.mark.skipif(True, reason="examples are created for windows") def test_doctest_allfiles(): directory = r"../ansys/dpf/core" - actual_path = pathlib.Path(__file__).parent.absolute() - # actual_path = os.getcwd() + actual_path = Path(__file__).parent.absolute() print(actual_path) - for filename in os.listdir(os.path.join(actual_path, directory)): + for filename in os.listdir(actual_path / directory): if filename.endswith(".py"): - path = os.path.join(directory, filename) + path = Path(directory) / filename print(path) - doctest.testfile(path, verbose=True, raise_on_error=True) + doctest.testfile(str(path), verbose=True, raise_on_error=True) else: continue @@ -51,21 +50,21 @@ def test_doctest_allfiles(): @pytest.mark.skipif(True, reason="examples are created for windows") def test_doctest_allexamples(): directory = r"../examples" - actual_path = pathlib.Path(__file__).parent.absolute() + actual_path = Path(__file__).parent.absolute() handled_files = [] - for root, subdirectories, files in os.walk(os.path.join(actual_path, directory)): + for root, subdirectories, _ in os.walk(actual_path / directory): for 
subdirectory in subdirectories: - subdir = os.path.join(root, subdirectory) + subdir = Path(root) / subdirectory print(subdir) for filename in os.listdir(subdir): if filename.endswith(".py"): - path = os.path.join(subdir, filename) - if ".ipynb_checkpoints" in path: + path = subdir / filename + if ".ipynb_checkpoints" in str(path): continue print(path) - handled_files.append(path) + handled_files.append(str(path)) exec( - open(path, mode="r", encoding="utf8").read(), + path.read_text(encoding="utf-8"), globals(), globals(), ) diff --git a/tests/test_codegeneration.py b/tests/test_codegeneration.py index a673928560..a0e2b12543 100644 --- a/tests/test_codegeneration.py +++ b/tests/test_codegeneration.py @@ -24,6 +24,7 @@ import os import copy import tempfile +from pathlib import Path import ansys.grpc.dpf import numpy as np @@ -153,7 +154,7 @@ def test_operator_any_input(allkindofcomplexity): serialization.inputs.any_input3.connect(u.outputs) # create a temporary file at the default temp directory - path = os.path.join(tempfile.gettempdir(), "dpf_temp_ser.txt") + path = str(Path(tempfile.gettempdir()) / "dpf_temp_ser.txt") if not core.SERVER.local_server: core.upload_file_in_tmp_folder(examples.find_static_rst(return_local_path=True)) path = core.path_utilities.join(core.make_tmp_dir_server(), "dpf_temp_ser.txt") @@ -171,8 +172,9 @@ def test_operator_any_input(allkindofcomplexity): assert hasattr(fc, "outputs") == False - if os.path.exists(path): - os.remove(path) + path = Path(path) + if path.exists(): + path.unlink() def test_create_op_with_inputs(plate_msup): diff --git a/tests/test_data_tree.py b/tests/test_data_tree.py index 08a660c31c..10416d38c4 100644 --- a/tests/test_data_tree.py +++ b/tests/test_data_tree.py @@ -24,6 +24,7 @@ import os import pytest import conftest +from pathlib import Path @conftest.raises_for_servers_version_under("4.0") @@ -174,16 +175,16 @@ def test_write_to_file_data_tree(tmpdir, server_type): to_fill.list_int = [1, 2] to_fill.list_double = [1.5, 2.5] to_fill.list_string = ["hello", "bye"] - data_tree.write_to_txt(os.path.join(tmpdir, "file.txt")) - data_tree = dpf.DataTree.read_from_txt(os.path.join(tmpdir, "file.txt"), server=server_type) + data_tree.write_to_txt(str(Path(tmpdir) / "file.txt")) + data_tree = dpf.DataTree.read_from_txt(str(Path(tmpdir) / "file.txt"), server=server_type) assert data_tree.has("int") assert data_tree.has("double") assert data_tree.has("string") assert data_tree.has("list_int") assert data_tree.has("list_double") assert data_tree.has("list_string") - data_tree.write_to_json(os.path.join(tmpdir, "file.json")) - data_tree = dpf.DataTree.read_from_json(os.path.join(tmpdir, "file.json"), server=server_type) + data_tree.write_to_json(str(Path(tmpdir) / "file.json")) + data_tree = dpf.DataTree.read_from_json(str(Path(tmpdir) / "file.json"), server=server_type) assert data_tree.has("int") assert data_tree.has("double") assert data_tree.has("string") @@ -207,19 +208,17 @@ def test_write_to_file_remote_data_tree(tmpdir, server_clayer_remote_process): to_fill.list_int = [1, 2] to_fill.list_double = [1.5, 2.5] to_fill.list_string = ["hello", "bye"] - data_tree.write_to_txt(os.path.join(tmpdir, "file.txt")) - data_tree = dpf.DataTree.read_from_txt( - os.path.join(tmpdir, "file.txt"), server=server_connected - ) + data_tree.write_to_txt(str(Path(tmpdir) / "file.txt")) + data_tree = dpf.DataTree.read_from_txt(str(Path(tmpdir) / "file.txt"), server=server_connected) assert data_tree.has("int") assert data_tree.has("double") assert 
data_tree.has("string") assert data_tree.has("list_int") assert data_tree.has("list_double") assert data_tree.has("list_string") - data_tree.write_to_json(os.path.join(tmpdir, "file.json")) + data_tree.write_to_json(str(Path(tmpdir) / "file.json")) data_tree = dpf.DataTree.read_from_json( - os.path.join(tmpdir, "file.json"), server=server_connected + str(Path(tmpdir) / "file.json"), server=server_connected ) assert data_tree.has("int") assert data_tree.has("double") diff --git a/tests/test_examples.py b/tests/test_examples.py index b7989a0a0f..d5764adc91 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -22,7 +22,7 @@ """Verify all examples can be accessed or downloaded""" -import os.path +from pathlib import Path import pytest @@ -152,12 +152,12 @@ def test_find_examples(example, server_type_remote_process): def test_delete_downloaded_files(): - path = examples.download_multi_stage_cyclic_result(return_local_path=True) - assert os.path.exists(path) + path = Path(examples.download_multi_stage_cyclic_result(return_local_path=True)) + assert path.exists() examples.delete_downloads(verbose=False) - assert not os.path.exists(path) - path = examples.download_multi_stage_cyclic_result(return_local_path=True) - assert os.path.exists(path) + assert not path.exists() + path = Path(examples.download_multi_stage_cyclic_result(return_local_path=True)) + assert path.exists() def test_get_example_required_minimum_dpf_version(tmp_path): @@ -197,12 +197,12 @@ def test_get_example_required_minimum_dpf_version(tmp_path): def test_download_easy_statistics(): - assert os.path.exists(examples.download_easy_statistics(return_local_path=True)) + assert Path(examples.download_easy_statistics(return_local_path=True)).exists() def test_download_average_filter_plugin(): - assert os.path.exists(examples.download_average_filter_plugin(return_local_path=True)) + assert Path(examples.download_average_filter_plugin(return_local_path=True)).exists() def test_download_gltf_plugin(): - assert os.path.exists(examples.download_gltf_plugin(return_local_path=True)) + assert Path(examples.download_gltf_plugin(return_local_path=True)).exists() diff --git a/tests/test_launcher.py b/tests/test_launcher.py index 293d4f165d..be99ea4171 100644 --- a/tests/test_launcher.py +++ b/tests/test_launcher.py @@ -21,6 +21,7 @@ # SOFTWARE. 
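The test updates in this part of the diff apply the same mechanical mapping from os.path calls to their pathlib equivalents. A self-contained sketch of the correspondences used throughout (the temporary directory exists only for illustration):

    import tempfile
    from pathlib import Path

    tmp = Path(tempfile.mkdtemp())               # tempfile returns a str, wrap it once
    file_path = tmp / "file.txt"                 # os.path.join(tmp, "file.txt")
    file_path.write_text("hello", encoding="utf-8")

    assert file_path.exists()                    # os.path.exists(...)
    assert file_path.is_file()                   # os.path.isfile(...)
    assert file_path.parent.is_dir()             # os.path.isdir(os.path.dirname(...))
    print(file_path.name, file_path.stem, file_path.suffix)  # basename / splitext
    print(file_path.stat().st_size)              # os.path.getsize(...)
    print(Path.cwd())                            # os.getcwd()

    file_path.unlink()                           # os.remove(...)
    (tmp / "subdir").mkdir()                     # os.mkdir(os.path.join(tmp, "subdir"))
    print(str(file_path))                        # str() where an API expects a plain string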
import os +from pathlib import Path import pytest import psutil @@ -192,10 +193,10 @@ def test_start_local_wrong_ansys_path(self, server_config): def test_launch_server_full_path(self, server_config): ansys_path = core.misc.get_ansys_path() if os.name == "nt": - path = os.path.join(ansys_path, "aisol", "bin", "winx64") + path = Path(ansys_path) / "aisol" / "bin" / "winx64" else: if server_config.protocol == core.server_factory.CommunicationProtocols.InProcess: - path = os.path.join(ansys_path, "aisol", "dll", "linx64") + path = Path(ansys_path) / "aisol" / "dll" / "linx64" elif ( server_config.protocol == core.server_factory.CommunicationProtocols.gRPC and server_config.legacy is False @@ -204,11 +205,13 @@ def test_launch_server_full_path(self, server_config): # Ans.Dpf.Grpc.sh reside in two different folders return else: - path = os.path.join(ansys_path, "aisol", "bin", "linx64") + path = Path(ansys_path) / "aisol" / "bin" / "linx64" # print("trying to launch on ", path) # print(os.listdir(path)) - server = core.start_local_server(as_global=False, ansys_path=path, config=server_config) + server = core.start_local_server( + as_global=False, ansys_path=str(path), config=server_config + ) assert "server_port" in server.info @@ -219,7 +222,7 @@ def test_start_local_failed_executable(remote_config_server_type): with pytest.raises(FileNotFoundError): path = Path(get_ansys_path()).parent.absolute() - core.start_local_server(ansys_path=path, config=remote_config_server_type) + core.start_local_server(ansys_path=str(path), config=remote_config_server_type) @pytest.mark.skipif(not running_docker, reason="Checks docker start server") diff --git a/tests/test_operator.py b/tests/test_operator.py index 8cfadd185b..64294c37e7 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -25,6 +25,7 @@ import shutil import types import weakref +from pathlib import Path import numpy as np import pytest @@ -33,8 +34,11 @@ from ansys import dpf from ansys.dpf.core import errors from ansys.dpf.core import operators as ops +from ansys.dpf.core.common import derived_class_name_to_type, record_derived_class +from ansys.dpf.core.custom_container_base import CustomContainerBase from ansys.dpf.core.misc import get_ansys_path from ansys.dpf.core.operator_specification import Specification +from ansys.dpf.core.workflow_topology import WorkflowTopology import conftest from conftest import ( SERVERS_VERSION_GREATER_THAN_OR_EQUAL_TO_3_0, @@ -446,8 +450,8 @@ def find_mapdl(): try: path = get_ansys_path() if dpf.core.SERVER.os == "nt": - exe = os.path.join(path, "ansys", "bin", "winx64", "ANSYS.exe") - return os.path.isfile(exe) + exe = Path(path).joinpath("ansys", "bin", "winx64", "ANSYS.exe") + return exe.is_file() else: return False @@ -465,8 +469,8 @@ def test_inputs_outputs_datasources_operator(cyclic_ds, server_type): dsout = op.outputs.data_sources() assert dsout is not None assert dsout.result_key == "rst" - path = os.path.join(dsout.result_files[0]) - shutil.rmtree(os.path.dirname(path)) + path = Path(dsout.result_files[0]) + shutil.rmtree(path.parent) def test_subresults_operator(cyclic_lin_rst, cyclic_ds): @@ -1424,3 +1428,71 @@ def test_operator_input_output_streams(server_in_process, simple_bar): time_provider.connect(pin=3, inpt=streams) times = time_provider.outputs.time_freq_support() assert times + + +@pytest.mark.skipif( + not conftest.SERVERS_VERSION_GREATER_THAN_OR_EQUAL_TO_10_0, + reason="Operator `workflow_to_workflow_topology` does not exist below 10.0", +) +def 
test_operator_outputs_derived_class(server_type): + workflow = dpf.core.Workflow(server=server_type) + + workflow_to_workflow_topology_op = dpf.core.Operator( + "workflow_to_workflow_topology", server=server_type + ) + workflow_to_workflow_topology_op.inputs.workflow.connect(workflow) + + workflow_topology = workflow_to_workflow_topology_op.outputs.workflow_topology() + assert workflow_topology + + +@pytest.mark.skipif( + not conftest.SERVERS_VERSION_GREATER_THAN_OR_EQUAL_TO_10_0, + reason="Operator `workflow_to_workflow_topology` does not exist below 10.0", +) +def test_operator_get_output_derived_class(server_type): + workflow = dpf.core.Workflow(server=server_type) + + workflow_to_workflow_topology_op = dpf.core.Operator( + "workflow_to_workflow_topology", server=server_type + ) + workflow_to_workflow_topology_op.inputs.workflow.connect(workflow) + + workflow_topology = workflow_to_workflow_topology_op.get_output(0, WorkflowTopology) + assert workflow_topology + + +def test_record_derived_type(): + class TestContainer(CustomContainerBase): + pass + + class TestContainer2(CustomContainerBase): + pass + + class_name = "TestContainer" + + derived_classes = derived_class_name_to_type() + assert class_name not in derived_classes + + record_derived_class(class_name, TestContainer) + assert class_name in derived_classes + assert derived_classes[class_name] is TestContainer + + record_derived_class(class_name, TestContainer2) + assert derived_classes[class_name] is TestContainer + + record_derived_class(class_name, TestContainer2, overwrite=True) + assert derived_classes[class_name] is TestContainer2 + + +@conftest.raises_for_servers_version_under("10.0") +def test_operator_id(server_type): + ids = set() + + for _ in range(10): + op = ops.utility.forward(server=server_type) + + assert op.id >= 0 + assert op.id not in ids + + ids.add(op.id) diff --git a/tests/test_plotter.py b/tests/test_plotter.py index 577bda5fb9..ad8d41d7da 100644 --- a/tests/test_plotter.py +++ b/tests/test_plotter.py @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
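The `record_derived_class` test above exercises the registry that maps server-side class names to Python wrapper types; that registry is what lets `Operator.get_output` and generated outputs return objects such as `WorkflowTopology`. A sketch of registering a custom wrapper, where `MyContainer` is a hypothetical name:

    from ansys.dpf.core.common import derived_class_name_to_type, record_derived_class
    from ansys.dpf.core.custom_container_base import CustomContainerBase


    class MyContainer(CustomContainerBase):
        """Hypothetical wrapper around a GenericDataContainer."""


    record_derived_class("MyContainer", MyContainer)
    assert derived_class_name_to_type()["MyContainer"] is MyContainer

    # An existing registration is kept unless overwrite=True is passed explicitly.
    record_derived_class("MyContainer", MyContainer, overwrite=True)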
-import os +from pathlib import Path import pytest @@ -42,8 +42,8 @@ def remove_picture(picture): - if os.path.exists(os.path.join(os.getcwd(), picture)): - os.remove(os.path.join(os.getcwd(), picture)) + if Path.cwd().joinpath(picture).exists(): + Path.cwd().joinpath(picture).unlink() @pytest.mark.skipif(not HAS_PYVISTA, reason="Please install pyvista") @@ -53,7 +53,7 @@ def test_plotter_on_model(plate_msup): picture = "model_plot.png" remove_picture(picture) model.plot(off_screen=True, screenshot=picture) - assert os.path.exists(os.path.join(os.getcwd(), picture)) + assert Path.cwd().joinpath(picture).exists() remove_picture(picture) @@ -155,7 +155,7 @@ def test_plot_fieldscontainer_on_mesh(allkindofcomplexity): picture = "mesh_plot.png" remove_picture(picture) mesh.plot(fc, off_screen=True, screenshot=picture) - assert os.path.exists(os.path.join(os.getcwd(), picture)) + assert Path.cwd().joinpath(picture).exists() remove_picture(picture) @@ -193,7 +193,7 @@ def test_field_nodal_plot(allkindofcomplexity): picture = "field_plot.png" remove_picture(picture) f.plot(off_screen=True, screenshot=picture) - assert os.path.exists(os.path.join(os.getcwd(), picture)) + assert Path.cwd().joinpath(picture).exists() remove_picture(picture) @@ -372,7 +372,7 @@ def test_plot_meshes_container_1(multishells): picture = "meshes_cont_plot.png" remove_picture(picture) meshes_cont.plot(disp_fc, off_screen=True, screenshot=picture) - assert os.path.exists(os.path.join(os.getcwd(), picture)) + assert Path.cwd().joinpath(picture).exists() remove_picture(picture) @@ -656,7 +656,7 @@ def test_plot_chart(allkindofcomplexity): picture = "plot_chart.png" remove_picture(picture) plot_chart(new_fields_container, off_screen=True, screenshot=picture) - assert os.path.exists(os.path.join(os.getcwd(), picture)) + assert Path.cwd().joinpath(picture).exists() remove_picture(picture) diff --git a/tests/test_plugins.py b/tests/test_plugins.py index 55b2f34a86..4b2b7090db 100644 --- a/tests/test_plugins.py +++ b/tests/test_plugins.py @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
-import os.path +from pathlib import Path import pytest @@ -124,7 +124,7 @@ def test_vtk(server_type, tmpdir): u = model.operator("U") op.inputs.fields1.connect(u) op.inputs.mesh.connect(model.metadata.mesh_provider) - op.inputs.directory.connect(os.path.dirname(rst_file)) + op.inputs.directory.connect(str(Path(rst_file).parent)) out_path = op.eval() # assert out_path.result_files is not [] # try: diff --git a/tests/test_python_plugins.py b/tests/test_python_plugins.py index 815555fda1..f95aad8108 100644 --- a/tests/test_python_plugins.py +++ b/tests/test_python_plugins.py @@ -22,6 +22,7 @@ import pytest import os +from pathlib import Path import platform import numpy as np from conftest import SERVERS_VERSION_GREATER_THAN_OR_EQUAL_TO_4_0 @@ -56,7 +57,7 @@ @pytest.fixture(scope="module") def load_all_types_plugin(testfiles_dir): return dpf.load_library( - dpf.path_utilities.to_server_os(os.path.join(testfiles_dir, "pythonPlugins", "all_types")), + dpf.path_utilities.to_server_os(Path(testfiles_dir) / "pythonPlugins" / "all_types"), "py_test_types", "load_operators", ) @@ -65,7 +66,7 @@ def load_all_types_plugin(testfiles_dir): def load_all_types_plugin_with_serv(my_server, testfiles_dir): return dpf.load_library( dpf.path_utilities.to_server_os( - os.path.join(testfiles_dir, "pythonPlugins", "all_types"), my_server + Path(testfiles_dir) / "pythonPlugins" / "all_types", my_server ), "py_test_types", "load_operators", @@ -242,7 +243,7 @@ def test_generic_data_container(server_clayer_remote_process, testfiles_dir): def test_syntax_error(server_type_remote_process, testfiles_dir): dpf.load_library( dpf.path_utilities.to_server_os( - os.path.join(testfiles_dir, "pythonPlugins", "syntax_error_plugin"), + Path(testfiles_dir) / "pythonPlugins" / "syntax_error_plugin", server_type_remote_process, ), "py_raising", @@ -381,7 +382,7 @@ def test_create_properties_specification(server_in_process): def test_custom_op_with_spec(server_type_remote_process, testfiles_dir): dpf.load_library( dpf.path_utilities.to_server_os( - os.path.join(testfiles_dir, "pythonPlugins"), server_type_remote_process + Path(testfiles_dir) / "pythonPlugins", server_type_remote_process ), "py_operator_with_spec", "load_operators", diff --git a/tests/test_service.py b/tests/test_service.py index f654323558..c128a480ed 100644 --- a/tests/test_service.py +++ b/tests/test_service.py @@ -21,6 +21,7 @@ # SOFTWARE. 
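The service tests that follow wrap pytest's `tmpdir` fixture (a py.path.local object) in `pathlib.Path` once and then stay in pathlib for joins and existence checks. A minimal sketch of that pattern, with a hypothetical test body:

    from pathlib import Path


    def test_writes_in_tmpdir(tmpdir):
        # Convert the legacy py.path.local fixture once, then stay in pathlib.
        tmpdir = Path(tmpdir)
        target = tmpdir / "subdir" / "file.txt"
        target.parent.mkdir()
        target.write_text("hello", encoding="utf-8")
        assert target.exists()
        assert target.stat().st_size > 0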
import os +from pathlib import Path import pytest import conftest @@ -77,7 +78,7 @@ def test_loadplugin(server_type): reason="Random SEGFAULT in the GitHub pipeline for 3.7-8 on Windows", ) def test_upload_download(tmpdir, server_type_remote_process): - tmpdir = str(tmpdir) + tmpdir = Path(tmpdir) file = dpf.core.upload_file_in_tmp_folder( examples.download_all_kinds_of_complexity(return_local_path=True), server=server_type_remote_process, @@ -91,17 +92,14 @@ def test_upload_download(tmpdir, server_type_remote_process): fielddef = f.field_definition assert fielddef.unit == "Pa" - dir = os.path.dirname(file) - vtk_path = os.path.join(dir, "file.vtk") + vtk_path = Path(file).parent / "file.vtk" vtk = dpf.core.operators.serialization.vtk_export( - file_path=vtk_path, fields1=fcOut, server=server_type_remote_process + file_path=str(vtk_path), fields1=fcOut, server=server_type_remote_process ) vtk.run() - dpf.core.download_file( - vtk_path, os.path.join(tmpdir, "file.vtk"), server=server_type_remote_process - ) - assert os.path.exists(os.path.join(tmpdir, "file.vtk")) + dpf.core.download_file(vtk_path, str(tmpdir / "file.vtk"), server=server_type_remote_process) + assert tmpdir.joinpath("file.vtk").exists() @pytest.mark.skipif(running_docker, reason="Path hidden within docker container") @@ -114,18 +112,18 @@ def test_download_folder( ) file = dpf.core.upload_file_in_tmp_folder(plate_msup, server=server_type_remote_process) file = dpf.core.upload_file_in_tmp_folder(multishells, server=server_type_remote_process) - parent_path = os.path.dirname(file) + parent_path = str(Path(file).parent) dpf.core.download_files_in_folder(parent_path, tmpdir, server=server_type_remote_process) import ntpath - assert os.path.exists(os.path.join(tmpdir, ntpath.basename(allkindofcomplexity))) - assert os.path.exists(os.path.join(tmpdir, ntpath.basename(plate_msup))) - assert os.path.exists(os.path.join(tmpdir, ntpath.basename(multishells))) + assert Path(tmpdir).joinpath(ntpath.basename(allkindofcomplexity)).exists() + assert Path(tmpdir).joinpath(ntpath.basename(plate_msup)).exists() + assert Path(tmpdir).joinpath(ntpath.basename(multishells)).exists() @pytest.mark.skipif(running_docker, reason="Path hidden within docker container") def test_download_with_subdir(multishells, tmpdir, server_type_remote_process): - tmpdir = str(tmpdir) + tmpdir = Path(tmpdir) file = dpf.core.upload_file_in_tmp_folder(multishells, server=server_type_remote_process) base = dpf.core.BaseService(server=server_type_remote_process) @@ -134,56 +132,56 @@ def test_download_with_subdir(multishells, tmpdir, server_type_remote_process): import ntpath filename = ntpath.basename(file) - parent_path = os.path.dirname(file) + parent_path = str(Path(file).parent) to_server_path = parent_path + separator + "subdir" + separator + filename subdir_filepath = dpf.core.upload_file(file, to_server_path, server=server_type_remote_process) folder = parent_path - out = dpf.core.download_files_in_folder(folder, tmpdir, server=server_type_remote_process) - p1 = os.path.join(tmpdir, filename) - p2 = os.path.join(tmpdir, "subdir", filename) + _ = dpf.core.download_files_in_folder(folder, str(tmpdir), server=server_type_remote_process) + p1 = tmpdir / filename + p2 = tmpdir / "subdir" / filename # p1 = tmpdir + "/" + filename # p2 = tmpdir + "/subdir/" + filename - assert os.path.exists(p1) - assert os.path.exists(p2) + assert p1.exists() + assert p2.exists() @pytest.mark.skipif(running_docker, reason="Path hidden within docker container") def 
test_downloadinfolder_uploadinfolder(multishells, tmpdir, server_type_remote_process): - tmpdir = str(tmpdir) + tmpdir = Path(tmpdir) base = dpf.core.BaseService(server=server_type_remote_process) # create in tmpdir some architecture with subfolder in subfolder - path1 = os.path.join(tmpdir, os.path.basename(multishells)) - path2 = os.path.join(tmpdir, "subdirA", os.path.basename(multishells)) - path4 = os.path.join(tmpdir, "subdirB", os.path.basename(multishells)) + path1 = tmpdir / Path(multishells).name + path2 = tmpdir / "subdirA" / Path(multishells).name + path4 = tmpdir / "subdirB" / Path(multishells).name from shutil import copyfile copyfile(multishells, path1) - os.mkdir(os.path.join(tmpdir, "subdirA")) + tmpdir.joinpath("subdirA").mkdir() copyfile(multishells, path2) - os.mkdir(os.path.join(tmpdir, "subdirB")) + tmpdir.joinpath("subdirB").mkdir() copyfile(multishells, path4) # upload it TARGET_PATH = base.make_tmp_dir_server() dpf.core.upload_files_in_folder( to_server_folder_path=TARGET_PATH, - client_folder_path=tmpdir, + client_folder_path=str(tmpdir), specific_extension="rst", server=server_type_remote_process, ) # download it - new_tmpdir = os.path.join(tmpdir, "my_tmp_dir") - os.mkdir(new_tmpdir) + new_tmpdir = tmpdir / "my_tmp_dir" + new_tmpdir.mkdir() out = dpf.core.download_files_in_folder( - TARGET_PATH, new_tmpdir, server=server_type_remote_process + TARGET_PATH, str(new_tmpdir), server=server_type_remote_process ) # check if the architecture of the download is ok - path1_check = os.path.join(new_tmpdir, os.path.basename(multishells)) - path2_check = os.path.join(new_tmpdir, "subdirA", os.path.basename(multishells)) - path4_check = os.path.join(new_tmpdir, "subdirB", os.path.basename(multishells)) - assert os.path.exists(path1_check) - assert os.path.exists(path2_check) - assert os.path.exists(path4_check) + path1_check = new_tmpdir / Path(multishells).name + path2_check = new_tmpdir / "subdirA" / Path(multishells).name + path4_check = new_tmpdir / "subdirB" / Path(multishells).name + assert path1_check.exists() + assert path2_check.exists() + assert path4_check.exists() # clean # os.remove(os.path.join(tmpdir, "tmpdir")) # os.remove(os.path.join(tmpdir, "subdirA")) @@ -243,18 +241,18 @@ def test_uploadinfolder_emptyfolder(tmpdir, server_type_remote_process): def test_load_plugin_correctly(server_type): from ansys.dpf import core as dpf - actual_path = os.path.dirname(pkgutil.get_loader("ansys.dpf.core").path) + actual_path = Path(pkgutil.get_loader("ansys.dpf.core").path).parent base = dpf.BaseService(server=server_type) if server_type.os == "nt": base.load_library("Ans.Dpf.Math.dll", "math_operators", generate_operators=True) - t = os.path.getmtime(os.path.join(actual_path, r"operators/math/fft_eval.py")) + t = actual_path.joinpath("operators/math/fft_eval.py").stat().st_mtime assert datetime.datetime.fromtimestamp(t).date() == datetime.datetime.today().date() else: base.load_library("libAns.Dpf.Math.so", "math_operators") - exists = os.path.exists(os.path.join(actual_path, r"operators/fft_eval.py")) + exists = actual_path.joinpath("operators/fft_eval.py").exists() assert not exists - num_lines = sum(1 for line in open(os.path.join(actual_path, r"operators/math/__init__.py"))) + num_lines = sum(1 for line in actual_path.joinpath("operators/math/__init__.py").open()) assert num_lines >= 11 @@ -267,18 +265,16 @@ def test_load_plugin_correctly_remote(): server.external_ip, server.external_port, as_global=False ) - actual_path = 
os.path.dirname(pkgutil.get_loader("ansys.dpf.core").path) + actual_path = Path(pkgutil.get_loader("ansys.dpf.core").path).parent if server.os == "posix": dpf.load_library("libAns.Dpf.Math.so", "math_operators", server=server_connected) else: dpf.load_library("Ans.Dpf.Math.dll", "math_operators", server=server_connected) - t = os.path.getmtime(os.path.join(actual_path, r"operators/math/fft_eval.py")) + t = actual_path.joinpath("operators/math/fft_eval.py").stat().st_mtime assert datetime.datetime.fromtimestamp(t).date() == datetime.datetime.today().date() - actual_path = os.path.dirname(pkgutil.get_loader("ansys.dpf.core").path) - - assert os.path.exists(os.path.join(actual_path, r"operators/math/fft_eval.py")) + assert actual_path.joinpath("operators/math/fft_eval.py").exists() def test_dpf_join(server_type): @@ -320,7 +316,7 @@ def test_load_api_without_awp_root(restore_awp_root): assert serv._client_api_path is not None assert serv._grpc_client_path is not None - dpf_inner_path = os.path.join("ansys", "dpf", "gatebin") + dpf_inner_path = str(Path("ansys") / "dpf" / "gatebin") assert dpf_inner_path in serv._client_api_path assert dpf_inner_path in serv._grpc_client_path @@ -339,7 +335,7 @@ def test_load_api_with_awp_root(): assert serv_2._client_api_path is not None assert serv_2._grpc_client_path is not None - dpf_inner_path = os.path.join("ansys", "dpf", "gatebin") + dpf_inner_path = str(Path("ansys") / "dpf" / "gatebin") assert dpf_inner_path in serv_2._client_api_path assert dpf_inner_path in serv_2._grpc_client_path @@ -366,7 +362,7 @@ def test_load_api_with_awp_root_2(): assert serv._client_api_path is not None assert serv._grpc_client_path is not None - dpf_inner_path = os.path.join("ansys", "dpf", "gatebin") + dpf_inner_path = str(Path("ansys") / "dpf" / "gatebin") assert dpf_inner_path in serv._client_api_path assert dpf_inner_path in serv._grpc_client_path @@ -421,9 +417,9 @@ def test_load_api_with_awp_root_no_gatebin(): assert serv_2._grpc_client_path is not None ISPOSIX = os.name == "posix" if not ISPOSIX: - dpf_inner_path = os.path.join("aisol", "bin", "winx64") + dpf_inner_path = str(Path("aisol") / "bin" / "winx64") else: - dpf_inner_path = os.path.join("aisol", "dll", "linx64") + dpf_inner_path = str(Path("aisol") / "dll" / "linx64") assert dpf_inner_path in serv_2._client_api_path assert dpf_inner_path in serv_2._grpc_client_path @@ -449,9 +445,9 @@ def test_load_api_with_awp_root_2_no_gatebin(): assert serv._grpc_client_path is not None ISPOSIX = os.name == "posix" if not ISPOSIX: - dpf_inner_path = os.path.join("aisol", "bin", "winx64") + dpf_inner_path = str(Path("aisol") / "bin" / "winx64") else: - dpf_inner_path = os.path.join("aisol", "dll", "linx64") + dpf_inner_path = str(Path("aisol") / "dll" / "linx64") assert dpf_inner_path in serv._client_api_path assert dpf_inner_path in serv._grpc_client_path diff --git a/tests/test_session.py b/tests/test_session.py index b9b4e8074d..a2076badb7 100644 --- a/tests/test_session.py +++ b/tests/test_session.py @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
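In the assertions above, `str(Path("ansys") / "dpf" / "gatebin")` builds the expected fragment with the separator native to whichever platform runs the test, so the same substring check passes on Windows and Linux. A small sketch; the full library path below is hypothetical:

    import os
    from pathlib import Path

    fragment = str(Path("ansys") / "dpf" / "gatebin")   # "ansys/dpf/gatebin" or "ansys\\dpf\\gatebin"
    assert os.sep in fragment

    # Hypothetical client API location, built the same way for the current platform.
    client_api_path = str(Path("/install") / "ansys" / "dpf" / "gatebin" / "libDPFClientAPI.so")
    assert fragment in client_api_path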

-import os
+from pathlib import Path
 import conftest
 import tempfile
 from ansys.dpf import core
@@ -31,10 +31,10 @@ def get_log_file(log_path, server):
     if not isinstance(server, core.server_types.InProcessServer):
         core.core.download_file(
             log_path,
-            os.path.join(tempfile.gettempdir(), "log2.txt"),
+            str(Path(tempfile.gettempdir()) / "log2.txt"),
             server=server,
         )
-        return os.path.join(tempfile.gettempdir(), "log2.txt")
+        return str(Path(tempfile.gettempdir()) / "log2.txt")
     else:
         return log_path
@@ -47,14 +47,14 @@ def test_logging(tmpdir, server_type):
             examples.find_static_rst(return_local_path=True, server=server_type),
             server=server_type,
         )
-        log_path = os.path.join(server_tmp, "log.txt")
+        log_path = Path(server_tmp) / "log.txt"
     else:
-        log_path = os.path.join(tmpdir, "log.txt")
+        log_path = Path(tmpdir) / "log.txt"

     result_file = examples.find_static_rst(server=server_type)

     # download it
-    new_tmpdir = os.path.join(tmpdir, "my_tmp_dir")
-    server_type.session.handle_events_with_file_logger(log_path, 2)
+    _ = Path(tmpdir) / "my_tmp_dir"
+    server_type.session.handle_events_with_file_logger(str(log_path), 2)

     wf = core.Workflow(server=server_type)
     wf.progress_bar = False
@@ -65,13 +65,13 @@ def test_logging(tmpdir, server_type):
     wf.set_output_name("out", to_nodal.outputs.fields_container)
     wf.get_output("out", core.types.fields_container)

-    download_log_path = get_log_file(log_path, server_type)
-    assert os.path.exists(download_log_path)
-    file_size = os.path.getsize(download_log_path)
+    download_log_path = Path(get_log_file(str(log_path), server_type))
+    assert download_log_path.exists()
+    file_size = download_log_path.stat().st_size
     assert file_size > 20
     server_type._del_session()
-    download_log_path = get_log_file(log_path, server_type)
-    file_size = os.path.getsize(download_log_path)
+    download_log_path = Path(get_log_file(str(log_path), server_type))
+    file_size = download_log_path.stat().st_size

     wf = core.Workflow(server=server_type)
     wf.progress_bar = False
@@ -82,8 +82,8 @@ def test_logging(tmpdir, server_type):
     wf.set_output_name("out", to_nodal.outputs.fields_container)
     wf.get_output("out", core.types.fields_container)

-    download_log_path = get_log_file(log_path, server_type)
-    assert file_size == os.path.getsize(download_log_path)
+    download_log_path = Path(get_log_file(str(log_path), server_type))
+    assert file_size == download_log_path.stat().st_size


 @conftest.raises_for_servers_version_under("6.1")
@@ -93,8 +93,8 @@ def test_logging_remote(tmpdir, server_type_remote_process):
         examples.find_multishells_rst(return_local_path=True),
         server=server_type_remote_process,
     )
-    log_path = os.path.join(server_tmp, "log.txt")
-    server_type_remote_process.session.handle_events_with_file_logger(log_path, 2)
+    log_path = Path(server_tmp) / "log.txt"
+    server_type_remote_process.session.handle_events_with_file_logger(str(log_path), 2)
     server_type_remote_process.session.start_emitting_rpc_log()

     wf = core.Workflow(server=server_type_remote_process)
@@ -107,13 +107,13 @@ def test_logging_remote(tmpdir, server_type_remote_process):
     wf.set_output_name("out", to_nodal.outputs.fields_container)
     wf.get_output("out", core.types.fields_container)

-    download_log_path = get_log_file(log_path, server_type_remote_process)
-    assert os.path.exists(download_log_path)
-    file_size = os.path.getsize(download_log_path)
+    download_log_path = Path(get_log_file(str(log_path), server_type_remote_process))
+    assert download_log_path.exists()
+    file_size = download_log_path.stat().st_size
     assert file_size > 3000
     server_type_remote_process._del_session()
-    download_log_path = get_log_file(log_path, server_type_remote_process)
-    file_size = os.path.getsize(download_log_path)
+    download_log_path = Path(get_log_file(str(log_path), server_type_remote_process))
+    file_size = download_log_path.stat().st_size

     wf = core.Workflow(server=server_type_remote_process)
     wf.progress_bar = False
@@ -125,5 +125,5 @@ def test_logging_remote(tmpdir, server_type_remote_process):
     wf.set_output_name("out", to_nodal.outputs.fields_container)
     wf.get_output("out", core.types.fields_container)

-    download_log_path = get_log_file(log_path, server_type_remote_process)
-    assert file_size == os.path.getsize(download_log_path)
+    download_log_path = Path(get_log_file(str(log_path), server_type_remote_process))
+    assert file_size == download_log_path.stat().st_size
diff --git a/tests/test_streams_container.py b/tests/test_streams_container.py
index 01e116345e..c2577aee54 100644
--- a/tests/test_streams_container.py
+++ b/tests/test_streams_container.py
@@ -20,7 +20,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.

-import os
+from pathlib import Path
 import shutil

 from ansys import dpf
@@ -34,27 +34,27 @@ def test_create_streams_container(server_in_process, simple_bar):

 def test_release_handles(server_in_process, simple_bar):
-    split = os.path.splitext(simple_bar)
-    copy_path = split[0] + "copy" + split[1]
+    simple_bar = Path(simple_bar)
+    copy_path = simple_bar.parent / (simple_bar.stem + "copy" + simple_bar.suffix)
     shutil.copyfile(simple_bar, copy_path)
-    model = dpf.core.Model(copy_path, server=server_in_process)
+    model = dpf.core.Model(str(copy_path), server=server_in_process)
     # Evaluate something from the rst
     _ = model.metadata.meshed_region
     streams_provider = model.metadata.streams_provider
     sc = streams_provider.outputs.streams_container()
     sc.release_handles()
-    os.remove(copy_path)
+    copy_path.unlink()


 def test_release_streams_model(server_in_process, simple_bar):
-    split = os.path.splitext(simple_bar)
-    copy_path = split[0] + "copy2" + split[1]
+    simple_bar = Path(simple_bar)
+    copy_path = simple_bar.parent / (simple_bar.stem + "copy2" + simple_bar.suffix)
     shutil.copyfile(simple_bar, copy_path)
-    model = dpf.core.Model(copy_path, server=server_in_process)
+    model = dpf.core.Model(str(copy_path), server=server_in_process)
     # Evaluate something from the rst
     _ = model.metadata.meshed_region
     model.metadata.release_streams()
-    os.remove(copy_path)
+    copy_path.unlink()


 def test_release_streams_model_empty(server_in_process):
diff --git a/tests/test_workflow.py b/tests/test_workflow.py
index 5576cbe555..9ff5e51812 100644
--- a/tests/test_workflow.py
+++ b/tests/test_workflow.py
@@ -20,13 +20,14 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.

-import os
+from pathlib import Path
 import numpy as np
 import pytest
 import platform

 import ansys.dpf.core.operators as op
+from ansys.dpf.core.workflow_topology import WorkflowTopology
 import conftest
 from ansys import dpf
 from ansys.dpf.core import misc
@@ -46,17 +47,17 @@ def test_create_workflow(server_type):
 def remove_dot_file(request):
     """Cleanup a testing directory once we are finished."""
-    dot_path = os.path.join(os.getcwd(), "test.dot")
-    png_path = os.path.join(os.getcwd(), "test.png")
-    png_path1 = os.path.join(os.getcwd(), "test1.png")
+    dot_path = Path.cwd() / "test.dot"
+    png_path = Path.cwd() / "test.png"
+    png_path1 = Path.cwd() / "test1.png"

     def remove_files():
-        if os.path.exists(dot_path):
-            os.remove(os.path.join(os.getcwd(), dot_path))
-        if os.path.exists(png_path):
-            os.remove(os.path.join(os.getcwd(), png_path))
-        if os.path.exists(png_path1):
-            os.remove(os.path.join(os.getcwd(), png_path1))
+        if dot_path.exists():
+            dot_path.unlink()
+        if png_path.exists():
+            png_path.unlink()
+        if png_path1.exists():
+            png_path1.unlink()

     request.addfinalizer(remove_files)
@@ -77,11 +78,11 @@ def test_workflow_view(server_in_process, remove_dot_file):
     wf.connect_with(pre_wf, {"prewf_output": "wf_input"})

     wf.view(off_screen=True, title="test1")
-    assert not os.path.exists("test1.dot")
-    assert os.path.exists("test1.png")
+    assert not Path("test1.dot").exists()
+    assert Path("test1.png").exists()
     wf.view(off_screen=True, save_as="test.png", keep_dot_file=True)
-    assert os.path.exists("test.dot")
-    assert os.path.exists("test.png")
+    assert Path("test.dot").exists()
+    assert Path("test.png").exists()


 def test_connect_field_workflow(server_type):
@@ -1030,6 +1031,26 @@ def test_workflow_input_output_streams(server_in_process, simple_bar):
     assert times


+@pytest.mark.skipif(
+    not conftest.SERVERS_VERSION_GREATER_THAN_OR_EQUAL_TO_10_0,
+    reason="Operator `workflow_to_workflow_topology` does not exist below 10.0",
+)
+def test_workflow_get_output_derived_class(server_type):
+    workflow = dpf.core.Workflow(server=server_type)
+
+    workflow_to_workflow_topology_op = dpf.core.Operator(
+        "workflow_to_workflow_topology", server=server_type
+    )
+    dpf_workflow_wrapper = dpf.core.Workflow(server=server_type)
+    dpf_workflow_wrapper.add_operator(workflow_to_workflow_topology_op)
+    dpf_workflow_wrapper.set_input_name("input", workflow_to_workflow_topology_op, 0)
+    dpf_workflow_wrapper.set_output_name("output", workflow_to_workflow_topology_op, 0)
+    dpf_workflow_wrapper.connect("input", workflow)
+
+    workflow_topology = dpf_workflow_wrapper.get_output("output", WorkflowTopology)
+    assert workflow_topology
+
+
 def main():
     test_connect_field_workflow()
     velocity_acceleration = conftest.resolve_test_file("velocity_acceleration.rst", "rst_operators")
diff --git a/tests/test_workflow_topology.py b/tests/test_workflow_topology.py
new file mode 100644
index 0000000000..beea6c01d4
--- /dev/null
+++ b/tests/test_workflow_topology.py
@@ -0,0 +1,198 @@
+# Copyright (C) 2020 - 2024 ANSYS, Inc. and/or its affiliates.
+# SPDX-License-Identifier: MIT
+#
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import pytest
+
+from ansys import dpf
+import ansys.dpf.core.operators as op
+import conftest
+from conftest import raises_for_servers_version_under
+
+
+def workflow_forward(server_type) -> dpf.core.Workflow:
+    """
+    ┌─────────┐      ┌────────────┐      ┌────────────┐      ┌──────────┐
+    │"Input 0"├─────►│forward_op_1├─────►│forward_op_2├─────►│"Output 0"│
+    └─────────┘      └────────────┘      └────────────┘      └──────────┘
+    ┌───────┐           ┌────────────┐      ┌──────────┐
+    │"hello"├──────────►│forward_op_3├─────►│"Output 1"│
+    └───────┘           └────────────┘      └──────────┘
+    """
+
+    forward_op_1 = op.utility.forward(server=server_type)
+    forward_op_2 = op.utility.forward(server=server_type)
+    forward_op_3 = op.utility.forward(server=server_type)
+
+    forward_op_2.inputs.connect(forward_op_1.outputs)
+    forward_op_3.inputs.connect("hello")
+
+    workflow = dpf.core.Workflow(server=server_type)
+
+    workflow.add_operators([forward_op_1, forward_op_2, forward_op_3])
+
+    workflow.set_input_name("Input 0", forward_op_1.inputs.any)
+    workflow.set_output_name("Output 0", forward_op_2.outputs.any)
+    workflow.set_output_name("Output 1", forward_op_3.outputs.any)
+
+    return workflow
+
+
+def workflow_forward_5(server_type) -> dpf.core.Workflow:
+    """
+    ┌─────────┐                        ┌──────────┐
+    │"Input 0"├──┐                 ┌──►│"Output 0"│
+    └─────────┘  │                 │   └──────────┘
+    ┌─────────┐  │                 │   ┌──────────┐
+    │"Input 1"├──┤                 ├──►│"Output 1"│
+    └─────────┘  │                 │   └──────────┘
+    ┌─────────┐  │   ┌──────────┐  │   ┌──────────┐
+    │"Input 2"├──┼──►│forward_op├──┼──►│"Output 2"│
+    └─────────┘  │   └──────────┘  │   └──────────┘
+    ┌─────────┐  │                 │   ┌──────────┐
+    │"Input 3"├──┤                 ├──►│"Output 3"│
+    └─────────┘  │                 │   └──────────┘
+    ┌─────────┐  │                 │   ┌──────────┐
+    │"Input 4"├──┘                 └──►│"Output 4"│
+    └─────────┘                        └──────────┘
+    """
+
+    forward_op = op.utility.forward(server=server_type)
+
+    workflow = dpf.core.Workflow(server=server_type)
+
+    workflow.add_operators([forward_op])
+
+    for i in range(5):
+        workflow.set_input_name(f"Input {i}", forward_op, i)
+        workflow.set_output_name(f"Output {i}", forward_op, i)
+
+    return workflow
+
+
+def workflow_disp_min_max(server_type) -> dpf.core.Workflow:
+    """
+    ┌──────────────┐      ┌───────┐      ┌─────────────┐      ┌─────┐
+    │"data_sources"├─────►│disp_op├─────►│min_max_fc_op├──┬──►│"min"│
+    └──────────────┘      └───────┘      └─────────────┘  │   └─────┘
+                                                           │   ┌─────┐
+                                                           └──►│"max"│
+                                                               └─────┘
+    """
+
+    disp_op = op.result.displacement(server=server_type)
+    min_max_fc_op = op.min_max.min_max_fc(disp_op, server=server_type)
+
+    workflow = dpf.core.Workflow(server=server_type)
+
+    workflow.add_operators([disp_op, min_max_fc_op])
+
+    workflow.set_input_name("data_sources", disp_op.inputs.data_sources)
+    workflow.set_output_name("min", min_max_fc_op.outputs.field_min)
+    workflow.set_output_name("max", min_max_fc_op.outputs.field_max)
+
+    return workflow
+
+
+workflows = {
+    "workflow_forward": workflow_forward,
+    "workflow_forward_5": workflow_forward_5,
+    "workflow_disp_min_max": workflow_disp_min_max,
+}
+workflow_topologies = {
+    "workflow_forward": {
+        "operators": 3,
+        "operator_connections": 1,
+        "data_connections": 1,
+        "exposed_inputs": 1,
+        "exposed_outputs": 2,
+    },
+    "workflow_forward_5": {
+        "operators": 1,
+        "operator_connections": 0,
+        "data_connections": 0,
+        "exposed_inputs": 5,
+        "exposed_outputs": 5,
+    },
+    "workflow_disp_min_max": {
+        "operators": 2,
+        "operator_connections": 1,
+        "data_connections": 0,
+        "exposed_inputs": 1,
+        "exposed_outputs": 2,
+    },
+}
+
+
+@pytest.fixture(
+    params=list(workflows.values()),
+    ids=list(workflows.keys()),
+)
+def workflow(server_type, request) -> dpf.core.Workflow:
+    wf = request.param(server_type)
+    wf.name = list(workflows.keys())[request.param_index]
+    return wf
+
+
+@pytest.fixture()
+def expected_workflow_topology(workflow):
+    return workflow_topologies[workflow.name]
+
+
+@pytest.mark.skipif(
+    not conftest.SERVERS_VERSION_GREATER_THAN_OR_EQUAL_TO_10_0,
+    reason="Operator `workflow_to_workflow_topology` does not exist below 10.0",
+)
+def test_instantiate_workflow_to_workflow_topology_op(server_type):
+    workflow_to_workflow_topology_op = dpf.core.Operator(
+        "workflow_to_workflow_topology", server=server_type
+    )
+
+    assert workflow_to_workflow_topology_op
+
+
+@raises_for_servers_version_under("10.0")
+def test_workflow_get_topology(workflow):
+    workflow_topology = workflow.get_topology()
+
+    assert workflow_topology
+
+
+@raises_for_servers_version_under("10.0")
+def test_workflow_topology_sizes(workflow, expected_workflow_topology):
+    workflow_topology = workflow.get_topology()
+
+    assert len(workflow_topology.operators) == expected_workflow_topology["operators"]
+    assert (
+        len(workflow_topology.operator_connections)
+        == expected_workflow_topology["operator_connections"]
+    )
+    assert len(workflow_topology.data_connections) == expected_workflow_topology["data_connections"]
+    assert len(workflow_topology.exposed_inputs) == expected_workflow_topology["exposed_inputs"]
+    assert len(workflow_topology.exposed_outputs) == expected_workflow_topology["exposed_outputs"]
+
+
+@raises_for_servers_version_under("10.0")
+def test_workflow_topology_str(workflow):
+    workflow_topology = workflow.get_topology()
+
+    # We only check that it does not raise
+    assert str(workflow_topology)
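
Note on the API exercised by the new tests: the snippet below is a minimal, illustrative usage sketch of `Workflow.get_topology()` and the `WorkflowTopology` output type introduced with this change set; it is not part of the diff. It mirrors the `workflow_forward` fixture on a smaller scale and assumes a DPF server at version 10.0 or later (the operator `workflow_to_workflow_topology` does not exist below 10.0); with no explicit `server=` argument, the global server is used.

    from ansys import dpf
    import ansys.dpf.core.operators as op

    # Build a small workflow: two chained forward operators with one exposed
    # input and one exposed output, as in the test fixtures above.
    forward_op_1 = op.utility.forward()
    forward_op_2 = op.utility.forward()
    forward_op_2.inputs.connect(forward_op_1.outputs)

    workflow = dpf.core.Workflow()
    workflow.add_operators([forward_op_1, forward_op_2])
    workflow.set_input_name("Input 0", forward_op_1.inputs.any)
    workflow.set_output_name("Output 0", forward_op_2.outputs.any)

    # New with this change set: summarize the workflow as a WorkflowTopology.
    topology = workflow.get_topology()
    print(len(topology.operators))             # expected: 2 operators
    print(len(topology.operator_connections))  # expected: 1 operator connection
    print(len(topology.exposed_inputs))        # expected: 1 exposed input
    print(len(topology.exposed_outputs))       # expected: 1 exposed output
    print(str(topology))                       # str() is only guaranteed not to raise
                                               # (see test_workflow_topology_str)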